From b83cd8eba837348bc194db8c1dd12e369602e4ac Mon Sep 17 00:00:00 2001
From: Michael Foiani
Date: Wed, 4 May 2022 17:37:55 -0400
Subject: got it to run, but it hangs

---
 __pycache__/hyperparameters.cpython-38.pyc | Bin 343 -> 335 bytes
 __pycache__/losses.cpython-38.pyc          | Bin 4236 -> 4495 bytes
 __pycache__/preprocess.cpython-38.pyc      | Bin 5048 -> 5040 bytes
 losses.py                                  | 25 ++++++++++++++++---------
 main.py                                    |  2 ++
 5 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
index 9b86a7da..8654cf2c 100644
Binary files a/__pycache__/hyperparameters.cpython-38.pyc and b/__pycache__/hyperparameters.cpython-38.pyc differ
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index ebfd7772..c409cebf 100644
Binary files a/__pycache__/losses.cpython-38.pyc and b/__pycache__/losses.cpython-38.pyc differ
diff --git a/__pycache__/preprocess.cpython-38.pyc b/__pycache__/preprocess.cpython-38.pyc
index e2f42bca..a4fcfb04 100644
Binary files a/__pycache__/preprocess.cpython-38.pyc and b/__pycache__/preprocess.cpython-38.pyc differ
diff --git a/losses.py b/losses.py
index 2cb51e15..bf6f6973 100644
--- a/losses.py
+++ b/losses.py
@@ -68,9 +68,12 @@ class YourModel(tf.keras.Model):
         # for layer in self.vgg16:
         #     layer.trainable = False
 
-        self.indexed_layers = [layer for layer in self.vgg16 if layer.name == "conv1"]
+        self.indexed_layers = [layer for layer in self.vgg16 if "conv1" in layer.name]
         print(self.indexed_layers)
-        self.desired = [layer.name for layer in self.vgg16 if layer.name == "conv1"]
+        self.desired = [layer.name for layer in self.vgg16 if "conv1" in layer.name]
+
+        # create a map of the layers to their corresponding number of filters if it is a convolutional layer
+        self.layer_to_filters = {layer.name: layer.filters for layer in self.vgg16 if "conv" in layer.name}
 
     def call(self, x):
         layers = []
@@ -99,15 +102,18 @@ class YourModel(tf.keras.Model):
         return (self.alpha * content_l) + (self.beta * style_l)
 
     def content_loss(self, photo_layers, input_layers):
+        print(photo_layers, input_layers)
         L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
         print(L_content)
         return L_content
 
-    def layer_loss(art_layers, input_layers, layer):
+    def layer_loss(self, art_layers, input_layers, layer):
+        # vectorize the art_layers
+        art_layers = tf.reshape(art_layers, (-1, art_layers.shape[-1]))
+        # vectorize the input_layers
+        input_layers = tf.reshape(input_layers, (-1, input_layers.shape[-1]))
 
-        #vectorize the inputs
-        art_vector = art_layers.reshape(-1, 224**2)
-        input_vector = input_layers.reshape(-1, 224**2)
+        print('layer', layer.name, self.layer_to_filters[layer.name])
 
         # get the gram matrix
         input_dim = input_layers.shape[0]
@@ -122,18 +128,19 @@ class YourModel(tf.keras.Model):
         # N depends on # of filters in the layer, M depends on hight and width of feature map
         M_l = art_layers.shape[0] * art_layers.shape[1]
 
-        # layer.filteres might not work
-        E_l = 1/4 * (layer.filters**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
+        # layer.filters might not work
+        E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
 
         # while Sotech is botty:
         #     Jayson_tatum.tear_acl()
         #     return ("this is just another day")
+        return E_l
 
     def style_loss(self, art_layers, input_layers):
         L_style = 0
         for layer in self.indexed_layers:
             L_style += self.layer_loss(art_layers, input_layers, layer)
-        print('this is style loss',L_style)
+        print('this is style loss', L_style)
         return L_style
 
     def train_step(self):
diff --git a/main.py b/main.py
index 063670b8..8363fcef 100644
--- a/main.py
+++ b/main.py
@@ -55,7 +55,9 @@ def main():
     print('this is',ARGS.content)
 
     content_image = imread(ARGS.content)
+    content_image = np.resize(content_image, (255, 255, 3))
     style_image = imread(ARGS.style)
+    style_image = np.resize(style_image, (255, 255, 3))
 
     my_model = YourModel(content_image=content_image, style_image=style_image)
     train(my_model)
--
cgit v1.2.3-70-g09d2
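
Note (not part of the patch): the layer_loss hunk above is building toward the per-layer style term from Gatys et al., E_l = 1/(4 * N_l^2 * M_l^2) * sum((G - A)^2), where G and A are Gram matrices of the generated and style feature maps. A minimal standalone sketch of that computation is below; the function names (gram_matrix, layer_style_loss) are illustrative and it assumes unbatched (H, W, C) feature maps rather than this repo's exact shapes.

    import tensorflow as tf

    def gram_matrix(features):
        # features: (H, W, C) feature map from one VGG conv layer
        c = features.shape[-1]
        flat = tf.reshape(features, (-1, c))             # (H*W, C)
        return tf.matmul(flat, flat, transpose_a=True)   # (C, C) Gram matrix

    def layer_style_loss(art_features, input_features):
        # E_l = 1 / (4 * N_l^2 * M_l^2) * sum((G_input - G_art)^2)
        n_l = art_features.shape[-1]                          # N_l: number of filters
        m_l = art_features.shape[0] * art_features.shape[1]   # M_l: height * width
        g_art = gram_matrix(art_features)
        g_input = gram_matrix(input_features)
        return tf.reduce_sum(tf.square(g_input - g_art)) / (4.0 * (n_l ** 2) * (m_l ** 2))

One design point worth noting: the sketch compares Gram matrix against Gram matrix and stays in TensorFlow ops (tf.reduce_sum rather than np.sum), which keeps the loss differentiable if it is later used inside a GradientTape-based training step.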