 __pycache__/hyperparameters.cpython-38.pyc | Bin 369 -> 369 bytes
 __pycache__/losses.cpython-38.pyc          | Bin 4603 -> 4473 bytes
 hyperparameters.py                         |  4 ++--
 losses.py                                  | 11 ++++-------
 save.jpg                                   | Bin 39951 -> 30377 bytes
 5 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
index d772a682..11bc2070 100644
--- a/__pycache__/hyperparameters.cpython-38.pyc
+++ b/__pycache__/hyperparameters.cpython-38.pyc
Binary files differ
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index 1112f0ad..d583a985 100644
--- a/__pycache__/losses.cpython-38.pyc
+++ b/__pycache__/losses.cpython-38.pyc
Binary files differ
diff --git a/hyperparameters.py b/hyperparameters.py
index 4f264528..a0068dd1 100644
--- a/hyperparameters.py
+++ b/hyperparameters.py
@@ -9,14 +9,14 @@ Number of epochs. If you experiment with more complex networks you
might need to increase this. Likewise if you add regularization that
slows training.
"""
-num_epochs = 10
+num_epochs = 100
"""
A critical parameter that can dramatically affect whether training
succeeds or fails. The value for this depends significantly on which
optimizer is used. Refer to the default learning rate parameter
"""
-learning_rate = 1e-4
+learning_rate = 3e-2
momentum = 0.01
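
The two changes above take training from a short, conservative run (10 epochs at 1e-4) to a long, aggressive one (100 epochs at 3e-2). A step size of 3e-2 would be large for updating network weights, but is plausible here because the gradients are applied directly to image pixels (see the train_step change in losses.py below). A minimal sketch of how these values are presumably consumed; the SGD choice and the driver loop are assumptions, not code from this repo:

    # Assumed usage only: the actual optimizer is not visible in this diff.
    import tensorflow as tf
    import hyperparameters as hp

    optimizer = tf.keras.optimizers.SGD(
        learning_rate=hp.learning_rate,  # 3e-2 after this commit
        momentum=hp.momentum,            # 0.01, defined above
    )

    for epoch in range(hp.num_epochs):   # 100 after this commit
        model.train_step()               # hypothetical model instance
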
diff --git a/losses.py b/losses.py
index c0989ed1..407412a1 100644
--- a/losses.py
+++ b/losses.py
@@ -102,7 +102,7 @@ class YourModel(tf.keras.Model):
il = input_layers[i]
L_content = tf.math.add(L_content, tf.reduce_mean(tf.square(pl - il)))
- print('content loss', L_content)
+ #print('content loss', L_content)
return L_content
def layer_loss(self, art_layer, input_layer):
@@ -137,7 +137,7 @@ class YourModel(tf.keras.Model):
# while Sotech is botty:
# Jayson_tatum.tear_acl()
# return ("this is just another day")
- print('Layer loss', E_l)
+ #print('Layer loss', E_l)
return E_l
def style_loss(self, art_layers, input_layers):
@@ -146,18 +146,15 @@ class YourModel(tf.keras.Model):
art_layer = art_layers[i]
input_layer = input_layers[i]
L_style = tf.math.add(L_style, self.layer_loss(art_layer, input_layer))
- print('style loss', L_style)
+ #print('style loss', L_style)
return L_style
def train_step(self):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.x)
loss = self.loss_fn(self.content_image, self.style_image, self.x)
- #print('loss', loss)
+ print('loss', loss)
#print('self.x', self.x)
gradients = tape.gradient(loss, [self.x])
#print('gradients', gradients)
- print(self.x.shape)
- print(type(self.x))
- print(type(gradients))
self.optimizer.apply_gradients(zip(gradients, [self.x]))
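
For orientation, the hunks above span the three loss terms and the training step: content_loss accumulates mean squared feature differences, style_loss sums layer_loss over feature maps, and train_step differentiates the combined loss with respect to the generated image self.x, so the optimizer updates pixels rather than weights (hence the tape watching self.x explicitly). The print-to-comment flips silence the per-layer logging and leave a single aggregate loss print per step. layer_loss itself sits outside these hunks; a hedged sketch of the standard Gatys et al. formulation it presumably follows, comparing Gram matrices of the two feature maps:

    # Assumption: layer_loss mirrors the classic Gatys et al. style loss.
    # Nothing below is taken from this repo.
    import tensorflow as tf

    def gram_matrix(features):
        # Flatten an (..., H, W, C) feature map to (H*W, C), then take
        # channel correlations to get a (C, C) Gram matrix.
        c = features.shape[-1]
        flat = tf.reshape(features, (-1, c))
        n = tf.cast(tf.shape(flat)[0], tf.float32)
        return tf.matmul(flat, flat, transpose_a=True) / n

    def layer_loss(art_layer, input_layer):
        # Mean squared distance between style and input Gram matrices.
        return tf.reduce_mean(
            tf.square(gram_matrix(art_layer) - gram_matrix(input_layer)))
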
diff --git a/save.jpg b/save.jpg
index f479fa8d..3cd65111 100644
--- a/save.jpg
+++ b/save.jpg
Binary files differ
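
The save.jpg change is the expected side effect of the hyperparameter bump: the stylized output regenerated under 100 epochs at the higher learning rate. The code that writes it is not part of this diff; a hypothetical sketch of such a save step:

    # Hypothetical driver code, not from this repo: clip the optimized
    # pixels and write them out once training finishes.
    import tensorflow as tf

    result = tf.clip_by_value(model.x, 0.0, 1.0)  # assumes pixels in [0, 1]
    tf.keras.preprocessing.image.save_img('save.jpg', result[0].numpy())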