
Commit

generating 300k images and il data
workstation1 gpu committed May 13, 2022
1 parent 9f62660 commit 592e8f3
Showing 26 changed files with 612 additions and 93 deletions.
33 changes: 33 additions & 0 deletions cmvae/plot_loss.py
@@ -0,0 +1,33 @@
import matplotlib.pyplot as plt
import pandas as pd


def plot_cmvae_error_progress(epochs, errors, Type='Training'):
    # plot the three CMVAE loss components (image recon, gate recon, KL) over epochs
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(epochs, errors[0], color='tab:blue')
    ax.plot(epochs, errors[1], color='tab:orange')
    ax.plot(epochs, errors[2], color='tab:red')
    ax.set_title('CMVAE ' + Type + ' losses')
    plt.legend(["Img recon", "Gate recon", "KL"])


    # plot the summed (total) loss as well
    fig2 = plt.figure()
    ax = fig2.add_subplot(1, 1, 1)
    total = [img + gate + kl for (img, gate, kl) in zip(errors[0], errors[1], errors[2])]
    ax.plot(epochs, total, color='tab:blue')
    ax.set_title('CMVAE Total ' + Type + ' loss')
    plt.show()

def plot_imitation_error_progress(epochs, errors, Type='Training'):
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(epochs, errors, color='tab:blue')
    ax.set_title('BC ' + Type + ' Error')


# tst.csv has no header row, so read it header-less; transpose so each row holds one series
df = pd.read_csv('tst.csv', delimiter=',', header=None).T
# Use a list comprehension to create a list of lists from the DataFrame rows
data = [list(row) for row in df.values]
plot_cmvae_error_progress(data[0], data[1:4])
plot_cmvae_error_progress(data[0], data[4:7], Type='Testing')
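plot_imitation_error_progress is defined above but never called here; a minimal usage sketch, assuming the behavior-cloning losses are stored as a header-less two-column CSV (the filename bc_losses.csv and its layout are assumptions, not part of this commit):

# Hypothetical example: bc_losses.csv with columns (epoch, loss)
bc = pd.read_csv('bc_losses.csv', delimiter=',', header=None)
plot_imitation_error_progress(bc[0], bc[1], Type='Training')
plt.show()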
70 changes: 55 additions & 15 deletions cmvae/train_cmvae.py
@@ -1,6 +1,8 @@
import tensorflow as tf
import os
import sys
import numpy as np

curr_dir = os.path.dirname(os.path.abspath(__file__))

# imports
@@ -11,16 +13,17 @@
from racing_utils.paths import *

# DEFINE TRAINING META PARAMETERS
data_dir = ppr_img_data_dir
output_dir = ppr_cmvae_output_dir
data_dir = "/home/dell/Drone_Project/datasets/img_data_250K"
output_dir = "/home/dell/Drone_Project/models_outputs/cmvae_con_250k"
batch_size = 32
epochs = 50
epochs = 51
n_z = 10
latent_space_constraints = True
img_res = 64
max_size = None # default is None
learning_rate = 1e-4


# CUSTOM TF FUNCTIONS
@tf.function
def calc_weighted_loss_img(img_recon, images_np):
@@ -32,6 +35,7 @@ def calc_weighted_loss_img(img_recon, images_np):
    loss = tf.reduce_sum(weighted_error_sq)
    return loss


def reset_metrics():
    train_loss_rec_img.reset_states()
    train_loss_rec_gate.reset_states()
@@ -40,6 +44,7 @@ def reset_metrics():
    test_loss_rec_gate.reset_states()
    test_loss_kl.reset_states()


@tf.function
def regulate_weights(epoch):
    # for beta
@@ -67,14 +72,16 @@ def regulate_weights(epoch):
        w_gate = 1.0
    return beta, w_img, w_gate


@tf.function
def compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev, mode):
    # compute reconstruction loss
    if mode == 0:
        img_loss = tf.losses.mean_squared_error(img_gt, img_recon)
        # img_loss = tf.losses.mean_absolute_error(img_gt, img_recon)
        gate_loss = tf.losses.mean_squared_error(gate_gt, gate_recon)
        kl_loss = -0.5 * tf.reduce_mean(tf.reduce_sum((1 + stddev - tf.math.pow(means, 2) - tf.math.exp(stddev)), axis=1))
        kl_loss = -0.5 * tf.reduce_mean(
            tf.reduce_sum((1 + stddev - tf.math.pow(means, 2) - tf.math.exp(stddev)), axis=1))
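        # Assuming `stddev` here holds the encoder's log-variance (log sigma^2), this is the
        # closed-form KL divergence D_KL( N(mu, sigma^2) || N(0, I) )
        #   = -0.5 * sum_j (1 + log sigma_j^2 - mu_j^2 - sigma_j^2),
        # averaged over the batch.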
    # elif mode == 1:
    #     # labels = tf.reshape(labels, predictions.shape)
    #     # recon_loss = tf.losses.mean_squared_error(labels, predictions)
@@ -85,6 +92,7 @@ def compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, std
    # compute KL loss: D_KL(Q(z|X,y) || P(z|X))
    return img_loss, gate_loss, kl_loss


@tf.function
def train(img_gt, gate_gt, epoch, mode):
    # freeze the non-utilized weights
@@ -102,13 +110,14 @@ def train(img_gt, gate_gt, epoch, mode):
    # model.p_gate.trainable = True
    with tf.GradientTape() as tape:
        img_recon, gate_recon, means, stddev, z = model(img_gt, mode)
        img_loss, gate_loss, kl_loss = compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev, mode)
        img_loss, gate_loss, kl_loss = compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev,
                                                                 mode)
        img_loss = tf.reduce_mean(img_loss)
        gate_loss = tf.reduce_mean(gate_loss)
        beta, w_img, w_gate = regulate_weights(epoch)
        # weighted_loss_img = calc_weighted_loss_img(img_recon, img_gt)
        if mode == 0:
            total_loss = w_img*img_loss + w_gate*gate_loss + beta*kl_loss
            total_loss = w_img * img_loss + w_gate * gate_loss + beta * kl_loss
            # total_loss = w_img * img_loss + beta * kl_loss
            # total_loss = weighted_loss_img + gate_loss + beta * kl_loss
            # total_loss = img_loss
@@ -124,17 +133,20 @@ def test(img_gt, gate_gt, mode):
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))


@tf.function
def test(img_gt, gate_gt, mode):
    img_recon, gate_recon, means, stddev, z = model(img_gt, mode)
    img_loss, gate_loss, kl_loss = compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev, mode)
    img_loss, gate_loss, kl_loss = compute_loss_unsupervised(img_gt, gate_gt, img_recon, gate_recon, means, stddev,
                                                             mode)
    img_loss = tf.reduce_mean(img_loss)
    gate_loss = tf.reduce_mean(gate_loss)
    if mode == 0:
        test_loss_rec_img.update_state(img_loss)
        test_loss_rec_gate.update_state(gate_loss)
        test_loss_kl.update_state(kl_loss)


os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
# 0 = all messages are logged (default behavior)
# 1 = INFO messages are not printed
@@ -171,6 +183,14 @@ def test(img_gt, gate_gt, mode):
if not os.path.isdir(output_dir):
    os.makedirs(output_dir)

# accumulate per-epoch losses; loss_list is written to CSV after training
loss_dict = {"epochs": [],
             "train_loss_rec_img": [],
             "train_loss_rec_gate": [],
             "train_loss_kl": [],
             "test_loss_rec_img": [],
             "test_loss_rec_gate": [],
             "test_loss_kl": []}
loss_list = []
# train
print('Start training ...')
mode = 0
@@ -197,12 +217,32 @@ def test(img_gt, gate_gt, mode):
        tf.summary.scalar('test_loss_rec_img', test_loss_rec_img.result(), step=epoch)
        tf.summary.scalar('test_loss_rec_gate', test_loss_rec_gate.result(), step=epoch)
        tf.summary.scalar('test_loss_kl', test_loss_kl.result(), step=epoch)
    print('Epoch {} | TRAIN: L_img: {}, L_gate: {}, L_kl: {}, L_tot: {} | TEST: L_img: {}, L_gate: {}, L_kl: {}, L_tot: {}'
          .format(epoch, train_loss_rec_img.result(), train_loss_rec_gate.result(), train_loss_kl.result(),
                  train_loss_rec_img.result() + train_loss_rec_gate.result() + train_loss_kl.result(),
                  test_loss_rec_img.result(), test_loss_rec_gate.result(), test_loss_kl.result(),
                  test_loss_rec_img.result() + test_loss_rec_gate.result() + test_loss_kl.result()
                  ))
    reset_metrics()  # reset all the accumulators of metrics

    print(
        'Epoch {} \n| TRAIN: L_img: {}, L_gate: {}, L_kl: {}, L_tot: {} \n| TEST: L_img: {}, L_gate: {}, '
        'L_kl: {}, L_tot: {} '
        .format(epoch, train_loss_rec_img.result(), train_loss_rec_gate.result(), train_loss_kl.result(),
                train_loss_rec_img.result() + train_loss_rec_gate.result() + train_loss_kl.result(),
                test_loss_rec_img.result(), test_loss_rec_gate.result(), test_loss_kl.result(),
                test_loss_rec_img.result() + test_loss_rec_gate.result() + test_loss_kl.result()
                ))

loss_dict["train_loss_rec_img"].append(train_loss_rec_img.result())
loss_dict["train_loss_rec_gate"].append(train_loss_rec_gate.result())
loss_dict["train_loss_kl"].append(train_loss_kl.result())
loss_dict["test_loss_rec_img"].append(test_loss_rec_img.result())
loss_dict["test_loss_rec_gate"].append(test_loss_rec_gate.result())
loss_dict["test_loss_kl"].append(test_loss_kl.result())

l = [epoch,
train_loss_rec_img.result(),
train_loss_rec_gate.result(),
train_loss_kl.result(),
test_loss_rec_img.result(),
test_loss_rec_gate.result(),
train_loss_kl.result()]
loss_list.append(l)
reset_metrics() # reset all the accumulators of metrics
np.savetxt("training_cmvae_losses.csv", loss_list, delimiter=",", fmt='% s')
# df = pd.DataFrame.from_dict(loss_dict)
# df.to_csv("training_results.csv", header= False, index= False)
print('End of training')
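For reference, each row of training_cmvae_losses.csv holds (epoch, train img/gate/KL losses, test img/gate/KL losses), the layout cmvae/plot_loss.py expects after transposing. A minimal sketch of reading it back with named columns (the column names are illustrative, not defined by this commit):

import pandas as pd

cols = ['epoch', 'train_img', 'train_gate', 'train_kl', 'test_img', 'test_gate', 'test_kl']
losses = pd.read_csv('training_cmvae_losses.csv', header=None, names=cols)
print(losses[['train_img', 'test_img']].tail())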
10 changes: 10 additions & 0 deletions cmvae/tst.csv
@@ -0,0 +1,10 @@
0.0,0.23140303790569305,0.3254210650920868,3.1158947944641113,0.17979291081428528,0.27923378348350525,3.1158947944641113
1.0,0.15479321777820587,0.3123328685760498,2.1645729541778564,0.1421280950307846,0.27504462003707886,2.1645729541778564
2.0,0.13745853304862976,0.3074854612350464,1.2580513954162598,0.1331387311220169,0.27260622382164,1.2580513954162598
3.0,0.13198621571063995,0.3034461438655853,0.4452038109302521,0.13156656920909882,0.2675051987171173,0.4452038109302521
4.0,0.12863336503505707,0.29639697074890137,0.10872875154018402,0.12640348076820374,0.2596818208694458,0.10872875154018402
5.0,0.12491415441036224,0.2890583276748657,0.05528435856103897,0.12311731278896332,0.25211650133132935,0.05528435856103897
6.0,0.12222196906805038,0.28153902292251587,0.049212679266929626,0.12009679526090622,0.2448357343673706,0.049212679266929626
7.0,0.12039639800786972,0.2744167149066925,0.052575163543224335,0.11832506954669952,0.2389456182718277,0.052575163543224335
8.0,0.11916572600603104,0.26751983165740967,0.03737931698560715,0.1169983297586441,0.23230648040771484,0.03737931698560715
9.0,0.11809031665325165,0.2609376609325409,0.04091877117753029,0.11578226834535599,0.22665441036224365,0.04091877117753029