Issue
I am trying to train a model with TensorFlow. I have a custom loss function and set the batch size to 1960, but fit only seems to be giving my loss function one value at a time, even though my loss function processes the whole batch. Code:
import tensorflow as tf
from tensorflow import keras
import math
import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
chunksize = 40
stepsize = 1961
x = tf.data.Dataset.from_tensor_slices(np.random.rand(9000, 40, 7))
y = tf.data.Dataset.from_tensor_slices(np.random.rand(9000, 40, 7))
dataset = tf.data.Dataset.zip((x, y)).batch(stepsize - 1)
def Generator():
    model_m = keras.models.Sequential()
    model_m.add(tf.keras.layers.Conv1D(3, 1, input_shape=(40, 7)))
    model_m.add(tf.compat.v1.keras.layers.CuDNNLSTM(units=50, return_sequences=True))
    model_m.add(Dropout(0.2))
    model_m.add(tf.compat.v1.keras.layers.CuDNNLSTM(units=50, return_sequences=True))
    model_m.add(Dropout(0.2))
    model_m.add(tf.compat.v1.keras.layers.CuDNNLSTM(units=50, return_sequences=True))
    model_m.add(Dropout(0.2))
    model_m.add(tf.compat.v1.keras.layers.CuDNNLSTM(units=50))
    model_m.add(Dropout(0.2))
    model_m.add(Dense(units=1, activation="sigmoid"))
    return model_m
model = Generator()
def generator_loss(target, genor_output1):
    dat = tf.cast(tf.reshape(target, (len(target), 1)), tf.float32)
    mult = tf.reduce_sum(dat)
    dat = tf.math.multiply(genor_output1, dat)
    dat2 = tf.reduce_sum(dat)
    dat2 = tf.math.divide(tf.math.add(tf.math.add(tf.math.abs(mult), 1), mult),
                          tf.math.add(tf.math.add(tf.math.abs(mult), 1), dat2))
    return dat2, dat
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.00007, epsilon=2e-4, beta_1=0.5)
model.compile(loss=generator_loss, optimizer="Adam")
model.fit(dataset, epochs=100, batch_size=(stepsize - 1))
Also, why am I getting this error?
ValueError: Shapes must be equal rank, but are 0 and 2
From merging shape 0 with other shapes. for '{{node generator_loss/weighted_loss/packed}} = Pack[N=2, T=DT_FLOAT, axis=0](generator_loss/truediv, generator_loss/Mul)' with input shapes: [], [?,1].
Solution
Hi, you need to create valid (data, label) training pairs, similar to the random data in your code, and build a dataset from them so the model sees matching data and labels in every batch.
The ValueError itself comes from the custom loss function: it returns two tensors, dat2 (a scalar, rank 0) and dat (shape (batch, 1), rank 2). Keras expects a loss to return a single tensor, so it tries to pack both return values into one tensor and fails with "Shapes must be equal rank, but are 0 and 2". Return only dat2 and reduce along axis=0; see the fixed custom loss function in the sample below, and the Keras loss-functions documentation for more examples.
Also note that layer and function names differ between TensorFlow versions (for example the tf.compat.v1 CuDNNLSTM layers), so mismatched names across versions can cause further errors.
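To see the failure in isolation, here is a minimal sketch (bad_loss and good_loss are hypothetical names, not from the code above):
import tensorflow as tf

# bad_loss mirrors the original mistake: it returns two tensors of
# different ranks, which Keras tries to pack into one tensor,
# raising "Shapes must be equal rank, but are 0 and 2".
def bad_loss(y_true, y_pred):
    scalar = tf.reduce_sum(y_pred)  # rank 0
    per_sample = y_pred * y_true    # rank 2, e.g. shape (batch, 1)
    return scalar, per_sample       # two return values: the bug

# good_loss returns a single tensor, which is what Keras expects.
def good_loss(y_true, y_pred):
    return tf.reduce_sum(y_pred * y_true)
Full sample: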
import tensorflow as tf

# ---------------------------------------------------------
# : Variables
# ---------------------------------------------------------
stepsize = 1961  # kept from the question; not used below

# ---------------------------------------------------------
# : Class / Functions
# ---------------------------------------------------------
def Generator():
    model_m = tf.keras.models.Sequential([
        tf.keras.layers.Conv1D(3, 1, input_shape=(40, 7)),
        tf.compat.v1.keras.layers.CuDNNLSTM(units=50, return_sequences=True),
        tf.keras.layers.Dropout(0.2),
        tf.compat.v1.keras.layers.CuDNNLSTM(units=50, return_sequences=True),
        tf.keras.layers.Dropout(0.2),
        tf.compat.v1.keras.layers.CuDNNLSTM(units=50, return_sequences=True),
        tf.keras.layers.Dropout(0.2),
        tf.compat.v1.keras.layers.CuDNNLSTM(units=50),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(units=1, activation="sigmoid")
    ])
    return model_m
class MeanSquaredError(tf.keras.losses.Loss):
    def call(self, y_true, y_pred):
        return tf.reduce_mean(tf.math.square(y_pred - y_true), axis=-1)
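# A quick check of the Loss subclass above on dummy tensors (shapes
# assumed for illustration, not used in training): Loss.__call__
# applies the AUTO reduction, so a single scalar comes back.
mse_check = MeanSquaredError()
print(mse_check(tf.zeros((4, 3)), tf.ones((4, 3))).numpy())  # 1.0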
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
g = tf.random.Generator.from_seed(1234)
g.reset_from_seed(1235)
data1 = tf.random.uniform(shape=(9000, 1, 40, 7), minval=0, maxval=6,
                          dtype=tf.dtypes.int64, seed=1235, name="random").numpy()
g.reset_from_seed(1237)
data2 = tf.random.uniform(shape=(9000, 1, 1), minval=0, maxval=6,
                          dtype=tf.dtypes.int64, seed=1237, name="random").numpy()
# Pair each random sample with a random label in a single dataset.
dataset = tf.data.Dataset.from_tensor_slices((
    tf.constant(data1, shape=(9000, 1, 40, 7), dtype=tf.int64),
    tf.constant(data2, shape=(9000, 1, 1), dtype=tf.int64),
))
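# A quick inspection (not part of the original answer): each element
# should pair one (1, 40, 7) sample with one (1, 1) label, both int64.
print(dataset.element_spec)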
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='sparse_categorical_crossentropy'
)
# Note: lossfn is defined for reference only; the compile call below
# uses the custom generator_loss instead.

# ---------------------------------------------------------
# : Working
# ---------------------------------------------------------
model = Generator()

# ---------------------------------------------------------
# : Optimizer
# ---------------------------------------------------------
optimizer = tf.keras.optimizers.Nadam(
    learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
    name='Nadam'
)

# ---------------------------------------------------------
# : Custom Loss Fn
# ---------------------------------------------------------
def generator_loss(target, genor_output1):
    dat = tf.cast(tf.reshape(target, (len(target), 1)), tf.float32)
    mult = tf.math.reduce_sum(dat, axis=0)
    dat = tf.math.multiply(genor_output1, dat)
    dat2 = tf.math.reduce_sum(dat, axis=0)
    dat2 = tf.math.divide(tf.math.add(tf.math.add(tf.math.abs(mult), 1), mult),
                          tf.math.add(tf.math.add(tf.math.abs(mult), 1), dat2))
    # Return a single tensor: returning (dat2, dat) made Keras try to
    # pack two tensors of ranks 0 and 2, which raised the ValueError.
    return dat2
    # return dat2, dat
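# A quick check with dummy tensors (shapes assumed for illustration):
# the fixed loss returns a single tensor, which Keras accepts.
print(generator_loss(tf.ones((8, 1), dtype=tf.int64),
                     tf.random.uniform((8, 1))).shape)  # expected: (1,)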
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=generator_loss, metrics=['accuracy'])
# Note: batch_size is ignored when fit() is given a tf.data.Dataset;
# batching is controlled by the dataset itself.
history = model.fit(dataset, batch_size=100, epochs=50)
Output:
4200/9000 [=============>................] - ETA: 1:37 - loss: 1.0252 - accuracy: 0.1660
Answered By - Jirayu Kaewprateep