Issue
How do you get the "actual" shape of a tensor at training time, e.g., (None, 64) -> (128, 64)?
In other words, at training time I get a shape like (None, 64), where None means the first dimension of the tensor is dynamic with respect to the input size, and 64 is an example value for the second dimension. I assume that at training time the "actual" size of that tensor is known to the framework, so I am wondering how (and whether) I can get the actual size of the tensor, with None evaluated against the train/test/eval dataset size. Hence, I would like to get (128, 64) instead of (None, 64), where 128 is the size of the input.
Please consider the following simplified code example.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

class ALayer(keras.layers.Layer):
    # (weight creation omitted for brevity)
    def call(self, inputs):
        features = tf.matmul(inputs, self.kernel) + self.bias

        # These are the different approaches I've tried.
        print(features.shape)
        # This prints: (None, 64)
        print(tf.shape(features))
        # This prints: Tensor("ALayer/Shape:0", shape=(2,), dtype=int32)

        return features

input_layer = layers.Input(input_dim)
x = ALayer()([input_layer])
x = layers.Dense(1)(x)

model = keras.Model(inputs=[input_layer], outputs=[x])
model.compile()

train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val))

model.fit(train_dataset, validation_data=val_dataset)
Solution
You should use tf.print. Although eager execution is enabled by default in TF 2.7, Keras still traces call() into a graph when the model is fit, so a plain Python print only ever sees the symbolic shape. tf.print is executed as part of the graph and therefore prints the actual shape of each batch at runtime:
import tensorflow as tf

class ALayer(tf.keras.layers.Layer):
    def __init__(self, units=32):
        super(ALayer, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        features = tf.matmul(inputs, self.w) + self.b
        # tf.print runs as part of the graph, so it reports the runtime
        # shape of each batch rather than the symbolic (None, ...) shape.
        tf.print('Features shape -->', tf.shape(features), '\n')
        return features

input_layer = tf.keras.layers.Input(shape=(10,))
x = ALayer(10)(input_layer)
x = tf.keras.layers.Dense(1)(x)

model = tf.keras.Model(inputs=[input_layer], outputs=[x])
model.compile(loss=tf.keras.losses.BinaryCrossentropy())

X_train, y_train = tf.random.normal((64, 10)), tf.random.uniform((64,), maxval=2, dtype=tf.int32)
X_val, y_val = tf.random.normal((64, 10)), tf.random.uniform((64,), maxval=2, dtype=tf.int32)

train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(32)
val_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val)).batch(32)

model.fit(train_dataset, validation_data=val_dataset, epochs=1, verbose=0)
Features shape --> [32 10]
Features shape --> [32 10]
Features shape --> [32 10]
Features shape --> [32 10]
<keras.callbacks.History at 0x7fab3ce15910>
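Note that None resolves to the size of each batch (32 here, because the datasets are batched with .batch(32)), not the full dataset size of 64, since fit() consumes the data batch by batch. If you need the runtime batch size as a value inside call() rather than just printing it, you can index the 1-D shape tensor returned by tf.shape. Below is a minimal sketch of that pattern; the layer name ABatchAwareLayer and the concat example are made up for illustration:

import tensorflow as tf

class ABatchAwareLayer(tf.keras.layers.Layer):
    def call(self, inputs):
        # tf.shape returns a 1-D int32 tensor holding the runtime shape,
        # so indexing it yields the actual batch size as a scalar tensor.
        batch_size = tf.shape(inputs)[0]
        # Example use: append a column of ones, one per example in the batch.
        ones_per_example = tf.ones([batch_size, 1])
        return tf.concat([inputs, ones_per_example], axis=-1)

Alternatively, if you only want ordinary Python print statements for debugging, you can pass run_eagerly=True to model.compile(), which skips graph tracing and runs call() eagerly, at a performance cost.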
Answered By - AloneTogether