Issue
I am getting the following error:
KeyError Traceback (most recent call last)
<ipython-input-254-f01ba8163f7d> in <module>
1 out_batch = NBatchLogger(display=1000)
2 model.fit(X_train, Y_train, epochs=1000, batch_size=250,verbose = 0,
----> 3 callbacks=[out_batch])
1 frames
<ipython-input-247-55bb2505c62e> in on_batch_end(self, batch, logs)
14 def on_batch_end(self, batch, logs={}):
15 self.step += 1
---> 16 for k in self.params['metrics']:
17 if k in logs:
18 self.metric_cache[k] = self.metric_cache.get(k, 0) + logs[k]
KeyError: 'metrics'
Here is my code:
class PrintProgress(keras.callbacks.Callback):
    """Print a progress marker once every 100 training epochs."""

    def on_epoch_end(self, epoch, logs):
        if not epoch % 100:
            print('Epoch', epoch)
class NBatchLogger(keras.callbacks.Callback):
    """
    A Logger that log average performance per `display` steps.

    NOTE(review): this is the version from the question. It raises
    KeyError because recent Keras versions no longer put a 'metrics'
    key into ``self.params`` (see the traceback quoted above and the
    corrected implementation later in this post).
    """
    def __init__(self, display):
        self.step = 0            # batches seen so far
        self.display = display   # print once every `display` batches
        self.metric_cache = {}   # metric name -> running sum since last print
    def on_batch_end(self, batch, logs={}):
        self.step += 1
        # BUG: 'metrics' is not a key of self.params here -> KeyError
        for k in self.params['metrics']:
            if k in logs:
                self.metric_cache[k] = self.metric_cache.get(k, 0) + logs[k]
        if self.step % self.display == 0:
            metrics_log = ''
            for (k, v) in self.metric_cache.items():
                # Average over the window; use scientific notation for
                # very small values so they don't print as 0.0000.
                val = v / self.display
                if abs(val) > 1e-3:
                    metrics_log += ' - %s: %.4f' % (k, val)
                else:
                    metrics_log += ' - %s: %.4e' % (k, val)
            print('step: {}/{} ... {}'.format(self.step,
                                              self.params['steps'],
                                              metrics_log))
            self.metric_cache.clear()
# Reset the global Keras backend state before rebuilding the model.
tf.keras.backend.clear_session()
When trying to compute the confusion matrix with
confusion_matrix(np.argmax(Y_train, axis = 1), pred_train)
I got the following error:
ValueError: Classification metrics can't handle a mix of multiclass and continuous-multioutput targets
Solution
The callback's `params` dictionary only contains the values used in the `fit` call (in this case `verbose`, `epochs` and `steps`). If you want to access the model's metrics from within the callback, you need to set the model for the callback with
out_batch.set_model(model)
And then access it with self.model.metrics
inside callback's method.
Here is your Callback implementation with fixes:
class NBatchLogger(keras.callbacks.Callback):
    """Log the average value of each model metric every `display` batches.

    Metric names are read from ``self.model.metrics``, so the callback
    must be attached to a model before use (Keras does this automatically
    inside ``fit``, or call ``callback.set_model(model)`` manually).
    """

    def __init__(self, display):
        self.step = 0            # batches seen so far
        self.display = display   # print once every `display` batches
        self.metric_cache = {}   # metric name -> running sum since last print

    def on_batch_end(self, batch, logs=None):
        # Keras may invoke callbacks with logs=None; guard before .get().
        logs = logs or {}
        self.step += 1
        for metric in self.model.metrics:
            value = logs.get(metric.name)
            # Skip metrics absent from this batch's logs instead of
            # accumulating None and raising a TypeError.
            if value is None:
                continue
            self.metric_cache[metric.name] = (
                self.metric_cache.get(metric.name, 0.0) + value
            )
        if self.step % self.display == 0:
            metrics_log = ''
            for (k, v) in self.metric_cache.items():
                # Average over the window; scientific notation keeps very
                # small values from printing as 0.0000.
                val = v / self.display
                if abs(val) > 1e-3:
                    metrics_log += ' - %s: %.4f' % (k, val)
                else:
                    metrics_log += ' - %s: %.4e' % (k, val)
            # 'steps' may be missing from params for some fit
            # configurations; .get() avoids a KeyError while printing
            # the same value whenever the key is present.
            print('step: {}/{} ... {}'.format(self.step,
                                              self.params.get('steps'),
                                              metrics_log))
            self.metric_cache.clear()
And output I got:
step: 10/240 ... - loss: 2.2448 - accuracy: 0.2207
step: 20/240 ... - loss: 2.0735 - accuracy: 0.3876
step: 30/240 ... - loss: 1.8155 - accuracy: 0.4899
step: 40/240 ... - loss: 1.5696 - accuracy: 0.5502
step: 50/240 ... - loss: 1.3779 - accuracy: 0.6002
step: 60/240 ... - loss: 1.2252 - accuracy: 0.6412
EDIT:
To fix the error ValueError: Classification metrics can't handle a mix of multiclass and continuous-multioutput targets
with confusion matrix, you should change
confusion_matrix(np.argmax(y_train, axis=1), pred_train)
to
confusion_matrix(np.argmax(y_train, axis=1), np.argmax(pred_train, axis=1))
because you need to convert the predicted labels in the same way as the training labels.
Answered By - maciek97x
0 comments:
Post a Comment
Note: Only a member of this blog may post a comment.