Issue
I trained a convolutional neural network (CNN) on a dataset and I want to plot its accuracy. I tried to do it with matplotlib but did not succeed. How can I plot the accuracy for this code?
from matplotlib import pyplot
import os
import numpy as np
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf

tf.compat.v1.reset_default_graph()

# IMG_SIZE, LR, MODEL_NAME and train_data are defined earlier in the script
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 4, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')

if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')

train = train_data[:-200]
test = train_data[-200:]

X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y}, n_epoch=1,
          validation_set=({'input': test_x}, {'targets': test_y}),
          snapshot_step=40, show_metric=True, run_id=MODEL_NAME)
model.save(MODEL_NAME)
Solution
🧸💬 You cannot get a history object back from model.fit() in TFLearn (it returns None), but you can collect the accuracy and loss yourself with a custom callback and then plot them with matplotlib.
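Applied to the code in the question, the lines below are a minimal sketch of that idea: a callback that records the training accuracy and loss at the end of each epoch, then a matplotlib plot of the two lists. It assumes your script already defines model, X, Y, test_x, test_y and MODEL_NAME as above; the PlotCallback name and the higher n_epoch are only illustrative. A complete, self-contained sample follows.

import tflearn
import matplotlib.pyplot as plt

acc_history = []   # training accuracy per epoch
loss_history = []  # training loss per epoch

class PlotCallback(tflearn.callbacks.Callback):
    def on_epoch_end(self, training_state):
        # show_metric=True must be set for acc_value to be populated
        if training_state.acc_value is not None:
            acc_history.append(float(training_state.acc_value))
        if training_state.loss_value is not None:
            loss_history.append(float(training_state.loss_value))

# same fit call as in the question, with the callback attached and more epochs
# so the curves have more than one point
model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          validation_set=({'input': test_x}, {'targets': test_y}),
          snapshot_step=40, show_metric=True, run_id=MODEL_NAME,
          callbacks=PlotCallback())

plt.plot(acc_history, label='accuracy')
plt.plot(loss_history, label='loss')
plt.xlabel('epoch')
plt.legend()
plt.show()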
[ Sample ]:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import os
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import matplotlib.pyplot as plt
import numpy as np
# add training - 1 DNN ==============================================================================================
tflearn.init_graph(num_cores=1, gpu_memory_fraction=1.0) # num_cores=8, gpu_memory_fraction=0.5
accuracy = [ ]
loss = [ ]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Class
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MonitorCallback(tflearn.callbacks.Callback):
    def __init__(self, api):
        self.my_monitor_api = api

    # on_batch_end(self, training_state, snapshot=False) would also work here
    def on_sub_batch_end(self, training_state, train_index=0):
        try:
            # store numeric values; appending strings would give a categorical axis in matplotlib
            if training_state.acc_value is not None:
                accuracy.append(float(training_state.acc_value))
            if training_state.loss_value is not None:
                loss.append(float(training_state.loss_value))
        except Exception as e:
            print(str(e))
monitorCallback = MonitorCallback(tflearn)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = 'F:\\datasets\\downloads\\sample\\cats_dogs\\training'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
MODEL_NAME = 'DEKDEE'
images = [ 'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\01 32x32.jpg',
'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\02 32x32.jpg',
'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\03 32x32.jpg',
'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\04 32x32.jpg',
'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\05 32x32.jpg',
'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\06 32x32.jpg',
'F:\\datasets\\downloads\\sample\\cats_dogs\\training\\train\\cats\\07 32x32.jpg' ]
# one one-hot label per image (7 classes, matching the 7-unit softmax layer below)
labels = []
labels.append([1, 0, 0, 0, 0, 0, 0])
labels.append([0, 1, 0, 0, 0, 0, 0])
labels.append([0, 0, 1, 0, 0, 0, 0])
labels.append([0, 0, 0, 1, 0, 0, 0])
labels.append([0, 0, 0, 0, 1, 0, 0])
labels.append([0, 0, 0, 0, 0, 1, 0])
labels.append([0, 0, 0, 0, 0, 0, 1])
list_image = []
for item in images:
    list_image.append(plt.imread(item))
convnet = input_data(shape=[None, 32, 32, 3], name='input')
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 7, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.0001, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
hist = model.fit({'input': list_image}, {'targets': labels}, n_epoch=15,
                 validation_set=({'input': list_image}, {'targets': labels}),
                 snapshot_step=40, show_metric=True, run_id=MODEL_NAME,
                 callbacks=monitorCallback)
print(hist)  # prints None: model.fit() does not return a history object
plt.plot(accuracy, label='accuracy')
plt.plot(loss, label='loss')
plt.legend()
plt.show()
plt.close()
input('...')
[ Output ]:
--
Training Step: 7 | total loss: 1.86701 | time: 1.035s
| Adam | epoch: 007 | loss: 1.86701 - acc: 0.4050 | val_loss: 1.66956 - val_acc: 0.7143 -- iter: 7/7
--
Training Step: 8 | total loss: 1.82322 | time: 1.038s
| Adam | epoch: 008 | loss: 1.82322 - acc: 0.4183 | val_loss: 1.63044 - val_acc: 0.7143 -- iter: 7/7
--
Training Step: 9 | total loss: 1.72288 | time: 1.031s
| Adam | epoch: 009 | loss: 1.72288 - acc: 0.4994 | val_loss: 1.59387 - val_acc: 0.7143 -- iter: 7/7
--
Training Step: 10 | total loss: 1.67410 | time: 1.018s
| Adam | epoch: 010 | loss: 1.67410 - acc: 0.3925 | val_loss: 1.54277 - val_acc: 1.0000 -- iter: 7/7
--
Training Step: 11 | total loss: 1.57482 | time: 1.019s
| Adam | epoch: 011 | loss: 1.57482 - acc: 0.4773 | val_loss: 1.48715 - val_acc: 1.0000 -- iter: 7/7
--
Training Step: 12 | total loss: 1.59550 | time: 1.021s
| Adam | epoch: 012 | loss: 1.59550 - acc: 0.5196 | val_loss: 1.43774 - val_acc: 1.0000 -- iter: 7/7
--
Training Step: 13 | total loss: 1.68833 | time: 1.027s
| Adam | epoch: 013 | loss: 1.68833 - acc: 0.4194 | val_loss: 1.39414 - val_acc: 0.8571 -- iter: 7/7
--
Training Step: 14 | total loss: 1.59238 | time: 1.020s
| Adam | epoch: 014 | loss: 1.59238 - acc: 0.4816 | val_loss: 1.34550 - val_acc: 0.8571 -- iter: 7/7
--
Training Step: 15 | total loss: 1.52356 | time: 1.031s
| Adam | epoch: 015 | loss: 1.52356 - acc: 0.5167 | val_loss: 1.29574 - val_acc: 1.0000 -- iter: 7/7
--
None
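If you also want the validation accuracy in the plot, the same callback idea works per epoch. The sketch below continues from the sample above (it reuses model, list_image, labels and MODEL_NAME) and assumes TrainingState exposes val_acc once a validation_set is given, which is what the log reports as val_acc; the EpochMonitor name is illustrative.

train_acc = []
val_acc = []

class EpochMonitor(tflearn.callbacks.Callback):
    def on_epoch_end(self, training_state):
        # training accuracy of the finished epoch
        if training_state.acc_value is not None:
            train_acc.append(float(training_state.acc_value))
        # validation accuracy, filled in because validation_set is passed to fit()
        if training_state.val_acc is not None:
            val_acc.append(float(training_state.val_acc))

model.fit({'input': list_image}, {'targets': labels}, n_epoch=15,
          validation_set=({'input': list_image}, {'targets': labels}),
          show_metric=True, run_id=MODEL_NAME, callbacks=EpochMonitor())

plt.plot(train_acc, label='train acc')
plt.plot(val_acc, label='val acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()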
Answered By - Jirayu Kaewprateep