Hi.
I commented out some of the code in the first training loop. Before:
# training
for i in range(max_loop):
    for j in range(max_in_loop):
        idx = i * max_in_loop + j  # flatten (i, j) into one running index
        result_1[idx] = model.train_on_batch(x_data_keras, y_data_keras)
        result_2[idx] = model.test_on_batch(x_data_keras, y_data_keras)
        result_3[idx] = model.evaluate(x_data_keras, y_data_keras, verbose=0)
print(result_1)
After:
# training
for i in range(max_loop):
    for j in range(max_in_loop):
        idx = i * max_in_loop + j  # flatten (i, j) into one running index
        result_1[idx] = model.train_on_batch(x_data_keras, y_data_keras)
        #result_2[idx] = model.test_on_batch(x_data_keras, y_data_keras)
        #result_3[idx] = model.evaluate(x_data_keras, y_data_keras, verbose=0)
print(result_1)
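For easier comparison, the two variants can also be written as a single loop with a toggle. This is just a sketch; run_training and with_eval are names I made up, and the arguments reuse the definitions from the full script below:
def run_training(model, x, y, steps, with_eval):
    # Train for `steps` batches; optionally run evaluate() after each batch.
    losses = {}
    for j in range(steps):
        losses[j] = model.train_on_batch(x, y)
        if with_eval:
            model.evaluate(x, y, verbose=0)
    return losses
The two runs then become run_training(get_model(), x_data_keras, y_data_keras, max_in_loop, with_eval=False) and the same call with with_eval=True.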
The whole source code is below:
import os
os.environ["PYTHONHASHSEED"]=str(1234)
import numpy as np
import unittest
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, Activation
from tensorflow.keras.layers import BatchNormalization
import tensorflow as tf
import random as python_random
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(123)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
python_random.seed(123)
# The below set_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/random/set_seed
tf.random.set_seed(1234)
# See https://github.com/tensorflow/tensorflow/issues/31149
initializer = tf.keras.initializers.GlorotUniform(seed=42)
def get_model():
    model = Sequential()
    model.add(Input(shape=(3,)))
    model.add(BatchNormalization())
    model.add(Dense(10, kernel_initializer=initializer))
    model.add(Activation('relu'))
    model.add(Dense(4, kernel_initializer=initializer))
    model.compile(optimizer='adam', loss='mean_squared_error')
    model.summary()
    return model
x_data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 1.0],
    [0.0, 1.0, 0.0],
    [0.0, 1.0, 1.0],
    [1.0, 0.0, 0.0],
    [1.0, 0.0, 1.0],
    [1.0, 1.0, 0.0],
    [1.0, 1.0, 1.0],
]
y_data = [
    [0.0, 0.0, 0.0, 1.0],
    [0.0, 1.0, 1.0, 1.0],
    [0.0, 1.0, 1.0, 1.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 1.0, 1.0, 1.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0],
    [1.0, 1.0, 1.0, 0.0],
]
x_data_keras = np.array(x_data)
y_data_keras = np.array(y_data)
max_loop = 1
max_in_loop = 10
model = get_model()
result_1 = {}
result_2 = {}
result_3 = {}
# training
for i in range(max_loop):
    for j in range(max_in_loop):
        idx = i * max_in_loop + j  # flatten (i, j) into one running index
        result_1[idx] = model.train_on_batch(x_data_keras, y_data_keras)
        #result_2[idx] = model.test_on_batch(x_data_keras, y_data_keras)
        #result_3[idx] = model.evaluate(x_data_keras, y_data_keras, verbose=0)
print(result_1)
model = get_model()
result_1_1 = {}
result_2_2 = {}
result_3_3 = {}
# training
for i in range(max_loop):
    for j in range(max_in_loop):
        idx = i * max_in_loop + j  # flatten (i, j) into one running index
        result_1_1[idx] = model.train_on_batch(x_data_keras, y_data_keras)
        #result_2_2[idx] = model.test_on_batch(x_data_keras, y_data_keras)
        result_3_3[idx] = model.evaluate(x_data_keras, y_data_keras, verbose=0)
print(result_1_1)
case = unittest.TestCase()
case.assertDictEqual(result_1,result_1_1)
Dump of the result:
AssertionError: {0: 0[20 chars]: 0.730724573135376, 2: 0.7224830389022827, 3:[153 chars]1531} != {0: 0[20 chars]: 0.7113834619522095, 2: 0.704352080821991, 3:[151 chars]1235}
{0: 0.7390658855438232,
- 1: 0.730724573135376,
- 2: 0.7224830389022827,
- 3: 0.7143421769142151,
- 4: 0.7063030004501343,
- 5: 0.6983664035797119,
- 6: 0.6905329823493958,
- 7: 0.6828033328056335,
- 8: 0.6751779317855835,
- 9: 0.6676561236381531}
+ 1: 0.7113834619522095,
+ 2: 0.704352080821991,
+ 3: 0.6973998546600342,
+ 4: 0.6905273199081421,
+ 5: 0.683735191822052,
+ 6: 0.6770237684249878,
+ 7: 0.6703934669494629,
+ 8: 0.663844645023346,
+ 9: 0.6573768854141235}
The loss values differ depending on whether evaluate() is called during training or not. Why does this happen?
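In case it is useful, here is a minimal check I can think of to see whether a single evaluate() call mutates any weight tensor of the model, including the BatchNormalization moving statistics. This is just a sketch; weights_before and weights_after are names I made up, and it reuses model, x_data_keras, y_data_keras, and np from the script above:
# Snapshot all weight tensors, run one evaluate(), and compare.
weights_before = [w.copy() for w in model.get_weights()]
model.evaluate(x_data_keras, y_data_keras, verbose=0)
weights_after = model.get_weights()
for k, (before, after) in enumerate(zip(weights_before, weights_after)):
    if not np.array_equal(before, after):
        print(f"weight tensor {k} changed")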
Best regards,
take hamster