This is a machine learning script, but it keeps reading images forever and training never finishes.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Flatten, Dense

# Paths to the training and test image folders (raw strings so the backslashes are not treated as escape sequences)
train_data_dir = r'C:\Users\92020\source\repos\exercise\ex.data\train'
test_data_dir = r'C:\Users\92020\source\repos\exercise\ex.data\test'
# Create an ImageDataGenerator for training data
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)
# Load the training data from the directory
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(64, 64),  # Adjust the target size as needed
    batch_size=32,
    class_mode='categorical',  # Change this based on your problem
    shuffle=True
)
class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(train_generator.num_classes, activation='softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)
model = MyModel()
# Define loss, optimizer, and metrics
loss_object = tf.keras.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
# Define the number of classes based on your problem
num_classes = train_generator.num_classes
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')
# Train step
@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
    for images, labels in train_generator:
        train_step(images, labels)
# When execution reaches this inner loop, it runs forever and never advances to the next epoch.
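The loop never ends because a Keras flow_from_directory generator yields batches indefinitely; it does not stop after one pass over the data, so "for images, labels in train_generator" has no natural end. Below is a minimal sketch of one possible fix, assuming the code above (the steps_per_epoch name is only illustrative): bound the inner loop to len(train_generator) iterations, which corresponds to one pass over the training set.

# Sketch of a bounded training loop (one possible fix, not the original poster's code).
# len(train_generator) is the number of batches in a single pass over the
# training set, so looping that many times per epoch stops the generator
# from being read forever.
EPOCHS = 5
steps_per_epoch = len(train_generator)

for epoch in range(EPOCHS):
    # (optionally reset train_loss / train_accuracy here so the metrics are per-epoch)
    for step in range(steps_per_epoch):
        images, labels = next(train_generator)  # fetch exactly one batch
        train_step(images, labels)
    print(f'Epoch {epoch + 1}: '
          f'loss={train_loss.result():.4f}, '
          f'accuracy={train_accuracy.result():.4f}')

Alternatively, compiling the model and calling model.fit(train_generator, epochs=EPOCHS) handles the epoch boundaries automatically.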