diff --git a/divide_and_conquer/__init__.py b/boolean_algebra/divide_and_conquer/__init__.py similarity index 100% rename from divide_and_conquer/__init__.py rename to boolean_algebra/divide_and_conquer/__init__.py diff --git a/divide_and_conquer/closest_pair_of_points.py b/boolean_algebra/divide_and_conquer/closest_pair_of_points.py similarity index 100% rename from divide_and_conquer/closest_pair_of_points.py rename to boolean_algebra/divide_and_conquer/closest_pair_of_points.py diff --git a/divide_and_conquer/convex_hull.py b/boolean_algebra/divide_and_conquer/convex_hull.py similarity index 100% rename from divide_and_conquer/convex_hull.py rename to boolean_algebra/divide_and_conquer/convex_hull.py diff --git a/divide_and_conquer/heaps_algorithm.py b/boolean_algebra/divide_and_conquer/heaps_algorithm.py similarity index 100% rename from divide_and_conquer/heaps_algorithm.py rename to boolean_algebra/divide_and_conquer/heaps_algorithm.py diff --git a/divide_and_conquer/heaps_algorithm_iterative.py b/boolean_algebra/divide_and_conquer/heaps_algorithm_iterative.py similarity index 100% rename from divide_and_conquer/heaps_algorithm_iterative.py rename to boolean_algebra/divide_and_conquer/heaps_algorithm_iterative.py diff --git a/divide_and_conquer/inversions.py b/boolean_algebra/divide_and_conquer/inversions.py similarity index 100% rename from divide_and_conquer/inversions.py rename to boolean_algebra/divide_and_conquer/inversions.py diff --git a/divide_and_conquer/kth_order_statistic.py b/boolean_algebra/divide_and_conquer/kth_order_statistic.py similarity index 100% rename from divide_and_conquer/kth_order_statistic.py rename to boolean_algebra/divide_and_conquer/kth_order_statistic.py diff --git a/divide_and_conquer/max_difference_pair.py b/boolean_algebra/divide_and_conquer/max_difference_pair.py similarity index 100% rename from divide_and_conquer/max_difference_pair.py rename to boolean_algebra/divide_and_conquer/max_difference_pair.py diff --git 
"""Train a small convolutional neural network on MNIST.

Loads the MNIST digit dataset, applies light data augmentation,
trains a 3-conv-layer CNN with dropout and early stopping, and
plots the training/validation accuracy and loss curves.

Run as a script:

    python Cnn.py
"""
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def load_mnist():
    """Return ((train_images, train_labels), (test_images, test_labels)).

    Images are reshaped to (N, 28, 28, 1) float32 and scaled to [0, 1].
    """
    (train_images, train_labels), (test_images, test_labels) = (
        tf.keras.datasets.mnist.load_data()
    )
    # Add the channel axis and normalize pixel values to [0, 1].
    train_images = train_images.reshape((-1, 28, 28, 1)).astype("float32") / 255
    test_images = test_images.reshape((-1, 28, 28, 1)).astype("float32") / 255
    return (train_images, train_labels), (test_images, test_labels)


def build_augmenter():
    """Return an ImageDataGenerator with light geometric augmentation."""
    return ImageDataGenerator(
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.1,
    )


def build_model():
    """Build and compile the CNN.

    Architecture: Conv(32)-Pool-Conv(64)-Pool-Conv(64)-Flatten-
    Dense(64)-Dropout(0.5)-Dense(10 softmax).
    """
    model = models.Sequential(
        [
            layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation="relu"),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            # Dropout to reduce overfitting on the small dense head.
            layers.Dropout(0.5),
            layers.Dense(10, activation="softmax"),
        ]
    )
    # Labels are integer class ids, hence sparse_categorical_crossentropy.
    model.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model


def plot_history(history):
    """Plot training/validation accuracy and loss from a Keras History."""
    acc = history.history["accuracy"]
    val_acc = history.history["val_accuracy"]
    loss = history.history["loss"]
    val_loss = history.history["val_loss"]
    epochs_range = range(len(acc))

    plt.figure(figsize=(12, 6))

    # Accuracy curves.
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label="Training Accuracy")
    plt.plot(epochs_range, val_acc, label="Validation Accuracy")
    plt.legend(loc="lower right")
    plt.title("Training and Validation Accuracy")

    # Loss curves.
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label="Training Loss")
    plt.plot(epochs_range, val_loss, label="Validation Loss")
    plt.legend(loc="upper right")
    plt.title("Training and Validation Loss")

    plt.show()


def main():
    """Load data, train the CNN with augmentation/early stopping, plot curves."""
    (train_images, train_labels), (test_images, test_labels) = load_mnist()

    datagen = build_augmenter()
    # NOTE: datagen.fit() is only required for featurewise normalization /
    # ZCA whitening; it is a no-op for the geometric augmentations used here.

    model = build_model()

    # Stop when validation loss stalls and restore the best weights.
    early_stopping = EarlyStopping(
        monitor="val_loss", patience=3, restore_best_weights=True
    )

    # NOTE(review): validation here uses the *test* split, so early stopping
    # selects on test data (leakage). Consider carving a validation split
    # out of the training set instead — confirm intended evaluation protocol.
    history = model.fit(
        datagen.flow(train_images, train_labels, batch_size=32),
        epochs=10,
        validation_data=(test_images, test_labels),
        callbacks=[early_stopping],
    )

    plot_history(history)


if __name__ == "__main__":
    main()