Commit 1038417
Parent(s): 0e85a8a
Update utils.py
utils.py CHANGED
@@ -1,64 +1,26 @@
-import tensorflow as tf
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dense, Dropout, SpatialDropout2D
-from tensorflow.keras.losses import sparse_categorical_crossentropy, binary_crossentropy
-from tensorflow.keras.optimizers import Adam
-from tensorflow.keras.preprocessing.image import ImageDataGenerator
-import numpy as np
 from PIL import Image
 
-
-train = 'Dataset'
-train_generator = ImageDataGenerator(rescale=1/255)
-
-train_generator = train_generator.flow_from_directory(train,
-                                                      target_size=(256, 256),
-                                                      batch_size=32,
-                                                      class_mode='sparse')
-labels = train_generator.class_indices
-labels = dict((v, k) for k, v in labels.items())
-
-return labels
-
-data_augmentation = tf.keras.Sequential([
-    tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1),
-    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
-    tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
-    tf.keras.layers.experimental.preprocessing.RandomZoom(0.2)
-], name='data_augmentation')
-
-#Instantiating the base model
-input_shape = (256,256,3)
-base_model = tf.keras.applications.ResNet50V2(include_top=False, input_shape=input_shape)
-
-#Making the layers of the model trainable
-base_model.trainable = True
 
-def
-
-        img = Image.open(img_path_or_img)
-    elif isinstance(img_path_or_img, Image.Image): # Check if input is already an Image object
-        img = img_path_or_img
-    else:
-        raise ValueError("Input must be a string path or a PIL Image object.")
-
     img = img.resize((256, 256))
     img_array = np.array(img)
     return img_array
 
-
-
-
-
-
-
-]
 
-
-
-    loss='sparse_categorical_crossentropy',
-    optimizer=tf.keras.optimizers.Adam(learning_rate),
-    metrics=['accuracy']
-)
 
-return
 from PIL import Image
+import numpy as np
+from Downloading_model import model_download
 
+labels = ['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash']
 
+def preprocess_image(img_path):
+    img = Image.open(img_path)
     img = img.resize((256, 256))
     img_array = np.array(img)
+    img_array = np.expand_dims(img_array, axis=0)
     return img_array
 
+# Function to classify the garbage
+def classify_garbage(img_path, model):
+    processed_img = preprocess_image(img_path)
+    prediction = model.predict(processed_img)
+
+    class_labels = ["cardboard", "glass", "metal", "paper", "plastic", "trash"]
+    predicted_class = np.argmax(prediction, axis=1)[0]
+    classification_result = class_labels[predicted_class]
 
+    # Get the confidence (probability) of the predicted class
+    confidence = prediction[0][predicted_class] * 100  # Convert probability to percentage
 
+    return classification_result, confidence
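
A minimal usage sketch for the new helpers. This is hypothetical and not part of the commit: it assumes model_download() from Downloading_model returns a loaded Keras model (only the import is visible in this diff), and 'test.jpg' is a placeholder image path.

from Downloading_model import model_download
from utils import classify_garbage

model = model_download()  # assumption: returns a loaded Keras classification model
label, confidence = classify_garbage('test.jpg', model)  # 'test.jpg' is a placeholder path
print(f"Predicted: {label} ({confidence:.1f}%)")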