hwberry2 committed
Commit 49e8cd4 · 1 Parent(s): 2162cdc

Update app.py

Files changed (1):
  app.py +56 -47

app.py CHANGED
@@ -21,60 +21,67 @@ with gr.Blocks() as demo:
  # Train, evaluate and test a ML
  # image classification model for
  # clothes images
- def modelTraining(img):
-     class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
-                    'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
-
-     # Normalize the pixel values
-     img = np.array(img) / 255.0
-
-     # clothing dataset
-     mnist = tf.keras.datasets.mnist
-
-     # split the training data into train/test sets
-     (x_train, y_train), (x_test, y_test) = mnist.load_data()
-     x_train, x_test = x_train / 255.0, x_test / 255.0
-
-     # create the neural net layers
-     model = tf.keras.models.Sequential([
-         tf.keras.layers.Flatten(input_shape=(28, 28)),
-         tf.keras.layers.Dense(128, activation='relu'),
-         tf.keras.layers.Dropout(0.2),
-         tf.keras.layers.Dense(10)
-     ])
-
-     # make an initial (pre-training) prediction on the
-     # training set data
-     predictions = model(x_train[:1]).numpy()
-
-     # convert the logits into probabilities
-     tf.nn.softmax(predictions).numpy()
-
-     # create the loss function and compute the initial loss
-     loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
-     loss_fn(y_train[:1], predictions).numpy()
-
-     # compile the model with the loss function
-     model.compile(optimizer='adam',
-                   loss=loss_fn,
-                   metrics=['accuracy'])
-
-     # train the model for 5 epochs, then
-     # evaluate the model on the test set
-     model.fit(x_train, y_train, epochs=5)
-     test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
-     post_train_results = f"Test accuracy: {test_acc} Test Loss: {test_loss}"
-     print(post_train_results)
-
-     # create the final model for production
-     probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
+
+ class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
+                'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
+
+ # Normalize the pixel values
+ img = np.array(img) / 255.0
+
+ # clothing dataset
+ mnist = tf.keras.datasets.mnist
+
+ # split the training data into train/test sets
+ (x_train, y_train), (x_test, y_test) = mnist.load_data()
+ x_train, x_test = x_train / 255.0, x_test / 255.0
+
+ # create the neural net layers
+ model = tf.keras.models.Sequential([
+     tf.keras.layers.Flatten(input_shape=(28, 28)),
+     tf.keras.layers.Dense(128, activation='relu'),
+     tf.keras.layers.Dropout(0.2),
+     tf.keras.layers.Dense(10)
+ ])
+
+ # make an initial (pre-training) prediction on the
+ # training set data
+ predictions = model(x_train[:1]).numpy()
+
+ # convert the logits into probabilities
+ tf.nn.softmax(predictions).numpy()
+
+ # create the loss function and compute the initial loss
+ loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+ loss_fn(y_train[:1], predictions).numpy()
+
+ # compile the model with the loss function
+ model.compile(optimizer='adam',
+               loss=loss_fn,
+               metrics=['accuracy'])
+
+ # train the model for 5 epochs, then
+ # evaluate the model on the test set
+ model.fit(x_train, y_train, epochs=5)
+ test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
+ post_train_results = f"Test accuracy: {test_acc} Test Loss: {test_loss}"
+ print(post_train_results)
+
+ # create the final model for production
+ probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
+
+
+ def classifyImage(img):
+     #global probability_model
+     #global class_names
+
      input_array = np.expand_dims(img, axis=0)  # add an extra dimension to represent the batch size

      # Make a prediction using the model
      prediction = probability_model.predict(input_array)
-     predicted_label = class_names[np.argmax(prediction)]
+
      # Postprocess the prediction and return it
+     predicted_label = class_names[np.argmax(prediction)]
+
      return predicted_label


@@ -83,8 +90,10 @@ with gr.Blocks() as demo:
  with gr.Column(scale=2):
      image_data = gr.Image(label="Upload Image", type="numpy", image_mode="L", shape=[28,28], invert_colors=True)
  with gr.Column(scale=1):
+     train_test_btn = gr.Button(value="Train/Test")
+     model_performance = gr.Text(label="Model Performance Results", interactive=False)
      model_prediction = gr.Text(label="Model Prediction", interactive=False)
-     image_data.change(modelTraining, image_data, model_prediction)
+     image_data.change(classifyImage, image_data, model_prediction)


  # creates a local web server
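
For reference, the moved training code converts raw logits into probabilities with tf.nn.softmax and builds SparseCategoricalCrossentropy(from_logits=True), which applies that softmax internally and therefore expects the raw logits rather than probabilities. A minimal standalone check of that relationship (the logit values are made up for illustration; they are not taken from app.py):

import numpy as np
import tensorflow as tf

# Illustrative logits for one image over the 10 clothing classes
logits = np.array([[2.0, 1.0, 0.1, 0.0, -0.5, 0.3, 0.0, 0.2, -1.0, 0.4]],
                  dtype=np.float32)

# softmax turns the logits into probabilities that sum to 1
probs = tf.nn.softmax(logits).numpy()
print(probs.sum())  # ~1.0

# from_logits=True means the loss is fed the raw logits, as in the compiled model
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
print(loss_fn(np.array([0]), logits).numpy())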
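
The commit also adds a Train/Test button and a Model Performance textbox, but the hunks shown here do not include a click handler for them, and the training code now runs at import time. A minimal sketch of how the pieces could be wired together, assuming a hypothetical trainAndEvaluate handler, a gr.Row layout, normalization inside classifyImage, and a swap to tf.keras.datasets.fashion_mnist (the clothing dataset that matches class_names; the commit itself loads tf.keras.datasets.mnist); none of this wiring is part of the commit:

import gradio as gr
import numpy as np
import tensorflow as tf

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# same architecture as the committed code
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])


def trainAndEvaluate():
    # hypothetical handler: train on Fashion-MNIST, which matches class_names
    # (the commit loads tf.keras.datasets.mnist instead)
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model.fit(x_train, y_train, epochs=5)
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
    return f"Test accuracy: {test_acc} Test Loss: {test_loss}"


def classifyImage(img):
    # normalize the uploaded image the same way as the training data
    img = np.array(img) / 255.0
    prediction = probability_model.predict(np.expand_dims(img, axis=0))
    return class_names[np.argmax(prediction)]


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=2):
            image_data = gr.Image(label="Upload Image", type="numpy",
                                  image_mode="L", shape=[28, 28], invert_colors=True)
        with gr.Column(scale=1):
            train_test_btn = gr.Button(value="Train/Test")
            model_performance = gr.Text(label="Model Performance Results", interactive=False)
            model_prediction = gr.Text(label="Model Prediction", interactive=False)
            # assumed wiring for the two new components
            train_test_btn.click(trainAndEvaluate, None, model_performance)
            image_data.change(classifyImage, image_data, model_prediction)

# creates a local web server
demo.launch()

Deferring the fit/evaluate calls to the button click keeps the interface responsive at startup and routes the post-training results string to the Model Performance textbox.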