Space status: Runtime error

Commit 21f5a8b (parent dbc734c): "updating fast"

app.py CHANGED
@@ -8,6 +8,99 @@ import requests
 import json
 from json import JSONEncoder
 from datetime import datetime
+import requests
+from os import listdir
+import json
+import os
+import cv2
+import joblib
+from json import JSONEncoder
+from keras.optimizers import Adam
+from keras.preprocessing import image
+from keras.preprocessing.image import img_to_array
+from sklearn.preprocessing import MultiLabelBinarizer
+from sklearn.model_selection import train_test_split
+from datetime import datetime
+from fastapi import FastAPI, Request, Response
+from sklearn.metrics import precision_score, recall_score, f1_score
+import prometheus_client as prom
+# Load your trained model
+app = FastAPI()
+DEFAULT_IMAGE_SIZE = tuple((256, 256))
+N_IMAGES = 100
+# Define the model and its parameters
+#model = joblib.load(filename="plant_leaf_diseases_model.keras", 'utf-8')
+model = tf.keras.models.load_model('plant_leaf_diseases_model.keras')
+#test_dir_img_file_path = os.path.getcwd("imagesfolder")
+test_dir_img_file_path = os.path.join(os.getcwd(),"imagesfolder")
+# Define the metrics
+accuracy = prom.Gauge("accuracy", "precision score for random 50 test samples")
+precision_metric = prom.Gauge("plant_precision", "precision score for random 50 test samples")
+recall_metric = prom.Gauge("plant_recall_score", "recall score for random 50 test samples")
+f1_metric = prom.Gauge("plant_f1_score", "F1 score for random 50 test samples")
+
+# Define the function to make predictions
+#def make_prediction(test_dir_img_file_path):
+#    # Load the test images
+#    image_list = []
+#    for plant_image in os.listdir(test_dir_img_file_path):
+##        if plant_image.endswith(".jpg") or plant_image.endswith(".JPG"):
+#            image_list.append(convert_image_to_array(os.path.join(test_dir_img_file_path, plant_image)))
+#    np_image_list = np.array(image_list, dtype=np.float16) / config.model_config.scaling_factor
+#
+#    # Make predictions
+#    predictions = model.predict(np_image_list, verbose=0)
+
+#    # Return the predictions
+#    return {"predictions": predictions, "version": _version}
+def convert_image_to_array(image_dir):
+    try:
+        print(image_dir)
+        image = cv2.imread(image_dir)
+        if image is not None:
+            image = cv2.resize(image, DEFAULT_IMAGE_SIZE)
+            return img_to_array(image)
+        else :
+            return np.array([])
+    except Exception as e:
+        print(f"Error : {e}")
+        return None
+# Define the function to update the metrics
+x_test, y_test = train_test_split(test_dir_img_file_path)
+def update_metrics():
+    # Load the test images
+    image_list = []
+    plant_disease_folder_list = listdir(test_dir_img_file_path)
+    for plant_disease_folder in plant_disease_folder_list:
+        plant_disease_image_list = listdir(f"{test_dir_img_file_path}/{plant_disease_folder}/")
+        for plant_image in plant_disease_image_list[:N_IMAGES]:
+            plant_image_directory = f"{test_dir_img_file_path}/{plant_disease_folder}/{image}"
+            if plant_image_directory.endswith(".jpg") or plant_image_directory.endswith(".JPG"):
+                image_list.append(convert_image_to_array(os.path.join(plant_image_directory)))
+                label_list.append(plant_disease_folder)
+    image_len = len(image_list)
+    print(f"Total number of images: {image_len}")
+    np_image_list = np.array(image_list, dtype=np.float16) / 255.0
+    # Make predictions
+    predictions = model.predict(np_image_list, verbose=0)
+
+    # Calculate the metrics
+    pred = np.argmax(predictions, axis=1)
+    y_test2 = np.argmax(y_test, axis=1)
+    accuracy = accuracy_score(y_true, y_pred)
+    precision = precision_score(y_test2, pred)
+    recall = recall_score(y_test2, predictions)
+    f1 = f1_score(y_test2, predictions)
+
+    # Update the metrics
+    precision_metric.set(precision)
+    recall_metric.set(recall)
+    f1_metric.set(f1)
+
+@app.get("/metrics")
+async def get_metrics():
+    update_metrics()
+    return Response(media_type="text/plain", content= prom.generate_latest())
 
 
 myControls = {
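Review note on the added module-level block: several names used here do not appear in the added imports (`tf`, `accuracy_score`, `label_list`, `y_true`, `y_pred`); they may be defined in the unchanged top of the file, but `label_list` is never initialised inside `update_metrics`, so the first call to `/metrics` would raise a NameError. The module-level `train_test_split(test_dir_img_file_path)` splits the path string into characters rather than producing labels, the f-string on line 77 interpolates the imported `image` module instead of `plant_image` (so no files end in ".jpg" and nothing is collected), and for a multiclass problem `precision_score`/`recall_score`/`f1_score` need an `average=` argument and class indices, not the raw probability matrix. Any of these would be consistent with the Space's "Runtime error" badge. Below is a hedged sketch of how the evaluation could be wired so the endpoint runs end to end; the `imagesfolder/<class name>/*.jpg` layout, the model filename, and the assumption that sorted folder names match the model's output order are taken from the diff, not verified.

# Sketch only: assumes imagesfolder/<class>/*.jpg and that the model's output
# order matches sorted(class folder names).
import os
import numpy as np
import cv2
import tensorflow as tf
import prometheus_client as prom
from keras.preprocessing.image import img_to_array
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

DEFAULT_IMAGE_SIZE = (256, 256)
N_IMAGES = 100

model = tf.keras.models.load_model("plant_leaf_diseases_model.keras")
test_dir = os.path.join(os.getcwd(), "imagesfolder")

accuracy_metric = prom.Gauge("plant_accuracy", "accuracy on sampled test images")
precision_metric = prom.Gauge("plant_precision", "macro precision on sampled test images")
recall_metric = prom.Gauge("plant_recall_score", "macro recall on sampled test images")
f1_metric = prom.Gauge("plant_f1_score", "macro F1 on sampled test images")

def load_image(path):
    # Read and resize one image, or return None if OpenCV cannot decode it
    img = cv2.imread(path)
    if img is None:
        return None
    return img_to_array(cv2.resize(img, DEFAULT_IMAGE_SIZE))

def update_metrics():
    folders = sorted(os.listdir(test_dir))  # assumed to match the training label order
    images, labels = [], []
    for class_index, folder in enumerate(folders):
        folder_path = os.path.join(test_dir, folder)
        for name in os.listdir(folder_path)[:N_IMAGES]:
            if name.lower().endswith(".jpg"):
                arr = load_image(os.path.join(folder_path, name))
                if arr is not None:
                    images.append(arr)
                    labels.append(class_index)
    x = np.array(images, dtype=np.float16) / 255.0
    y_true = np.array(labels)
    y_pred = np.argmax(model.predict(x, verbose=0), axis=1)
    accuracy_metric.set(accuracy_score(y_true, y_pred))
    precision_metric.set(precision_score(y_true, y_pred, average="macro", zero_division=0))
    recall_metric.set(recall_score(y_true, y_pred, average="macro", zero_division=0))
    f1_metric.set(f1_score(y_true, y_pred, average="macro", zero_division=0))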
@@ -30,7 +123,7 @@ class NumpyEncoder(JSONEncoder):
         if isinstance(obj, np.ndarray) :
             return obj.tolist()
         return JSONEncoder.default(self,obj)
-
+
 
 
 def uploadFile() :
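The `NumpyEncoder` shown in this hunk's context is the standard stdlib pattern for serialising numpy arrays; for reference, a minimal usage sketch (the array contents are made up):

import json
import numpy as np
from json import JSONEncoder

class NumpyEncoder(JSONEncoder):
    # Fall back to a plain list for ndarray values, defer to JSONEncoder otherwise
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return JSONEncoder.default(self, obj)

payload = json.dumps({"prediction": np.array([0.1, 0.9])}, cls=NumpyEncoder)
print(payload)  # {"prediction": [0.1, 0.9]}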
@@ -43,7 +136,7 @@ def uploadFile() :
     payload = json.dumps(dataToSend)
 
 
-    r = requests.post("http://127.0.0.1:
+    r = requests.post("http://127.0.0.1:5005/todb", data=payload)
     return r
 
 def saveStats(predictionStatus) :
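Review note on the filled-in endpoint: posting the JSON string with `data=` sends no `Content-Type: application/json` header; if the service on port 5005 expects JSON, passing the dict via `json=` (and adding a timeout) is the usual pattern. A minimal sketch with a made-up payload, since `dataToSend` is defined elsewhere in the file:

import requests

dataToSend = {"PlantName": "Tomato", "PredictionStatus": 1}  # placeholder payload
# json= serialises the dict and sets the Content-Type header automatically
r = requests.post("http://127.0.0.1:5005/todb", json=dataToSend, timeout=5)
print(r.status_code)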
@@ -57,35 +150,54 @@ def saveStats(predictionStatus) :
     else :
         d['PredictionStatus'] = 0
 
-    r = requests.post("http://127.0.0.1:
+    r = requests.post("http://127.0.0.1:5005/predictionstats", data=json.dumps(d))
     return r
 
-
+
 def predict(imageToProcess):
     global imageData
+    imagelist = []
+    if imageToProcess == None :
+        gr.Error("No image is given to process")
+        return [None, None]
+
+    imageToProcess.save("./temp/test.jpg")
+    image_array = convert_image_to_array("./temp/test.jpg")
+    imagelist.append(image_array)
+    np_image = np.array(image_array, dtype=np.float16) / 225.0
+    print(np_image.shape)
+    prediction = model.predict(np_image.reshape(1,256, 256, 3))
+    leaf_class_category_mappings = {0:"Apple Apple scab", 1:"Apple Black rot", 2:"Apple Cedar apple rust",3:"Apple healthy", 4:"Background without leaves", 5:"Blueberry healthy", 6:"Cherry Powdery mildew", 7:"Cherry healthy",8:"Corn Cercospora leaf spotGray leaf spot", 9:"Corn Common rust", 10:"Corn Northern Leaf Blight", 11:"Corn healthy",12:"Grape Black rot", 13:"Grape Esca (Black Measles)", 14:"Grape Leaf blight (Isariopsis Leaf Spot)", 15:"Grape healthy",16:"Orange Haunglongbing (Citrus greening)", 17:"Peach Bacterial spot", 18:"Peach healthy",19:"Pepper bell Bacterial spot",20:"Pepper bell healthy", 21:"Potato Early blight", 22:"Potato Late blight", 23:"Potato healthy", 24:"Raspberry healthy",25:"Soybean healthy", 26:"Squash Powdery mildew", 27:"Strawberry Leaf scorch", 28:"Strawberry healthy", 29:"Tomato Bacterial spot",30:"Tomato Early blight", 31:"Tomato Late blight", 32:"Tomato Leaf Mold", 33:"Tomato Septoria leaf spot", 34:"Tomato Spider mitesTwo-spotted spider mite",35:"Tomato Target Spot", 36:"Tomato Tomato Yellow Leaf Curl Virus", 37:"Tomato Tomato mosaic virus", 38:"Tomato healthy"}
+    print(prediction)
+    #return ["leaf_class_category_mappings[np.argmax(prediction)]"
+    #return [leaf_class_category_mappings[np.argmax(prediction)], "none"]
+    predicteddisease = leaf_class_category_mappings[np.argmax(prediction)]
     reply = "Nothing to display"
+
     try :
-
-
-
-
-
+        key1="sk"
+        key2="-ico1dAjc3rZL3"
+        key3="ssVVc4LT3BlbkFJwKTdY4IeCkMtZehqRpSU"
+
+        openai.api_key = key1+key2+key3
+        message = "What is "+predicteddisease+" and how to treat the disease"
+
+        if message:
             messages = []
-        messages.append(
-            {"role": "user", "content": message},
-        )
-        chat = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo", messages=messages
-        )
-
-        reply = chat.choices[0].message.content
-    except :
-
+            messages.append(
+                {"role": "user", "content": message},
+            )
+            chat = openai.ChatCompletion.create(
+                model="gpt-3.5-turbo", messages=messages
+            )
+
+            reply = chat.choices[0].message.content
+    except Exception as ex :
+        print(ex)
 
-    imageData = imageToProcess
-    print("Image Dimensions", imageData.height, imageData.width)
 
-
+    imageData = imageToProcess
+    return [predicteddisease, reply]
 
 def submitFeedback(correctOrWrong, plantName, userData):
     global dataToSend
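Review notes on the new predict() body: the image is scaled by 225.0 here while update_metrics divides by 255.0, which looks like a typo and would shift the inputs the model sees; `imageToProcess == None` is usually written `is None`; constructing `gr.Error(...)` without raising it does not surface anything in the Gradio UI; and the OpenAI key is hard-coded in three concatenated fragments rather than read from a Space secret (the `openai` and `gr` imports are presumably in the unchanged top of the file). A hedged sketch of the preprocessing and key handling; the 255.0 scaling and the `OPENAI_API_KEY` variable name are assumptions, not taken from the commit:

import os
import numpy as np
import openai  # the diff uses the pre-1.0 openai.ChatCompletion API

def preprocess(image_array):
    # Match the 1/255 scaling used in update_metrics (the diff divides by 225.0 here)
    return (np.array(image_array, dtype=np.float16) / 255.0).reshape(1, 256, 256, 3)

def disease_summary(predicted_disease):
    # Read the key from an environment variable / Space secret instead of hard-coding it
    openai.api_key = os.environ.get("OPENAI_API_KEY", "")
    if not openai.api_key:
        return "Nothing to display"
    chat = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user",
                   "content": f"What is {predicted_disease} and how to treat the disease"}],
    )
    return chat.choices[0].message.content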
@@ -107,12 +219,12 @@ def submitFeedback(correctOrWrong, plantName, userData):
 
     saveStats(correctOrWrong)
 
-with gr.Blocks() as app :
+with gr.Blocks() as gradioapp :
 
     gr.Markdown(
         """
         # AI based plant Disease Detection Application
-
+
         """
     )
     myControls["ImageInput"] = gr.Image(type="pil")
@@ -123,7 +235,7 @@ with gr.Blocks() as app :
     myControls["AdditionalInfo"] = gr.TextArea(label='Additional Info')
     controls.append(myControls["ResultControl"])
    controls.append(myControls["AdditionalInfo"])
-
+
 
     predictBtn = gr.Button(value='Predict')
     predictBtn.click(predict, inputs=[myControls["ImageInput"]], outputs=controls)
@@ -138,10 +250,10 @@ with gr.Blocks() as app :
     feedbackBtn = gr.Button(value='Submit Feedback')
     feedbackBtn.click(submitFeedback, inputs =[myControls["PredictionSelection"], myControls["PlantName"], myControls["UserInput"]])
 
+#app.queue().launch()
+gradioapp = gr.mount_gradio_app(app, gradioapp, path="/")
 
-
-
-
-
-
-app.queue().launch()
+if __name__ == "__main__":
+    # Use this for debugging purposes only
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8001)  # Ref: https://www.gradio.app/docs/interface
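Review note on the launch change: replacing `app.queue().launch()` with `gr.mount_gradio_app` is what lets the `/metrics` route and the Gradio UI share one FastAPI server, and the earlier rename of the `gr.Blocks()` context variable from `app` to `gradioapp` avoids shadowing the FastAPI `app`. Two caveats: since `.launch()` is no longer called, request queuing is only active if `.queue()` is called on the Blocks before mounting, and Hugging Face Spaces typically expects the server on port 7860, while the `__main__` block here binds 8001 (flagged in the diff as debugging only). A minimal sketch of the same pattern; the port and the `queue()` call are assumptions, not part of the commit:

import gradio as gr
import uvicorn
from fastapi import FastAPI

app = FastAPI()

with gr.Blocks() as gradioapp:
    gr.Markdown("# AI based plant Disease Detection Application")

gradioapp.queue()  # keep request queuing, since .launch() is no longer called
app = gr.mount_gradio_app(app, gradioapp, path="/")

if __name__ == "__main__":
    # 7860 is the port Hugging Face Spaces usually expects (assumption: Space config not shown)
    uvicorn.run(app, host="0.0.0.0", port=7860)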