Fixing model and most of UI
- models/{model.onnx → model.ckpt} +2 -2
- pics/Defect/0.jpg +2 -2
- pics/Defect/1.jpg +2 -2
- pics/Defect/2.jpg +2 -2
- pics/Defect/3.jpg +2 -2
- pics/Defect/5.jpg +2 -2
- pics/Defect/6.jpg +2 -2
- pics/Defect/7.jpg +2 -2
- pics/Defect/8.jpg +2 -2
- pics/Defect/9.jpg +2 -2
- pics/nDefect/0.jpg +2 -2
- pics/nDefect/1.jpg +2 -2
- pics/nDefect/2.jpg +2 -2
- pics/nDefect/3.jpg +2 -2
- pics/nDefect/4.jpg +2 -2
- pics/nDefect/5.jpg +2 -2
- pics/nDefect/6.jpg +2 -2
- pics/nDefect/7.jpg +2 -2
- pics/nDefect/9.jpg +2 -2
- requirements.txt +1 -2
- app.py → scripts/app.py +81 -31
- pics/Rename.py → scripts/rename.py +14 -5
models/{model.onnx → model.ckpt}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5601b234e608862cde6159ba32bd77a3e5e2b23e41ce488ee778bf4154419090
+size 94409193
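The model artifact switches from an exported ONNX graph to a PyTorch Lightning checkpoint, which is why onnxruntime can be dropped further down. A minimal sketch for inspecting the new checkpoint, assuming a standard Lightning checkpoint layout and that you run from the repository root:

import torch

# Load the checkpoint on CPU and look at what it contains
# (newer torch versions may additionally need weights_only=False).
ckpt = torch.load("models/model.ckpt", map_location="cpu")
print(list(ckpt.keys()))        # typically 'state_dict', 'hyper_parameters', 'epoch', ...
print(len(ckpt["state_dict"]))  # number of tensors saved for the backbone
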
pics/Defect/0.jpg, pics/Defect/1.jpg, pics/Defect/2.jpg, pics/Defect/3.jpg, pics/Defect/5.jpg, pics/Defect/6.jpg, pics/Defect/7.jpg, pics/Defect/8.jpg, pics/Defect/9.jpg
CHANGED
(Git LFS image files replaced; binary contents not shown)

pics/nDefect/0.jpg, pics/nDefect/1.jpg, pics/nDefect/2.jpg, pics/nDefect/3.jpg, pics/nDefect/4.jpg, pics/nDefect/5.jpg, pics/nDefect/6.jpg, pics/nDefect/7.jpg, pics/nDefect/9.jpg
CHANGED
(Git LFS image files replaced; binary contents not shown)
requirements.txt
CHANGED
@@ -3,5 +3,4 @@ torch
 torchvision
 pytorch-lightning
 matplotlib
-numpy
-onnxruntime
+numpy
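Dropping onnxruntime matches the switch to loading models/model.ckpt directly with torch in scripts/app.py below, although the updated app still contains `import onnxruntime as ort`, so either that import or this removal will need a follow-up. Note also that the updated app imports torchmetrics, which is not listed here; it is normally pulled in transitively by pytorch-lightning, but pinning it explicitly would be safer. A quick environment sanity check, offered only as a sketch (the names below are the standard import names of these packages):

# Confirm every package the app imports is importable in the deployed environment.
import importlib

for name in ["torch", "torchvision", "pytorch_lightning", "matplotlib",
             "numpy", "streamlit", "torchmetrics"]:
    importlib.import_module(name)
    print(f"ok: {name}")
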
app.py → scripts/app.py
RENAMED
@@ -4,50 +4,103 @@ from PIL import Image
 import pytorch_lightning as pl
 import torch.nn as nn
 from torchvision import transforms as T
-from torchvision
+from torchvision import models
 import matplotlib.pyplot as plt
 import onnxruntime as ort
 from glob import glob
 import streamlit as st
 import numpy as np
+from torchmetrics.functional import accuracy
+from torchmetrics import Accuracy
 
 #Define the labels
-labels = ['Defect', '
+labels = ['Defect', 'Non-Defect']
 
 # Define the sample images
 sample_images = {
-    "Defect01": "pics/Defect/
-    "Defect02": "pics/Defect/
-    "Defect03": "pics/Defect/
-    "Non-Defect01": "pics/nDefect/
-    "Non-Defect02": "pics/nDefect/
-    "Non-Defect03": "pics/nDefect/
+    "Defect01": "../pics/Defect/2.jpg",
+    "Defect02": "../pics/Defect/6.jpg",
+    "Defect03": "../pics/Defect/8.jpg",
+    "Non-Defect01": "../pics/nDefect/3.jpg",
+    "Non-Defect02": "../pics/nDefect/4.jpg",
+    "Non-Defect03": "../pics/nDefect/8.jpg"
 }
 
-class
-    def __init__(self,
-        super().__init__()
-
-
+class DefectResNet(pl.LightningModule):
+    def __init__(self, n_classes=2):
+        super(DefectResNet, self).__init__()
+
+        # number of output classes (2)
         self.n_classes = n_classes
-
-
+
+        # swap out the last layer
+        self.backbone = models.resnet50(pretrained=True)
         # self.backbone = models.resnet152(pretrained=True)
         # self.backbone = models.vgg19(pretrained=True)
         for param in self.backbone.parameters():
             param.requires_grad = False
-
+
+        # change the fc layer to an output of size 2
         self.backbone.fc = torch.nn.Linear(self.backbone.fc.in_features, n_classes) #For ResNet base mdoel
         # self.backbone.classifier[6] = torch.nn.Linear(self.backbone.classifier[6].in_features, n_classes) #For VGG bse model
-
-        self.
+
+        self.entropy_loss = nn.CrossEntropyLoss()
+        self.accuracy = Accuracy(task="multiclass", num_classes=2)
+
+        self.save_hyperparameters(logger=False)
+
     def forward(self, x):
         preds = self.backbone(x)
         return preds
 
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        logits = self.backbone(x)
+        loss = self.entropy_loss(logits, y)
+        y_pred = torch.argmax(logits, dim=1)
+        self.log("train_loss", loss)
+        self.log("train_acc", self.accuracy(y_pred, y))
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        x, y = batch
+        logits = self.backbone(x)
+        loss = self.entropy_loss(logits, y)
+        y_pred = torch.argmax(logits, dim=1)
+        self.log("val_loss", loss)
+        self.log("val_acc", self.accuracy(y_pred, y))
+        return loss
+
+    def configure_optimizers(self):
+        self.optimizer = torch.optim.AdamW(self.parameters(), lr=1e-3)
+        return {
+            "optimizer": self.optimizer,
+            "monitor": "val_loss",
+        }
+
+    def test_step(self, batch, batch_idx):
+        x, y = batch
+        logits = self.backbone(x)
+        loss = self.entropy_loss(logits, y)
+        y_pred = torch.argmax(logits, dim=1)
+        self.log("val_loss", loss)
+        self.log("val_acc", self.accuracy(y_pred, y))
+        return loss
+
+    def _shared_eval_step(self, batch, batch_idx):
+        x, y = batch
+        y_hat = self.model(x)
+        logits = self.backbone(x)
+        loss = self.entropy_loss(logits, y)
+        acc = accuracy(y_hat, y)
+        return loss, acc
+
 # Load the model on the appropriate device
-
-
+loadmodel = DefectResNet()
+def load_checkpoint(checkpoint):
+    loadmodel.load_state_dict(checkpoint["state_dict"])
+load_checkpoint(torch.load("../models/model.ckpt"))
+loadmodel.eval()
 
 transform = T.Compose([
     T.Resize((224, 224)),
@@ -56,24 +109,21 @@ transform = T.Compose([
 
 def predict(image):
     image = transform(image).unsqueeze(0)
-    image = image.to("cpu")
 
     # Perform the prediction
     with torch.no_grad():
-        logits =
+        logits = loadmodel(image)
         probs = F.softmax(logits, dim=1)
     return probs
 
 # Define the Streamlit app
 def app():
     predictions = None
-    st.title("
-
+    st.title("Digital textile printing defect classification for industrial.")
     uploaded_file = st.file_uploader("Upload your image...", type=["jpg"])
 
     with st.expander("Or choose from sample here..."):
-
-        st.header("Sample Defect Images")
+        sample = st.selectbox(label = "Select here", options = list(sample_images.keys()), label_visibility="hidden")
         col1, col2, col3 = st.columns(3)
         with col1:
             st.image(sample_images["Defect01"], caption="Defect01", use_column_width=True)
@@ -81,7 +131,6 @@ def app():
             st.image(sample_images["Defect02"], caption="Defect02", use_column_width=True)
         with col3:
             st.image(sample_images["Defect03"], caption="Defect03", use_column_width=True)
-        st.header("Sample Non-Defect Images")
         col1, col2, col3 = st.columns(3)
         with col1:
             st.image(sample_images["Non-Defect01"], caption="Non-Defect01", use_column_width=True)
@@ -90,8 +139,6 @@ def app():
         with col3:
             st.image(sample_images["Non-Defect03"], caption="Non-Defect03", use_column_width=True)
 
-        sample = st.selectbox(label = "Select here", options = list(sample_images.keys()), label_visibility="hidden")
-
     # If an image is uploaded, make a prediction on it
     if uploaded_file is not None:
         image = Image.open(uploaded_file)
@@ -104,13 +151,16 @@ def app():
 
     # Show predictions with their probabilities
     if predictions is not None:
-        st.write(predictions)
+        # st.write(predictions)
+        st.subheader(f'Predictions : {labels[torch.argmax(predictions[0]).item()]}')
         for pred, prob in zip(labels, predictions[0]):
            st.write(f"{pred}: {prob * 100:.2f}%")
            st.progress(prob.item())
     else:
         st.write("No predictions.")
-
+    st.subheader("Credits")
+    st.write("By : Settapun Laoaree | AI-Builders")
+    st.markdown("Source : [Github](https://github.com/ShokulSet/DefectDetection-AIBuilders) [Hugging Face](https://huggingface.co/spaces/sh0kul/DefectDetection-Deploy)")
 
 # Run the app
 if __name__ == "__main__":
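The UI side now restores a DefectResNet from the Lightning checkpoint instead of creating an onnxruntime session, and predict() calls the model directly. A minimal sketch of exercising that path outside Streamlit, assuming the same relative paths as the app (run from scripts/) and a Resize+ToTensor transform (the diff only shows the Resize step); the sample file name is illustrative:

import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms as T

from app import DefectResNet  # pulls in app.py's other module-level imports as a side effect

# Rebuild the model and restore the trained weights, as app.py does.
model = DefectResNet()
ckpt = torch.load("../models/model.ckpt", map_location="cpu")  # newer torch may need weights_only=False
model.load_state_dict(ckpt["state_dict"])
model.eval()

transform = T.Compose([T.Resize((224, 224)), T.ToTensor()])

image = Image.open("../pics/Defect/2.jpg")
x = transform(image).unsqueeze(0)
with torch.no_grad():
    probs = F.softmax(model(x), dim=1)
print(dict(zip(["Defect", "Non-Defect"], probs[0].tolist())))

Since __init__ calls save_hyperparameters(), DefectResNet.load_from_checkpoint("../models/model.ckpt") would be the more idiomatic Lightning way to do the same restore.
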
pics/Rename.py → scripts/rename.py
RENAMED
@@ -1,5 +1,5 @@
 # Python 3 code to rename multiple
-# files in a directory or
+# files in a directory or defect_path
 
 # importing os module
 import os
@@ -7,15 +7,24 @@ import os
 # Function to rename multiple files
 def main():
 
-
-
+    defect_path = "../pics/Defect"
+    ndefect_path = "../pics/nDefect"
+    for count, filename in enumerate(os.listdir(defect_path)):
         dst = f"{str(count)}.jpg"
-        src =f"{
-        dst =f"{
+        src =f"{defect_path}/{filename}"  # defect_pathname/filename, if .py file is outside defect_path
+        dst =f"{defect_path}/{dst}"
 
         # rename() function will
         # rename all the files
         os.rename(src, dst)
+
+    for count, filename in enumerate(os.listdir(ndefect_path)):
+        dst = f"{str(count)}.jpg"
+        src =f"{ndefect_path}/{filename}"  # defect_pathname/filename, if .py file is outside defect_path
+        dst =f"{ndefect_path}/{dst}"
+        # rename() function will
+        # rename all the files
+        os.rename(src, dst)
 
 # Driver Code
 if __name__ == '__main__':
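scripts/rename.py now renumbers both pics/Defect and pics/nDefect, matching the sample paths used in scripts/app.py. One caveat with renaming files in place to 0.jpg, 1.jpg, ... is that a target name can already exist before it has been processed (os.rename raises on Windows and silently overwrites on POSIX in that case). A collision-safe variant, offered only as a sketch and not part of this commit, factors the loop into a helper and renames through temporary names first:

import os

def renumber_jpgs(folder):
    # Rename every file in `folder` to 0.jpg, 1.jpg, ... using two passes so a
    # final name never collides with a file that has not been renamed yet
    # (assumes no existing file already uses the tmp_ prefix).
    files = sorted(os.listdir(folder))
    for count, filename in enumerate(files):
        os.rename(os.path.join(folder, filename),
                  os.path.join(folder, f"tmp_{count}.jpg"))
    for count in range(len(files)):
        os.rename(os.path.join(folder, f"tmp_{count}.jpg"),
                  os.path.join(folder, f"{count}.jpg"))

if __name__ == "__main__":
    renumber_jpgs("../pics/Defect")
    renumber_jpgs("../pics/nDefect")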