#!/usr/bin/env python
# coding: utf-8


import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from PIL import Image
import cv2
from sklearn.neighbors import NearestNeighbors
import os
import zipfile
import streamlit as st


# Extract the LFW dataset once; skip extraction if the folder is already present
if not os.path.isdir("lfw"):
    with zipfile.ZipFile("lfw.zip", "r") as zip_ref:
        # Extract its contents to the current working directory
        zip_ref.extractall(".")

# Sanity check that the expected layout (lfw/<person>/<image>.jpg) exists
if os.path.exists('lfw/Aaron_Pena/Aaron_Pena_0001.jpg'):
    print('Image file extracted successfully')
else:
    print('Error: Image file not found')
# Load pre-trained ResNet-50; include_top=False with average pooling returns a
# single 2048-dimensional feature vector per image
model = ResNet50(weights='imagenet', include_top=False, pooling='avg')

# Define preprocessing function: apply ResNet-50's ImageNet preprocessing
# (channel reordering and mean subtraction); expects an RGB image array
def preprocess(image):
    return preprocess_input(image)
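
# The script assumes that 'new_data.npy' (a dict mapping image paths to ResNet-50
# feature vectors) already exists. A minimal sketch of how such a file could be
# built from the extracted LFW images is given below; the helper name
# `build_feature_dict` is illustrative and not part of the original pipeline.
def build_feature_dict(root='lfw', output_path='new_data.npy'):
    features = {}
    for dirpath, _, filenames in os.walk(root):
        for name in sorted(filenames):
            if not name.lower().endswith('.jpg'):
                continue
            path = os.path.join(dirpath, name)
            # Load as RGB and embed with the same model/preprocessing used for queries
            img = np.array(Image.open(path).convert('RGB'))
            feat = model.predict(np.expand_dims(preprocess(img), axis=0)).squeeze()
            features[path] = feat
    np.save(output_path, features)
    return features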

# Load the precomputed feature dictionary (image path -> ResNet-50 feature vector)
features_dict = np.load('new_data.npy', allow_pickle=True).item()

# Stack the feature vectors into a single array (row order matches the dict keys)
features_array = np.array(list(features_dict.values()))

# Fit a k-NN index over the feature vectors
k = 10  # Number of neighbors to retrieve
nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(features_array)

# Define function to retrieve the k nearest neighbors of a query image
image_paths = list(features_dict.keys())  # same order as the rows of features_array

def get_similar_images(query_image):
    # Embed the query with ResNet-50, then look up its k nearest stored vectors
    query_feature = model.predict(np.expand_dims(preprocess(query_image), axis=0)).squeeze()
    distances, indices = nbrs.kneighbors(np.array([query_feature]))
    similar_images = []
    for idx in indices.squeeze():
        # Dictionary keys are image paths; normalize Windows path separators
        image_path = image_paths[idx].replace('\\', '/')
        similar_images.append(image_path)
    return similar_images

# Define Streamlit app
def app():
    st.title("Image Similarity Search System")

    # Option to select an image from the test dataset; paths follow the extracted
    # lfw/<person>/<image>.jpg layout checked after extraction above
    test_images = ['lfw/Aaron_Pena/Aaron_Pena_0001.jpg',
                   'lfw/Alain_Cervantes/Alain_Cervantes_0001.jpg',
                   'lfw/Alan_Jackson/Alan_Jackson_0001.jpg']
    test_image = st.selectbox("Select an image from the test dataset", test_images)

    # Display selected image
    st.image(test_image)

    # Display similar images
    st.write("Similar Images:")
    # cv2.imread returns BGR; convert to RGB so preprocess_input sees the expected order
    query_image = cv2.cvtColor(cv2.imread(test_image), cv2.COLOR_BGR2RGB)
    similar_images = get_similar_images(query_image)
    for image_path in similar_images:
        image = Image.open(image_path)
        st.image(image)

    # Option to upload an image
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Convert to 3-channel RGB so grayscale/RGBA uploads match the model input
        image = Image.open(uploaded_file).convert('RGB')
        st.image(image)

        # Display similar images
        st.write("Similar Images:")
        similar_images = get_similar_images(np.array(image))
        for image_path in similar_images:
            image = Image.open(image_path)
            st.image(image)

if __name__ == '__main__':
    app()
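
# Usage note: Streamlit apps are started from the command line rather than with
# plain `python`, e.g. `streamlit run app.py` (substitute whatever filename this
# script is saved under).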

