Update app.py
app.py CHANGED

@@ -14,7 +14,7 @@ with open(".config/application_default_credentials.json", 'w') as file:
 
 vertexai.init(project=os.getenv('project_id'))
 model = GenerativeModel("gemini-1.0-pro-vision")
-client = InferenceClient("google/gemma-7b")
+client = InferenceClient("google/gemma-7b-it")
 
 def extract_image_urls(text):
     url_regex = r"(https?:\/\/.*\.(?:png|jpg|jpeg|gif|webp|svg))"
@@ -40,17 +40,22 @@ def search(url):
     response = model.generate_content([image,"Describe what is shown in this image."])
     return response.text
 
-def format_prompt(message, history):
-
-
-
-
-
-
+def format_prompt(message, history, cust_p):
+    prompt = ""
+    if history:
+        for user_prompt, bot_response in history:
+            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
+            prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
+    if VERBOSE==True:
+        print(prompt)
+    #prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
+    prompt+=cust_p.replace("USER_INPUT",message)
+    return prompt
 
 def generate(
     prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
 ):
+    custom_prompt="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model"
     temperature = float(temperature)
     if temperature < 1e-2:
         temperature = 1e-2
@@ -70,7 +75,7 @@ def generate(
     image_description = "Image Description: " + search(image)
     prompt = prompt.replace(image, image_description)
     print(prompt)
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history, custom_prompt)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
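
For reference, a minimal standalone sketch of what the new prompt assembly produces, with one prior exchange in the chat history. VERBOSE is assumed to be a flag defined elsewhere in app.py, and the example history and message are made up:

    # Mirrors the committed format_prompt; VERBOSE is assumed to be
    # defined elsewhere in app.py.
    VERBOSE = False

    def format_prompt(message, history, cust_p):
        prompt = ""
        if history:
            for user_prompt, bot_response in history:
                prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
                prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
        if VERBOSE:
            print(prompt)
        # Splice the new user message into the caller-supplied template.
        return prompt + cust_p.replace("USER_INPUT", message)

    custom_prompt = "<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model"
    print(format_prompt("Describe the image.", [("Hi", "Hello!")], custom_prompt))
    # Output (one line):
    # <start_of_turn>userHi<end_of_turn><start_of_turn>modelHello!<end_of_turn><start_of_turn>userDescribe the image.<end_of_turn><start_of_turn>model

Note that Gemma's published chat template places a newline after each role tag, as in the commented-out line in the diff; the committed template concatenates tag and text directly.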
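The generation path streams tokens back from the Inference API. A hedged sketch of how that stream is consumed, assuming InferenceClient comes from huggingface_hub as its text_generation signature suggests; stream_reply is a hypothetical helper name:

    from huggingface_hub import InferenceClient

    client = InferenceClient("google/gemma-7b-it")

    def stream_reply(formatted_prompt, **generate_kwargs):
        # stream=True with details=True yields per-token stream objects;
        # accumulate each token's text and yield the running output, as a
        # Gradio streaming chat callback would.
        stream = client.text_generation(
            formatted_prompt, **generate_kwargs,
            stream=True, details=True, return_full_text=False,
        )
        output = ""
        for response in stream:
            output += response.token.text
            yield output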