Ritvik committed on
Commit
3028ab2
·
1 Parent(s): f63c2e8

Updated app V1

Browse files
Files changed (6) hide show
  1. .gitignore +3 -0
  2. .gradio/certificate.pem +31 -0
  3. LICENSE.txt +11 -0
  4. README.md +7 -1
  5. app.py +312 -60
  6. requirements.txt +5 -1
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Ignore environment files
2
+ .env
3
+
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
LICENSE.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BigScience OpenRAIL-M License
2
+
3
+ This model is licensed under the BigScience OpenRAIL-M License.
4
+
5
+ You are free to use, share, and adapt this model for research and non-commercial purposes, provided you give proper attribution to the author (Ritvik Gaur).
6
+
7
+ 🛑 Commercial use is not permitted without explicit written permission.
8
+
9
+ To request a commercial license, please contact: [[email protected]]
10
+
11
+ For the full license text, see: https://huggingface.co/spaces/BigScience/OpenRAIL-M/blob/main/LICENSE.md
README.md CHANGED
@@ -11,4 +11,10 @@ license: openrail
11
  short_description: Car AI Doctor
12
  ---
13
 
14
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
 
 
 
 
 
 
11
  short_description: Car AI Doctor
12
  ---
13
 
14
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
15
+
16
+ ## License
17
+ This project is distributed under the [BigScience OpenRAIL-M License](https://huggingface.co/spaces/BigScience/OpenRAIL-M).
18
+ Commercial use is prohibited unless explicit permission is granted by the author.
19
+
20
+ For commercial licensing inquiries, contact: [[email protected]]
app.py CHANGED
@@ -1,64 +1,316 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
1
  import gradio as gr
2
+ from groq import Groq
3
+ from dotenv import load_dotenv
4
+ from duckduckgo_search import DDGS
5
+ import os
6
+ import traceback
7
+ import json
8
+ import time
9
+ from collections import defaultdict
10
+ import requests
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
# Load .env environment variables
load_dotenv()
api_key = os.getenv("GROQ_API_KEY")
client = Groq(api_key=api_key)
MODEL_NAME = "llama-3.3-70b-versatile"

# In-memory cache for search results.
# Maps lowercased query -> (timestamp, formatted_results) tuple.
# BUGFIX: this was `defaultdict(str)`, but every stored value is a tuple and
# readers unpack `cached_time, cached_results = search_cache[key]` — a str
# default would crash on unpacking. A plain dict is correct because all
# lookups are guarded with `in`.
search_cache = {}
cache_timeout = 3600  # seconds (1 hour)

# In-memory Q&A store for community simulation (list of
# {"question": str, "answers": list[str]} dicts; process-local, not persisted)
community_qa = []

# Diagnostics knowledge base (simplified): make/model -> issue keyword ->
# causes/solutions/severity. Matched by substring against the user message.
diagnostics_db = {
    "Maruti Alto": {
        "check engine light": {
            "causes": ["Faulty oxygen sensor", "Loose fuel cap", "Spark plug issues"],
            "solutions": ["Run OBD-II scan (₹500-₹1500)", "Tighten/replace fuel cap (₹100-₹500)", "Replace spark plugs (₹1000-₹2000)"],
            "severity": "Moderate"
        },
        "poor fuel efficiency": {
            "causes": ["Clogged air filter", "Tire underinflation", "Fuel injector issues"],
            "solutions": ["Replace air filter (₹300-₹800)", "Check tire pressure (free)", "Clean/replace injectors (₹2000-₹5000)"],
            "severity": "Low"
        }
    },
    "Hyundai i20": {
        "ac not cooling": {
            "causes": ["Low refrigerant", "Faulty compressor", "Clogged condenser"],
            "solutions": ["Refill refrigerant (₹1500-₹3000)", "Repair/replace compressor (₹5000-₹15000)", "Clean condenser (₹1000-₹2000)"],
            "severity": "High"
        }
    }
}

# Maintenance tips appended to responses (one is picked per message)
maintenance_tips = [
    "Check tire pressure monthly to improve fuel efficiency.",
    "Change engine oil every 10,000 km or 6 months for Indian road conditions.",
    "Inspect brakes regularly, especially during monsoon seasons.",
    "Keep your car clean to prevent rust in humid climates."
]
55
+
56
# Tool: DuckDuckGo web search with retry and structured output
def web_search_duckduckgo(query: str, max_results: int = 5, max_retries: int = 2):
    """Search DuckDuckGo (India region) and return a formatted result list.

    Results are cached in `search_cache` for `cache_timeout` seconds, keyed by
    the lowercased query. On repeated failure returns a warning string rather
    than raising, so callers can surface it directly to the user.
    """
    cache_key = query.lower()
    if cache_key in search_cache:
        cached_time, cached_results = search_cache[cache_key]
        if time.time() - cached_time < cache_timeout:
            print(f"Using cached results for: {query}")
            return cached_results

    for attempt in range(max_retries):
        # BUGFIX: rebuild the result list on every attempt. Previously it was
        # created once before the loop, so a partially-filled list from a
        # failed attempt leaked duplicate entries into the retried attempt.
        results = []
        try:
            with DDGS() as ddgs:
                for r in ddgs.text(query, region="in-en", safesearch="Moderate", max_results=max_results):
                    results.append({"title": r['title'], "url": r['href']})
            formatted_results = "\n\n".join(f"- {r['title']}\n {r['url']}" for r in results)
            # Cache as (timestamp, text) so staleness can be checked on read.
            search_cache[cache_key] = (time.time(), formatted_results)
            return formatted_results
        except Exception as e:
            print(f"Search attempt {attempt + 1} failed: {str(e)}")
            if attempt + 1 == max_retries:
                return f"⚠️ Web search failed after {max_retries} attempts: {str(e)}"
            time.sleep(1)
79
+
80
# ReAct agent response with thought process
def respond(message, history, system_message, max_tokens, temperature, top_p, vehicle_profile):
    """Stream a chat reply for the Gradio UI.

    Resolution order:
      1. `diagnostics_db` lookup for the saved vehicle make/model,
      2. community Q&A keyword handling (post/view),
      3. direct web search when a garage/service trigger keyword appears,
      4. ReAct loop: the LLM emits a JSON object with 'thought',
         'observation', 'action' (search / respond / clarify / add_qa /
         get_qa) and may request a web search before answering.

    Yields progressively longer prefixes of the final text so Gradio renders
    a streaming response. Errors are yielded as text, never raised.
    """
    try:
        # Initialize messages with ReAct system prompt
        react_prompt = (
            f"{system_message}\n\n"
            "You are using the ReAct framework. For each user query, follow these steps:\n"
            "1. **Thought**: Reason about the query and decide the next step. Check the diagnostics database first for known issues. For location-specific queries (e.g., garages, repair shops) or real-time data (e.g., pricing, availability), prioritize web search. For community questions, check the Q&A store.\n"
            "2. **Observation**: Note relevant information (e.g., user input, vehicle profile, tool results, or context).\n"
            "3. **Action**: Choose an action: 'search' (web search), 'respond' (final answer), 'clarify' (ask for details), 'add_qa' (add to Q&A store), or 'get_qa' (retrieve Q&A).\n"
            "Format your response as a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'. Example:\n"
            "{\n"
            " \"thought\": \"User asks for garages in Dehradun, need to search.\",\n"
            " \"observation\": \"Location: Dehradun\",\n"
            " \"action\": \"search\",\n"
            " \"search_query\": \"car repair shops Dehradun\"\n"
            "}\n"
            f"User vehicle profile: {json.dumps(vehicle_profile)}\n"
            "Use the search tool for locations, prices, or real-time data. Ensure valid JSON."
        )
        messages = [{"role": "system", "content": react_prompt}]

        # Add history (Gradio "messages" format: dicts with role/content)
        for msg in history:
            role = msg.get("role")
            content = msg.get("content")
            if role in ["user", "assistant"] and content:
                messages.append({"role": role, "content": content})
        messages.append({"role": "user", "content": message})

        def _stable_tip(text):
            # BUGFIX: the original used hash(message), but str hashes are
            # salted per interpreter run (PYTHONHASHSEED), so the same message
            # got a different tip after every restart. A code-point sum gives
            # a deterministic, evenly spread index.
            return maintenance_tips[sum(map(ord, text)) % len(maintenance_tips)]

        # Trigger keywords for garage search
        trigger_keywords = [
            "garage near", "car service near", "repair shop in", "mechanic in", "car workshop near",
            "tyre change near", "puncture repair near", "engine repair near", "car wash near",
            "car ac repair", "suspension work", "car battery replacement", "headlight change",
            "oil change near", "nearby service center", "wheel alignment near", "wheel balancing",
            "car painting service", "denting and painting", "car insurance repair", "maruti workshop",
            "hyundai service", "honda repair center", "toyota garage", "tata motors service",
            "mahindra car repair", "nexa service center", "kia workshop", "ev charging near",
            "ev repair", "gearbox repair", "clutch repair", "brake pad replacement",
            "windshield repair", "car glass replacement", "coolant top up", "engine tuning",
            "car noise issue", "check engine light", "dashboard warning light", "local garage",
            "trusted mechanic", "authorized service center", "car towing service near me",
            "car not starting", "flat battery", "jump start service", "roadside assistance",
            "ac not cooling", "car breakdown", "pickup and drop car service"
        ]

        # Check diagnostics database (substring match against known issues)
        if vehicle_profile.get("make_model") and any(kw in message.lower() for kw in diagnostics_db.get(vehicle_profile["make_model"], {})):
            for issue, details in diagnostics_db[vehicle_profile["make_model"]].items():
                if issue in message.lower():
                    response = (
                        f"**Diagnosed Issue**: {issue}\n"
                        f"- **Possible Causes**: {', '.join(details['causes'])}\n"
                        f"- **Solutions**: {', '.join(details['solutions'])}\n"
                        f"- **Severity**: {details['severity']}\n"
                        f"Would you like to search for garages to address this issue or learn more?"
                    )
                    yield response
                    return

        # Check for community Q&A keywords
        if any(kw in message.lower() for kw in ["community", "forum", "discussion", "share advice", "ask community"]):
            if "post" in message.lower() or "share" in message.lower():
                community_qa.append({"question": message, "answers": []})
                yield "Your question has been posted to the community! Check back for answers."
                return
            elif "view" in message.lower() or "see" in message.lower():
                if community_qa:
                    response = "Community Q&A:\n" + "\n".join(
                        f"Q: {qa['question']}\nA: {', '.join(qa['answers']) or 'No answers yet'}"
                        for qa in community_qa
                    )
                else:
                    response = "No community questions yet. Post one with 'share' or 'post'!"
                yield response
                return

        # Check for trigger keywords to directly perform search
        if any(keyword in message.lower() for keyword in trigger_keywords):
            print(f"Trigger keyword detected in query: {message}")
            search_results = web_search_duckduckgo(message)
            print(f"Search Results:\n{search_results}")
            final_response = f"🔍 Here are some results I found:\n\n{search_results}\n\n**Tip**: {_stable_tip(message)}"
            for i in range(0, len(final_response), 10):
                yield final_response[:i + 10]
            return

        # ReAct loop (up to 3 iterations)
        max_iterations = 3
        max_json_retries = 2
        current_response = ""
        # BUGFIX: raw_response must exist before the retry loop — when the LLM
        # call failed on every retry it was never assigned, and the action
        # handlers below raised NameError instead of reporting the failure.
        raw_response = ""
        for iteration in range(max_iterations):
            print(f"\n--- ReAct Iteration {iteration + 1} ---")
            react_step = {}
            thought = observation = action = ""

            # Call LLM with current messages, retrying on API or JSON errors
            for retry in range(max_json_retries):
                try:
                    completion = client.chat.completions.create(
                        model=MODEL_NAME,
                        messages=messages,
                        temperature=temperature,
                        max_completion_tokens=max_tokens,
                        top_p=top_p,
                        stream=False,
                    )
                    raw_response = completion.choices[0].message.content

                    # Parse LLM response
                    try:
                        react_step = json.loads(raw_response)
                        thought = react_step.get("thought", "")
                        observation = react_step.get("observation", "")
                        action = react_step.get("action", "")

                        # Log to console
                        print("Thought:", thought)
                        print("Observation:", observation)
                        print("Action:", action)
                        break
                    except json.JSONDecodeError:
                        print(f"Error: LLM response is not valid JSON (attempt {retry + 1}/{max_json_retries}).")
                        if retry + 1 == max_json_retries:
                            print("Max retries reached. Treating as direct response.")
                            react_step = {"response": raw_response, "action": "respond"}
                            thought = "N/A (Invalid JSON)"
                            observation = "N/A (Invalid JSON)"
                            action = "respond"
                        else:
                            messages.append({
                                "role": "system",
                                "content": "Previous response was not valid JSON. Please provide a valid JSON object with 'thought', 'observation', 'action', and optionally 'search_query', 'response', or 'qa_content'."
                            })
                except Exception as e:
                    print(f"LLM call failed (attempt {retry + 1}/{max_json_retries}): {str(e)}")
                    if retry + 1 == max_json_retries:
                        react_step = {"response": f"⚠️ Failed to process query: {str(e)}", "action": "respond"}
                        thought = "N/A (LLM error)"
                        observation = "N/A (LLM error)"
                        action = "respond"
                    else:
                        time.sleep(1)

            # Handle action
            if action == "search":
                search_query = react_step.get("search_query", message)
                print(f"Performing web search for: {search_query}")
                search_results = web_search_duckduckgo(search_query)
                messages.append({"role": "assistant", "content": raw_response})
                messages.append({
                    "role": "system",
                    "content": f"Search results for '{search_query}':\n{search_results}"
                })
                print(f"Search Results:\n{search_results}")

            elif action == "respond":
                final_response = react_step.get("response", raw_response)
                current_response = f"{final_response}\n\n**Tip**: {_stable_tip(message)}"
                print(f"Final Response:\n{current_response}")
                break
            elif action == "clarify":
                clarification = react_step.get("response", "Please provide more details.")
                messages.append({"role": "assistant", "content": raw_response})
                current_response = clarification
                print(f"Clarification Request:\n{current_response}")
                # BUGFIX: stop here so the clarifying question actually reaches
                # the user; previously the loop continued and later iterations
                # overwrote current_response, silently discarding the question.
                break
            elif action == "add_qa":
                qa_content = react_step.get("qa_content", message)
                community_qa.append({"question": qa_content, "answers": []})
                current_response = "Your question has been posted to the community! Check back for answers."
                print(f"Community Q&A Added:\n{qa_content}")
                break
            elif action == "get_qa":
                if community_qa:
                    current_response = "Community Q&A:\n" + "\n".join(
                        f"Q: {qa['question']}\nA: {', '.join(qa['answers']) or 'No answers yet'}"
                        for qa in community_qa
                    )
                else:
                    current_response = "No community questions yet. Post one with 'share' or 'post'!"
                print(f"Community Q&A Retrieved:\n{current_response}")
                break
            else:
                print("Unknown action, continuing to next iteration.")
                messages.append({"role": "assistant", "content": raw_response})

        # BUGFIX: if the loop exhausted its iterations without a final answer
        # (e.g. three consecutive searches), current_response was "" and the
        # generator yielded nothing, leaving a blank chat bubble. Fall back to
        # the last raw LLM output, or an explicit apology.
        if not current_response:
            current_response = raw_response or "⚠️ I couldn't produce an answer. Please try rephrasing your question."

        # Stream final response to Gradio in 10-character increments
        for i in range(0, len(current_response), 10):
            yield current_response[:i + 10]

    except Exception as e:
        error_msg = f"❌ Error: {str(e)}\n{traceback.format_exc()}"
        print(error_msg)
        yield error_msg
273
+
274
# Gradio interface with vehicle profile.
# NOTE: component creation order inside gr.Blocks determines the rendered
# layout, so statements here must not be reordered.
with gr.Blocks(title="CarMaa - India's AI Car Doctor") as demo:
    gr.Markdown("# CarMaa - India's AI Car Doctor")
    gr.Markdown("Your trusted AI for car diagnostics, garage searches, and community advice.")

    # Vehicle profile inputs (free-text; no validation is performed here)
    with gr.Row():
        make_model = gr.Textbox(label="Vehicle Make and Model (e.g., Maruti Alto)", placeholder="Enter your car's make and model")
        year = gr.Textbox(label="Year", placeholder="Enter the year of manufacture")
        city = gr.Textbox(label="City", placeholder="Enter your city")
    # Per-session state dict passed into respond() as `vehicle_profile`
    vehicle_profile = gr.State(value={"make_model": "", "year": "", "city": ""})

    # Update vehicle profile: overwrite the state dict with the textbox values
    def update_vehicle_profile(make_model, year, city):
        return {"make_model": make_model, "year": year, "city": city}

    gr.Button("Save Vehicle Profile").click(
        fn=update_vehicle_profile,
        inputs=[make_model, year, city],
        outputs=vehicle_profile
    )

    # Chat interface; additional_inputs are appended to respond()'s arguments
    # after (message, history), in the order listed below.
    chatbot = gr.ChatInterface(
        fn=respond,
        additional_inputs=[
            gr.Textbox(value=(
                "You are CarMaa, a highly intelligent and trusted AI Car Doctor trained on comprehensive automobile data, diagnostics, "
                "and service records with specialized knowledge of Indian vehicles, road conditions, and market pricing. Your role is to "
                "guide car owners with accurate insights, including service intervals, symptoms, estimated repair costs, garage locations, "
                "climate effects, and fuel-efficiency tips. Personalize answers by vehicle details and city. Engage users as a community by "
                "allowing Q&A posts and sharing maintenance tips."
            ), label="System message"),
            gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
            vehicle_profile
        ],
        type="messages"
    )

if __name__ == "__main__":
    # share=True requests a public gradio.live tunnel in addition to the
    # local server (hence the .gradio/certificate.pem added in this commit).
    demo.launch(share=True)
requirements.txt CHANGED
@@ -1 +1,5 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
1
+ huggingface_hub>=0.28.1
2
+ gradio==5.29.0
3
+ groq
4
+ python-dotenv
5
+ duckduckgo_search==6.2.12