krrishD committed on
Commit
a8f882c
·
1 Parent(s): 9971a23

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -154
app.py CHANGED
@@ -4,7 +4,7 @@ from langchain.prompts import PromptTemplate
4
  from langchain.prompts.few_shot import FewShotPromptTemplate
5
 
6
  import os
7
- print(os.environ)
8
  os.environ["OPENAI_API_KEY"] = os.environ.get("open_ai_key") #openai key
9
 
10
  llm = OpenAI(temperature=.7)
@@ -99,56 +99,6 @@ DMPrompt = FewShotPromptTemplate(
99
 
100
  DMChain = LLMChain(llm=llm, prompt=DMPrompt)
101
 
102
- StackTrace = r"""ImportError Traceback (most recent call last)
103
- <ipython-input-13-43eca54f7d45> in <module>
104
- ----> 1 import gradio as gr
105
-
106
- ~\anaconda3\lib\site-packages\gradio\__init__.py in <module>
107
- 1 import pkgutil
108
- 2
109
- ----> 3 import gradio.components as components
110
- 4 import gradio.inputs as inputs
111
- 5 import gradio.outputs as outputs
112
-
113
- ~\anaconda3\lib\site-packages\gradio\components.py in <module>
114
- 29 from markdown_it import MarkdownIt
115
- 30
116
- ---> 31 from gradio import media_data, processing_utils, utils
117
- 32 from gradio.blocks import Block
118
- 33 from gradio.documentation import document, set_documentation_group
119
-
120
- ~\anaconda3\lib\site-packages\gradio\processing_utils.py in <module>
121
- 18 from PIL import Image, ImageOps, PngImagePlugin
122
- 19
123
- ---> 20 from gradio import encryptor, utils
124
- 21
125
- 22 with warnings.catch_warnings():
126
-
127
- ~\anaconda3\lib\site-packages\gradio\utils.py in <module>
128
- 32
129
- 33 import aiohttp
130
- ---> 34 import fsspec.asyn
131
- 35 import httpx
132
- 36 import requests
133
-
134
- ~\anaconda3\lib\site-packages\fsspec\asyn.py in <module>
135
- 14 from .exceptions import FSTimeoutError
136
- 15 from .spec import AbstractBufferedFile, AbstractFileSystem
137
- ---> 16 from .utils import is_exception, other_paths
138
- 17
139
- 18 private = re.compile("_[^_]")
140
-
141
- ImportError: cannot import name 'is_exception' from 'fsspec.utils' (C:\Users\tompe\anaconda3\lib\site-packages\fsspec\utils.py)
142
-
143
- def is_cat(x): return x[0].isupper()"""
144
-
145
- UserResponse = "How do I fix this gradio fsspec error?"
146
- isLanguage = "python"
147
- Context = ""
148
-
149
- chainOutput = DMChain({"StackTrace": StackTrace, "UserResponse": UserResponse, "isLanguage": isLanguage, "Context": Context})
150
- chainOutput
151
-
152
  """## Debugger Model v2
153
 
154
  ### isFurtherContextNeeded
@@ -221,55 +171,6 @@ isFurtherContextNeededPrompt = FewShotPromptTemplate(
221
 
222
  isFurtherContextNeededChain = LLMChain(llm=llm, prompt=isFurtherContextNeededPrompt)
223
 
224
- StackTrace = r"""ImportError Traceback (most recent call last)
225
- <ipython-input-13-43eca54f7d45> in <module>
226
- ----> 1 import gradio as gr
227
-
228
- ~\anaconda3\lib\site-packages\gradio\__init__.py in <module>
229
- 1 import pkgutil
230
- 2
231
- ----> 3 import gradio.components as components
232
- 4 import gradio.inputs as inputs
233
- 5 import gradio.outputs as outputs
234
-
235
- ~\anaconda3\lib\site-packages\gradio\components.py in <module>
236
- 29 from markdown_it import MarkdownIt
237
- 30
238
- ---> 31 from gradio import media_data, processing_utils, utils
239
- 32 from gradio.blocks import Block
240
- 33 from gradio.documentation import document, set_documentation_group
241
-
242
- ~\anaconda3\lib\site-packages\gradio\processing_utils.py in <module>
243
- 18 from PIL import Image, ImageOps, PngImagePlugin
244
- 19
245
- ---> 20 from gradio import encryptor, utils
246
- 21
247
- 22 with warnings.catch_warnings():
248
-
249
- ~\anaconda3\lib\site-packages\gradio\utils.py in <module>
250
- 32
251
- 33 import aiohttp
252
- ---> 34 import fsspec.asyn
253
- 35 import httpx
254
- 36 import requests
255
-
256
- ~\anaconda3\lib\site-packages\fsspec\asyn.py in <module>
257
- 14 from .exceptions import FSTimeoutError
258
- 15 from .spec import AbstractBufferedFile, AbstractFileSystem
259
- ---> 16 from .utils import is_exception, other_paths
260
- 17
261
- 18 private = re.compile("_[^_]")
262
-
263
- ImportError: cannot import name 'is_exception' from 'fsspec.utils' (C:\Users\tompe\anaconda3\lib\site-packages\fsspec\utils.py)
264
-
265
- def is_cat(x): return x[0].isupper()"""
266
-
267
- UserResponse = "How do I fix this gradio fsspec error?"
268
- isLanguage = "python"
269
- Context = "None"
270
-
271
- print(isFurtherContextNeededChain.apply([{"StackTrace": StackTrace, "UserResponse": UserResponse, "isLanguage": isLanguage, "Context": Context}]))
272
-
273
  """### SystemResponse"""
274
 
275
  SystemResponseTemplate = """
@@ -350,55 +251,6 @@ SystemResponsePrompt = FewShotPromptTemplate(
350
 
351
  SystemResponseChain = LLMChain(llm=llm, prompt=SystemResponsePrompt)
352
 
353
- StackTrace = r"""ImportError Traceback (most recent call last)
354
- <ipython-input-13-43eca54f7d45> in <module>
355
- ----> 1 import gradio as gr
356
-
357
- ~\anaconda3\lib\site-packages\gradio\__init__.py in <module>
358
- 1 import pkgutil
359
- 2
360
- ----> 3 import gradio.components as components
361
- 4 import gradio.inputs as inputs
362
- 5 import gradio.outputs as outputs
363
-
364
- ~\anaconda3\lib\site-packages\gradio\components.py in <module>
365
- 29 from markdown_it import MarkdownIt
366
- 30
367
- ---> 31 from gradio import media_data, processing_utils, utils
368
- 32 from gradio.blocks import Block
369
- 33 from gradio.documentation import document, set_documentation_group
370
-
371
- ~\anaconda3\lib\site-packages\gradio\processing_utils.py in <module>
372
- 18 from PIL import Image, ImageOps, PngImagePlugin
373
- 19
374
- ---> 20 from gradio import encryptor, utils
375
- 21
376
- 22 with warnings.catch_warnings():
377
-
378
- ~\anaconda3\lib\site-packages\gradio\utils.py in <module>
379
- 32
380
- 33 import aiohttp
381
- ---> 34 import fsspec.asyn
382
- 35 import httpx
383
- 36 import requests
384
-
385
- ~\anaconda3\lib\site-packages\fsspec\asyn.py in <module>
386
- 14 from .exceptions import FSTimeoutError
387
- 15 from .spec import AbstractBufferedFile, AbstractFileSystem
388
- ---> 16 from .utils import is_exception, other_paths
389
- 17
390
- 18 private = re.compile("_[^_]")
391
-
392
- ImportError: cannot import name 'is_exception' from 'fsspec.utils' (C:\Users\tompe\anaconda3\lib\site-packages\fsspec\utils.py)
393
-
394
- def is_cat(x): return x[0].isupper()"""
395
-
396
- UserResponse = "How do I fix this gradio fsspec error?"
397
- isLanguage = "python"
398
-
399
- chainOutput = DMChain.chain([{"StackTrace": StackTrace, "UserResponse": UserResponse, "isLanguage": isLanguage}])
400
- chainOutput
401
-
402
  """## Summarizer Model"""
403
 
404
  SummarizerTemplate = """ You are an expert {isLanguage} machine learning developer. Summarize the given context, system response for the stacktrace, for somebody that is trying to debug this stacktrace:
@@ -545,11 +397,6 @@ def chat(message, history):
545
  response = SystemResponseChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"], "isFurtherContextNeeded": initDebuggerModelResponse})['text']
546
  else:
547
  response = initDebuggerModelResponse.split("SYSTEM RESPONSE:")[1]
548
- # # check if further context is needed
549
- # isFurtherContextNeeded = isFurtherContextNeededChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"]})['text']
550
- # print(isFurtherContextNeeded)
551
- # # pass the result of that + pre-existing context
552
- # response = SystemResponseChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"], "isFurtherContextNeeded": isFurtherContextNeeded})['text']
553
  # summarize the conversation
554
  SummarizerChain = LLMChain(llm=llm, prompt=SummarizerPrompt)
555
  chat_variables["Context"] = SummarizerChain({"StackTrace": chat_variables["StackTrace"], "Context": chat_variables["Context"], "isLanguage": chat_variables["isLanguage"], "SystemResponse": response})['text']
 
4
  from langchain.prompts.few_shot import FewShotPromptTemplate
5
 
6
  import os
7
+
8
  os.environ["OPENAI_API_KEY"] = os.environ.get("open_ai_key") #openai key
9
 
10
  llm = OpenAI(temperature=.7)
 
99
 
100
  DMChain = LLMChain(llm=llm, prompt=DMPrompt)
101
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  """## Debugger Model v2
103
 
104
  ### isFurtherContextNeeded
 
171
 
172
  isFurtherContextNeededChain = LLMChain(llm=llm, prompt=isFurtherContextNeededPrompt)
173
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
  """### SystemResponse"""
175
 
176
  SystemResponseTemplate = """
 
251
 
252
  SystemResponseChain = LLMChain(llm=llm, prompt=SystemResponsePrompt)
253
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
  """## Summarizer Model"""
255
 
256
  SummarizerTemplate = """ You are an expert {isLanguage} machine learning developer. Summarize the given context, system response for the stacktrace, for somebody that is trying to debug this stacktrace:
 
397
  response = SystemResponseChain({"StackTrace": chat_variables["StackTrace"], "UserResponse": UserResponse, "isLanguage": chat_variables["isLanguage"], "Context": chat_variables["Context"], "isFurtherContextNeeded": initDebuggerModelResponse})['text']
398
  else:
399
  response = initDebuggerModelResponse.split("SYSTEM RESPONSE:")[1]
 
 
 
 
 
400
  # summarize the conversation
401
  SummarizerChain = LLMChain(llm=llm, prompt=SummarizerPrompt)
402
  chat_variables["Context"] = SummarizerChain({"StackTrace": chat_variables["StackTrace"], "Context": chat_variables["Context"], "isLanguage": chat_variables["isLanguage"], "SystemResponse": response})['text']