Stevie23 committed on
Commit
bca04f9
1 Parent(s): f78cc68

Upload microMKIA.py

Files changed (1)
  1. microMKIA.py +75 -0
microMKIA.py ADDED
@@ -0,0 +1,75 @@
+ from silence_tensorflow import silence_tensorflow # silence_tensorflow stops TensorFlow complaining about optional files it thinks it needs
+ silence_tensorflow()
+ import logging
+ logging.disable(logging.WARNING) # disable warning-level logging to hide messages that are not real errors
+ from langchain import HuggingFacePipeline, PromptTemplate # LangChain wrappers for Hugging Face pipelines and prompts
+ from langchain.memory import ConversationBufferWindowMemory # windowed memory LangChain uses to remember the conversation
+ from langchain.chains import ConversationChain # chain that manages a back-and-forth conversation
+ from transformers import pipeline # the main pipeline from transformers
+ import readline # readline for a slightly nicer, easier-to-use input prompt
+ from transformers import GenerationConfig # configuration object for text generation
+ import re # re for matching regular expressions
+
+ modelPath = "LittleMKIA" # local path to the language model
+ task = "text2text-generation" # task we want the transformers pipeline to perform
+
+ config = GenerationConfig.from_pretrained(modelPath) # set up the generation configuration object
+
+ pipe = pipeline(task=task, model=modelPath, min_length=20, max_new_tokens=200, temperature=0.7, early_stopping=True,
+                 no_repeat_ngram_size=3, do_sample=True, top_k=150, generation_config=config) # set up the pipeline
+
+ llm = HuggingFacePipeline(pipeline=pipe) # make the transformers pipeline usable by LangChain
+
+ # create a template for the prompt
+ template = '''
+ {history}
+ You are MKIA, an intelligent companion and assistant.
+ User: {input}'''
+
+ # create the prompt from the template
+ prompt = PromptTemplate(
+     input_variables=["input", "history"],
+     template=template)
+
+ # set up a memory object that keeps a window of the last 1000 exchanges
+ mem = ConversationBufferWindowMemory(k=1000, memory_key="history", return_messages=False, ai_prefix="MKIA")
+
+ # make a conversation chain from the model, prompt, and memory; verbose=False so we only get the regular output
+ chat_chain = ConversationChain(
+     llm=llm,
+     prompt=prompt,
+     memory=mem,
+     verbose=False
+ )
+ # create the function that acts as the program's main loop
+ def loop():
+     while True: # loop until the user quits
+         In = input('User > ') # ask for input
+         if re.match(r'[Tt]hink:\s*(.*)', In) is not None:
+             # if the input starts with "think:" or "Think:" we bypass LangChain
+
+             In2 = re.sub(r'^[Tt]hink:', '', In).strip()
+             # remove the prefix at the beginning
+
+             out = pipe(In2)[0]['generated_text']
+             # get the output directly from the language model
+
+             print(out)
+
+         elif In == 'quit':
+             break
+
+         else:
+             out1 = llm.predict(In)
+             mem.chat_memory.add_user_message(In)
+             #mem.chat_memory.add_ai_message(out1)
+
+             print(f'MKIA-model > {out1}\n')
+             out2 = chat_chain.run(input=In + ' ' + out1) # feed the input plus the raw model reply to LangChain and get the result
+             mem.chat_memory.add_ai_message(out1 + ' ' + out2)
+             #mem.chat_memory.add_ai_message(out2)
+
+             print(f'MKIA-bot > {out2}\n') # let the user know what MKIA said
+             print('\n\n') # print two newlines to keep the output readable
+
+ loop()
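
For reference, here is a minimal sketch of roughly what the chain sends to the model each turn: ConversationBufferWindowMemory fills {history} with the buffered transcript as a plain string (since return_messages=False), and PromptTemplate substitutes it together with the new input before the text reaches the HuggingFacePipeline. The history and input strings below are hypothetical examples, not output from the real model.

    from langchain import PromptTemplate

    # same template as in microMKIA.py
    template = '''
    {history}
    You are MKIA, an intelligent companion and assistant.
    User: {input}'''

    prompt = PromptTemplate(input_variables=["input", "history"], template=template)

    # hypothetical buffered history; the windowed memory supplies a plain
    # string like this when return_messages=False
    history = "User: hi\nMKIA: hello"

    # prints the template with {history} and {input} filled in, which is
    # roughly the text the model receives on this turn
    print(prompt.format(history=history, input="what can you do?"))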