# Silence TensorFlow start-up noise and library warnings before anything
# else imports TensorFlow, so the chat prompt stays readable.
from silence_tensorflow import silence_tensorflow

silence_tensorflow()

import logging

logging.disable(logging.WARNING)

import re
import readline  # noqa: F401 -- enables arrow-key history/editing in input()

from langchain import HuggingFacePipeline, PromptTemplate
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferWindowMemory
from transformers import GenerationConfig, pipeline

# Local model directory and the pipeline task it serves.
modelPath = "LittleMKIA"
task = "text2text-generation"

# Load the generation defaults saved alongside the model.
config = GenerationConfig.from_pretrained(modelPath)

pipe = pipeline(
    task=task,
    model=modelPath,
    min_length=20,
    max_new_tokens=200,
    temperature=0.7,
    early_stopping=True,
    no_repeat_ngram_size=3,
    do_sample=True,
    top_k=150,
    generation_config=config,
)

# Wrap the transformers pipeline so LangChain can drive it as an LLM.
llm = HuggingFacePipeline(pipeline=pipe)
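
# Sanity check (illustrative, commented out): a text2text-generation
# pipeline returns a list of dicts keyed by 'generated_text', which is
# how the REPL below reads its output:
#   print(pipe("Hello, who are you?")[0]['generated_text'])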

template = '''
{history}
You are MKIA, an intelligent companion and assistant.
User: {input}'''

prompt = PromptTemplate(
    input_variables=["input", "history"],
    template=template,
)
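
# For illustration, a rendered prompt for the input "hello" with one
# prior exchange in memory looks roughly like:
#   Human: hi
#   MKIA: hello there
#   You are MKIA, an intelligent companion and assistant.
#   User: hello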

# Windowed conversation memory: keep the last k exchanges in {history},
# with the assistant's turns labelled "MKIA".
mem = ConversationBufferWindowMemory(
    k=1000,
    memory_key="history",
    return_messages=False,
    ai_prefix="MKIA",
)

chat_chain = ConversationChain(
    llm=llm,
    prompt=prompt,
    memory=mem,
    verbose=False,
)
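
# Note: chat_chain.run() renders the template with the stored history,
# calls the LLM, and saves the new input/output pair back into mem on
# its own -- in addition to the manual add_*_message calls in loop().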

# Interactive REPL: "think: <text>" queries the raw pipeline, "quit"
# exits, and anything else runs the two-stage model/chain flow.
def loop():
    while True:
        user_input = input('User > ')

        think = re.match(r'[Tt]hink:(.*)', user_input)
        if think is not None:
            # Direct query to the raw pipeline, bypassing the prompt
            # template and conversation memory.
            query = think.group(1).strip()
            out = pipe(query)[0]['generated_text']
            print(out)

        elif user_input == 'quit':
            break

        else:
            # First pass: a raw completion from the bare LLM, recorded
            # as the user's turn.
            out1 = llm.predict(user_input)
            mem.chat_memory.add_user_message(user_input)
            print(f'MKIA-model > {out1}\n')

            # Second pass: feed the input plus the first completion back
            # through the conversation chain, then store both outputs as
            # the assistant's turn.
            out2 = chat_chain.run(input=user_input + ' ' + out1)
            mem.chat_memory.add_ai_message(out1 + ' ' + out2)
            print(f'MKIA-bot > {out2}\n')
            print('\n\n')


loop()
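
# Usage: run the script and chat at the "User > " prompt, e.g.
#   User > think: what is the capital of France?   (raw model query)
#   User > hello there                             (full chain)
#   User > quit                                    (exit)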