Zwea Htet committed on
Commit
38bc9e2
1 Parent(s): 9839b9f

added chat history

Browse files
Files changed (3) hide show
  1. .gitignore +2 -1
  2. app.py +5 -2
  3. models/bloom.py +3 -1
.gitignore CHANGED
@@ -2,4 +2,5 @@ venv
2
  data/__pycache__
3
  models/__pycache__
4
  .env
5
- __pycache__
 
 
2
  data/__pycache__
3
  models/__pycache__
4
  .env
5
+ __pycache__
6
+ vectorStores
app.py CHANGED
@@ -31,9 +31,12 @@ if input_text is not None:
31
  st.session_state.messages.append(('User', input_text))
32
  with st.spinner("Processing your query..."):
33
  bot_response = get_response(index, input_text)
34
-
35
  st.session_state.messages.append(('Bot', bot_response))
36
 
37
  # Display previous messages
 
38
  for sender, msg in st.session_state.messages[::-1]:
39
- message(msg, is_user = (sender=='User'))
 
 
 
31
  st.session_state.messages.append(('User', input_text))
32
  with st.spinner("Processing your query..."):
33
  bot_response = get_response(index, input_text)
34
+ print("bot: ", bot_response)
35
  st.session_state.messages.append(('Bot', bot_response))
36
 
37
  # Display previous messages
38
+ msg_key = 0
39
  for sender, msg in st.session_state.messages[::-1]:
40
+ is_user = sender == "User"
41
+ message(str(msg), is_user, key=str(msg_key)+f'_{sender}')
42
+ msg_key += 1
models/bloom.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  from json import dumps, loads
3
 
4
  import numpy as np
 
5
  import pandas as pd
6
  from dotenv import load_dotenv
7
  from llama_index import (Document, GPTVectorStoreIndex, LLMPredictor,
@@ -12,6 +13,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
12
  from utils.customLLM import CustomLLM
13
 
14
  load_dotenv()
 
15
 
16
  # get model
17
  # model_name = "bigscience/bloom-560m"
@@ -68,7 +70,7 @@ def initialize_index(index_name):
68
  storage_context = StorageContext.from_defaults(persist_dir=file_path)
69
  # load index
70
  index = load_index_from_storage(storage_context)
71
- return GPTVectorStoreIndex.load_from_disk(file_path)
72
  else:
73
  documents = prepare_data(r"./assets/regItems.json")
74
  index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
 
2
  from json import dumps, loads
3
 
4
  import numpy as np
5
+ import openai
6
  import pandas as pd
7
  from dotenv import load_dotenv
8
  from llama_index import (Document, GPTVectorStoreIndex, LLMPredictor,
 
13
  from utils.customLLM import CustomLLM
14
 
15
  load_dotenv()
16
+ openai.api_key = os.getenv("OPENAI_API_KEY")
17
 
18
  # get model
19
  # model_name = "bigscience/bloom-560m"
 
70
  storage_context = StorageContext.from_defaults(persist_dir=file_path)
71
  # load index
72
  index = load_index_from_storage(storage_context)
73
+ return index
74
  else:
75
  documents = prepare_data(r"./assets/regItems.json")
76
  index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)