captain-awesome committed
Commit 9150421
1 Parent(s): 734cbbd

Update app.py

Files changed (1)
  1. app.py +12 -7
app.py CHANGED
@@ -13,6 +13,7 @@
 # from chromadb.utils import embedding_functions
 # from langchain.embeddings import SentenceTransformerEmbeddings
 # from langchain.embeddings import HuggingFaceBgeEmbeddings
+from langchain_community.llms import LlamaCpp
 from langchain.embeddings import HuggingFaceInstructEmbeddings
 from langchain.document_loaders import (
     CSVLoader,
@@ -347,15 +348,19 @@ def main():
         'threads': int(os.cpu_count() / 2)
     }
 
-    llm = CTransformers(
-        model = "TheBloke/zephyr-7B-beta-GGUF",
-        model_file = "zephyr-7b-beta.Q4_0.gguf",
-        model_type="mistral",
-        lib="avx2", # for CPU use
-        **config
-    )
+    # llm = CTransformers(
+    #     model = "TheBloke/zephyr-7B-beta-GGUF",
+    #     model_file = "zephyr-7b-beta.Q4_0.gguf",
+    #     model_type="mistral",
+    #     lib="avx2", # for CPU use
+    #     **config
+    # )
+
+    llm = LlamaCpp(model_path="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", temperature=0.75, max_tokens=2000, top_p=1)
     st.write("LLM Initialized:")
 
+
+
     model_name = "BAAI/bge-large-en"
     model_kwargs = {'device': 'cpu'}
     encode_kwargs = {'normalize_embeddings': False}
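Note on the new initialization: the LlamaCpp wrapper in langchain_community (backed by llama-cpp-python) expects model_path to be a local .gguf file rather than a Hugging Face repo id. Below is a minimal sketch, not part of this commit, of how the model file could be fetched and wired in; the chosen GGUF filename and the n_threads carry-over from the old CTransformers config are assumptions.

import os

from huggingface_hub import hf_hub_download
from langchain_community.llms import LlamaCpp

# Download the quantized GGUF file locally first; LlamaCpp needs a filesystem path.
model_path = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
    filename="mistral-7b-instruct-v0.2.Q4_K_M.gguf",  # assumed quantization variant
)

llm = LlamaCpp(
    model_path=model_path,
    temperature=0.75,
    max_tokens=2000,
    top_p=1,
    n_threads=int(os.cpu_count() / 2),  # assumption: mirrors the old CTransformers 'threads' setting
)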