Spaces:
Paused
Paused
Daniel Marques
committed on
Commit
•
48e8fbb
1
Parent(s):
4ec7545
fix: add trupple
Browse files
- run_localGPT.py +2 -2
run_localGPT.py
CHANGED
@@ -35,7 +35,6 @@ from constants import (
|
|
35 |
)
|
36 |
|
37 |
|
38 |
-
|
39 |
def load_model(device_type, model_id, model_basename=None, LOGGING=logging, stream=False):
|
40 |
"""
|
41 |
Select a model for text generation using the HuggingFace library.
|
@@ -77,6 +76,7 @@ def load_model(device_type, model_id, model_basename=None, LOGGING=logging, stre
|
|
77 |
|
78 |
# Create a pipeline for text generation
|
79 |
|
|
|
80 |
streamer = TextStreamer(tokenizer, skip_prompt=True)
|
81 |
|
82 |
pipe = pipeline(
|
@@ -95,7 +95,7 @@ def load_model(device_type, model_id, model_basename=None, LOGGING=logging, stre
|
|
95 |
local_llm = HuggingFacePipeline(pipeline=pipe)
|
96 |
logging.info("Local LLM Loaded")
|
97 |
|
98 |
-
return
|
99 |
|
100 |
|
101 |
def retrieval_qa_pipline(device_type, use_history, promptTemplate_type="llama"):
|
|
|
35 |
)
|
36 |
|
37 |
|
|
|
38 |
def load_model(device_type, model_id, model_basename=None, LOGGING=logging, stream=False):
|
39 |
"""
|
40 |
Select a model for text generation using the HuggingFace library.
|
|
|
76 |
|
77 |
# Create a pipeline for text generation
|
78 |
|
79 |
+
|
80 |
streamer = TextStreamer(tokenizer, skip_prompt=True)
|
81 |
|
82 |
pipe = pipeline(
|
|
|
95 |
local_llm = HuggingFacePipeline(pipeline=pipe)
|
96 |
logging.info("Local LLM Loaded")
|
97 |
|
98 |
+
return local_llm, streamer
|
99 |
|
100 |
|
101 |
def retrieval_qa_pipline(device_type, use_history, promptTemplate_type="llama"):
|