Changed default temperature to 0.5 and max token length to 128 * 4
llm_response_generator.py
```diff
@@ -36,8 +36,8 @@ class LLLResponseGenerator():
         openai_model_name: str = "",
         # hf_repo_id: str = "tiiuae/falcon-7b-instruct",
         hf_repo_id: str = "mistralai/Mistral-7B-Instruct-v0.2",
-        temperature: float = 0.,
-        max_length: int = 128,
+        temperature: float = 0.5,
+        max_length: int = 128 * 4,
     ) -> str:
         """Call HuggingFace/OpenAI model for inference

@@ -142,8 +142,8 @@ if __name__ == "__main__":
     Response;
     """

-    temperature = 0.
-    max_length = 128
+    temperature = 0.5
+    max_length = 128 * 4

     model = LLLResponseGenerator()
```
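For context: both hunks change the same pair of defaults, once in the method signature and once in the `__main__` demo block. Temperature rises from `0.` to `0.5` (more varied sampling), and `max_length` grows from `128` to `128 * 4`, which evaluates to 512 tokens, so longer responses are less likely to be cut off. Below is a minimal usage sketch under the new defaults; the method name `llm_inference` and its `prompt` argument are assumptions for illustration, since the diff confirms only the class name, the parameters shown above, and the `str` return type.

```python
# Minimal sketch, not the repo's actual entry point: `llm_inference` and
# `prompt` are hypothetical names; the diff confirms only the class name,
# the parameters shown in the hunks, and the str return type.
from llm_response_generator import LLLResponseGenerator

temperature = 0.5     # new default sampling temperature
max_length = 128 * 4  # new default generation budget (512 tokens)

model = LLLResponseGenerator()
response: str = model.llm_inference(             # hypothetical method name
    prompt="How have you been feeling lately?",  # hypothetical argument
    openai_model_name="",                        # empty string, per the new defaults
    hf_repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    temperature=temperature,
    max_length=max_length,
)
print(response)
```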