Phaneendra99 committed on
Commit
de89c02
1 Parent(s): 74f6047

Update llm_generator.py

Browse files
Files changed (1) hide show
  1. llm_generator.py +3 -4
llm_generator.py CHANGED
@@ -26,17 +26,16 @@ config = {'max_new_tokens': 256,
26
  'context_length': 4096, # Set to max for Chat Summary, Llama-2 has a max context length of 4096
27
  }
28
 
29
- # Load model directly
30
- from transformers import AutoModel
31
- model = AutoModel.from_pretrained("TheBloke/Llama-2-7B-Chat-GGUF")
32
 
33
- llm = CTransformers(model,
 
34
  callbacks=[StreamingStdOutCallbackHandler()],
35
  config=config)
36
 
37
 
38
 
39
 
 
40
  # Define system and user message templates
41
  with open(os.path.join(script_dir, 'prompts', 'system_message_template.txt'), 'r') as file:
42
  system_message_template = file.read().replace('\n', '')
 
26
  'context_length': 4096, # Set to max for Chat Summary, Llama-2 has a max context length of 4096
27
  }
28
 
 
 
 
29
 
30
+
31
+ llm = CTransformers(model=os.path.join(script_dir, 'transformers', 'llama-2-7b-chat.Q2_K.gguf'),
32
  callbacks=[StreamingStdOutCallbackHandler()],
33
  config=config)
34
 
35
 
36
 
37
 
38
+
39
  # Define system and user message templates
40
  with open(os.path.join(script_dir, 'prompts', 'system_message_template.txt'), 'r') as file:
41
  system_message_template = file.read().replace('\n', '')