TheBloke committed on
Commit
9934f59
1 Parent(s): 3007d9d

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -11,7 +11,7 @@ model_creator: Open-Orca
11
  model_name: OpenOrca x OpenChat - Preview2 - 13B
12
  model_type: llama
13
  pipeline_tag: text-generation
14
- prompt_template: 'user: {prompt}<|end_of_turn|>assistant:
15
 
16
  '
17
  quantized_by: TheBloke
@@ -60,10 +60,10 @@ It is also now supported by continuous batching server [vLLM](https://github.com
60
  <!-- repositories-available end -->
61
 
62
  <!-- prompt-template start -->
63
- ## Prompt template: openchat llama2 v1
64
 
65
  ```
66
- user: {prompt}<|end_of_turn|>assistant:
67
 
68
  ```
69
 
@@ -153,7 +153,7 @@ model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
153
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)
154
 
155
  prompt = "Tell me about AI"
156
- prompt_template=f'''user: {prompt}<|end_of_turn|>assistant:
157
 
158
  '''
159
 
 
11
  model_name: OpenOrca x OpenChat - Preview2 - 13B
12
  model_type: llama
13
  pipeline_tag: text-generation
14
+ prompt_template: 'GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:
15
 
16
  '
17
  quantized_by: TheBloke
 
60
  <!-- repositories-available end -->
61
 
62
  <!-- prompt-template start -->
63
+ ## Prompt template: OpenChat
64
 
65
  ```
66
+ GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:
67
 
68
  ```
69
 
 
153
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)
154
 
155
  prompt = "Tell me about AI"
156
+ prompt_template=f'''GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:
157
 
158
  '''
159