TheBloke committed
Commit 21f06e5
Parent: 611a7a9

Upload README.md

Files changed (1): README.md (+31, -26)
README.md CHANGED
````diff
@@ -6,15 +6,16 @@ license_name: microsoft-research-license
 model_creator: Jeb Carter
 model_name: Psyonic Cetacean 20B
 model_type: llama
-prompt_template: '<|im_start|>system
-
-  {system_message}<|im_end|>
-
-  <|im_start|>user
-
-  {prompt}<|im_end|>
-
-  <|im_start|>assistant
-
-  '
+prompt_template: 'Below is an instruction that describes a task. Write a response
+  that appropriately completes the request.
+
+
+  ### Instruction:
+
+  {prompt}
+
+
+  ### Response:
+
+  '
 quantized_by: TheBloke
@@ -77,14 +78,15 @@ It is supported by:
 <!-- repositories-available end -->
 
 <!-- prompt-template start -->
-## Prompt template: ChatML
+## Prompt template: Alpaca
 
 ```
-<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 
 ```
 
@@ -157,11 +159,12 @@ prompts = [
     "What is 291 - 150?",
     "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
 ]
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 '''
 
 prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
@@ -203,11 +206,12 @@ from huggingface_hub import InferenceClient
 endpoint_url = "https://your-endpoint-url-here"
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 '''
 
 client = InferenceClient(endpoint_url)
@@ -270,11 +274,12 @@ model = AutoModelForCausalLM.from_pretrained(
 streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 '''
 
 # Convert prompt to tokens
````
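The net effect of the commit is easiest to see by expanding the new template. Below is a minimal sketch (plain Python, illustrative rather than taken from the README) that fills `{prompt}` into the Alpaca template with `str.format()`, mirroring the `prompt_template.format(prompt=prompt)` call in the README's vLLM snippet. Note that the other README snippets define the template as an f-string after `prompt` is already set, so the placeholder interpolates at definition time; in the vLLM snippet, if `prompt` were not defined before the f-string, it would raise a `NameError`.

```python
# Minimal sketch (not part of the commit): expand the new Alpaca template
# with str.format(), as the README's vLLM example does.
prompt_template = '''Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{prompt}

### Response:
'''

prompt = "Tell me about AI"  # example prompt reused from the README
print(prompt_template.format(prompt=prompt))
```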
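The YAML frontmatter in the first hunk stores this same template as a single-quoted multi-line scalar, where YAML folding turns one blank line into a line break and two blank lines into a blank line. A small sketch (assuming PyYAML is available, `pip install pyyaml`) showing that the scalar parses to the same text as the fenced "Prompt template: Alpaca" block:

```python
import yaml  # assumption: PyYAML is installed (pip install pyyaml)

# The prompt_template scalar exactly as the commit writes it into the
# README's YAML frontmatter.
frontmatter = """
prompt_template: 'Below is an instruction that describes a task. Write a response
  that appropriately completes the request.


  ### Instruction:

  {prompt}


  ### Response:

  '
"""

# One blank line folds to "\n", two blank lines fold to "\n\n", so this
# prints the same text as the fenced template above.
print(yaml.safe_load(frontmatter)["prompt_template"])
```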