dar-tau committed on
Commit
6dbcb5e
1 Parent(s): e048966

Update configs.py

Browse files
Files changed (1) hide show
  1. configs.py +13 -13
configs.py CHANGED
@@ -37,19 +37,19 @@ model_info = {
37
  'GPT-2 Small': dict(model_path='gpt2', original_prompt_template='{prompt}',
38
  interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
39
  layers_format=gpt_layers_format),
40
- 'Mixtral 8x7B Instruct (Experimental)': dict(model_path='TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ',
41
- token=os.environ['hf_token'], wait_with_hidden_states=True,
42
- original_prompt_template='<s>{prompt}',
43
- interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
44
- layers_format=llama_layers_format
45
- ),
46
- 'Wizard Vicuna 30B Uncensored (Experimental)': dict(model_path='TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ',
47
- token=os.environ['hf_token'],
48
- wait_with_hidden_states=True, dont_cuda=True, device_map='cuda',
49
- original_prompt_template='<s>USER: {prompt}',
50
- interpretation_prompt_template='<s>USER: [X] ASSISTANT: {prompt}',
51
- layers_format=llama_layers_format
52
- ),
53
  # 'GPT-2 Medium': dict(model_path='gpt2-medium', original_prompt_template='{prompt}',
54
  # interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
55
  # layers_format=gpt_layers_format),
 
37
  'GPT-2 Small': dict(model_path='gpt2', original_prompt_template='{prompt}',
38
  interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
39
  layers_format=gpt_layers_format),
40
+ # 'Mixtral 8x7B Instruct (Experimental)': dict(model_path='TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ',
41
+ # token=os.environ['hf_token'], wait_with_hidden_states=True,
42
+ # original_prompt_template='<s>{prompt}',
43
+ # interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
44
+ # layers_format=llama_layers_format
45
+ # ),
46
+ # 'Wizard Vicuna 30B Uncensored (Experimental)': dict(model_path='TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ',
47
+ # token=os.environ['hf_token'],
48
+ # wait_with_hidden_states=True, dont_cuda=True, device_map='cuda',
49
+ # original_prompt_template='<s>USER: {prompt}',
50
+ # interpretation_prompt_template='<s>USER: [X] ASSISTANT: {prompt}',
51
+ # layers_format=llama_layers_format
52
+ # ),
53
  # 'GPT-2 Medium': dict(model_path='gpt2-medium', original_prompt_template='{prompt}',
54
  # interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
55
  # layers_format=gpt_layers_format),