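# Typed INI configuration: every value is written as <type>:<value> (str:, int:, float:),
# presumably cast to the named type by the application's config loader.
# [server] holds the bind address, port, and worker count for the API server (assumed).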
[server]
host = str:0.0.0.0
port = int:6969
workers = int:1
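# Short model aliases mapped to Hugging Face Hub model IDs; entries prefixed with '#'
# are disabled (the largest checkpoints are kept here only for reference).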
[models.names]
gpt2 = str:openai-community/gpt2
gpt2_medium = str:openai-community/gpt2-medium
gpt2_large = str:openai-community/gpt2-large
gpt2_xl = str:openai-community/gpt2-xl
llama_3_8b_instruct = str:meta-llama/Meta-Llama-3-8B-Instruct
# llama_3_70b_instruct = str:meta-llama/Meta-Llama-3-70B-Instruct
opt_125m = str:facebook/opt-125m
opt_1.3b = str:facebook/opt-1.3b
opt_2.7b = str:facebook/opt-2.7b
opt_6.7b = str:facebook/opt-6.7b
opt_13b = str:facebook/opt-13b
# opt_66b = str:facebook/opt-66b
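# Parameters shared by all models at load and run time; dtype and device strings
# follow the usual PyTorch conventions (assumed).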
[models.params]
dtype = str:float32
load_device = str:cuda
run_device = str:cuda
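# Defaults for encryption (message embedding). gen_model selects an alias from
# [models.names]; num_beams and repetition_penalty look like standard Hugging Face
# generation arguments, while delta, msg_base, seed_scheme, window_length and
# private_key are assumed to parameterize the embedding scheme itself.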
[encrypt.default]
gen_model = str:gpt2
start_pos = int:0
delta = float:10.0
msg_base = int:2
seed_scheme = str:sha_left_hash
window_length = int:1
private_key = int:0
min_new_tokens_ratio = float:1.0
max_new_tokens_ratio = float:2.0
num_beams = int:4
repetition_penalty = float:1.0
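# Defaults for decryption; these presumably have to match the values used at
# encryption time for the hidden message to be recovered correctly.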
[decrypt.default]
gen_model = str:gpt2
msg_base = int:2
seed_scheme = str:sha_left_hash
window_length = int:1
private_key = int:0