from transformers import AutoTokenizer, AutoModelForCausalLM

import cpu_ai  # companion module that runs the ggml models on CPU

# Models selectable by index; the first two are ggml builds dispatched to
# cpu_ai in generate_code(), the rest are loaded through transformers.
models = [
    "abacaj/Replit-v2-CodeInstruct-3B-ggml",
    "marella/gpt-2-ggml",
    "WizardLM/WizardCoder-Python-34B-V1.0",
    "WizardLM/WizardCoder-15B-V1.0",
    "WizardLM/WizardCoder-Python-7B-V1.0",
    "WizardLM/WizardCoder-3B-V1.0",
    "WizardLM/WizardCoder-1B-V1.0",
]
def run_general_model(model_name, prompt, max_tokens, temperature=0.6):
    # Load the tokenizer and weights from the Hugging Face Hub, then generate.
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    return run_model(model, tokenizer, prompt, max_tokens, temperature)
def run_model(model, tokenizer, prompt, max_tokens, temperature=0.6):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    tokens = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        # Use the tokenizer's own end-of-sequence id rather than a hard-coded 2,
        # which only matches Llama-style tokenizers.
        eos_token_id=tokenizer.eos_token_id,
    )
    output = tokenizer.decode(tokens[0], skip_special_tokens=True)
    return output
def cleanup_response(generated_text):
    # TODO:
    # - remove comments (or convert them to python comments)
    # - test if code is valid (e.g. opening brackets have closing brackets etc.)
    # - wrap code in async if not yet wrapped
    code = generated_text
    return code
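
# Illustrative sketch (not part of the original app): one way the "test if
# code is valid" item in the TODO above could be handled, using the standard
# library's ast module. The helper name is hypothetical.
def _looks_like_valid_python(code: str) -> bool:
    """Hypothetical helper: return True if `code` parses as Python source."""
    import ast
    try:
        ast.parse(code)
        return True
    except SyntaxError:
        return False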
def generate_code(prompt, model_index, max_tokens, temperature=0.6):
    model_full_name = models[model_index]
    if model_index in (0, 1):
        # The ggml models are run on CPU through the cpu_ai helper.
        output = cpu_ai.generate_code(prompt, model_full_name, max_tokens, temperature)
    else:
        # All other models go through transformers' generate().
        output = run_general_model(model_full_name, prompt, max_tokens, temperature)
    generated_code = cleanup_response(output)
    return generated_code
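
# Illustrative usage sketch (not part of the original file): the prompt,
# model index, and token budget below are example values only.
if __name__ == "__main__":
    example_prompt = "Write a Python function that reverses a string."
    print(generate_code(example_prompt, model_index=0, max_tokens=128))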