Create LordgpT
LordgpT
ADDED
@@ -0,0 +1,39 @@
+import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+# Load the model and tokenizer (8-bit quantization keeps the 8B model within memory limits)
+model_name = "aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
+    device_map="auto",
+)
+
+def generate_text(prompt, max_length=100, temperature=0.7):
+    # Tokenize the prompt and move it to the same device as the model
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+    outputs = model.generate(
+        **inputs,
+        max_length=max_length,
+        temperature=temperature,
+        do_sample=True,
+        top_p=0.9,
+        top_k=50,
+        num_return_sequences=1,
+        pad_token_id=tokenizer.eos_token_id,
+    )
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+# Create a Gradio interface
+gr.Interface(
+    fn=generate_text,
+    inputs=[
+        gr.Textbox(label="Input Text"),
+        gr.Slider(label="Max Length", minimum=1, maximum=500, value=100, step=1),
+        gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.7, step=0.1),
+    ],
+    outputs=gr.Textbox(label="Generated Text"),
+    title="LLAMA 3.1 8B Model",
+    description="Generate text using the LLAMA 3.1 8B model.",
+).launch()
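
Note that 8-bit loading requires bitsandbytes and accelerate to be installed alongside gradio and transformers; on a Space these would normally go in a requirements.txt, which is not part of this diff. Once the Space is running, the interface can also be queried programmatically. Below is a minimal sketch using gradio_client, assuming the Space is reachable at "your-username/LordgpT" (a placeholder, not a real Space ID from this commit):

from gradio_client import Client

# Placeholder Space ID -- replace with the actual owner/name of this Space
client = Client("your-username/LordgpT")

# Positional arguments match the Interface inputs: prompt, max length, temperature
result = client.predict(
    "Once upon a time",   # Input Text
    100,                  # Max Length
    0.7,                  # Temperature
    api_name="/predict",  # default endpoint name for a gr.Interface
)
print(result)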