SHASWATSINGH3101 committed on
Commit
91710e4
1 Parent(s): 42836bf

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -0
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pandas as pd
3
+ import numpy as np
4
+ import gradio as gr
5
+ from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
6
+
7
# Hub repository that holds both the merged-LoRA model and its tokenizer.
_REPO_ID = "SHASWATSINGH3101/Qwen2-0.5B-Instruct_lora_merge"

# Causal-LM weights; device_map='auto' lets the loader place layers on
# whatever device(s) are available.
model = AutoModelForCausalLM.from_pretrained(_REPO_ID, device_map='auto')

# Matching tokenizer; reuse EOS as the pad token (presumably the model
# ships without a dedicated one — confirm against the repo config).
tokenizer = AutoTokenizer.from_pretrained(_REPO_ID)
tokenizer.pad_token = tokenizer.eos_token
16
+
17
def gen(model, p, maxlen=100, sample=True):
    """Run generation on *model* for prompt *p*.

    Uses the module-level ``tokenizer`` for both encoding and decoding.
    Returns the list of decoded sequences (one entry, since
    ``num_return_sequences=1``).
    """
    encoded = tokenizer(p, return_tensors="pt").to(model.device)
    generated = model.generate(
        **encoded,
        max_new_tokens=maxlen,
        do_sample=sample,
        num_return_sequences=1,
        temperature=0.1,
        num_beams=1,
        top_p=0.95,
    )
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
22
+
23
def generate_letter(prompt):
    """Generate a legal letter for *prompt* via the module-level model.

    Seeds the RNG for reproducible sampling, wraps the prompt in the
    Instruct/Output template, generates up to 259 new tokens, and returns
    the text between the ``Output:`` marker and the ``#End`` sentinel.
    """
    seed = 42
    set_seed(seed)  # deterministic sampling across calls

    # NOTE(review): the prompt appears twice in this template — looks like
    # a paste error, but kept as-is since it changes what the model sees.
    # TODO confirm intentional.
    in_data = f"Instruct: {prompt}\n{prompt}\nOutput:\n"

    # Generate response (decoded text echoes the prompt template).
    peft_model_res = gen(model, in_data, 259)

    # Robustness fix: the original split('Output:\n')[1] raised IndexError
    # whenever the marker was absent from the decoded text; partition
    # degrades gracefully instead.
    _, marker, after = peft_model_res[0].partition('Output:\n')
    peft_model_output = after if marker else peft_model_res[0]

    # Keep only the text before the '#End' sentinel, if present.
    prefix, _, _ = peft_model_output.partition('#End')
    return prefix.strip()
37
+
38
# Single-textbox Gradio UI: assemble the input widget first, then wire
# it to the generator function.
_prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
iface = gr.Interface(
    fn=generate_letter,
    inputs=_prompt_box,
    outputs="text",
    title="Legal Letter Generator",
    description=(
        "Generate a letter informing someone of potential legal action "
        "due to a dispute or violation."
    ),
)

# Serve on all interfaces at the conventional Gradio port.
iface.launch(server_name="0.0.0.0", server_port=7860)