Shariar00 commited on
Commit
28dd5be
1 Parent(s): fa5bd35

initial commit

Browse files
Files changed (1) hide show
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
3
+
4
+ # Assuming you have loaded your model and tokenizer
5
+ # Replace this with your actual model and tokenizer
6
+
7
+
8
+ # Define the model function for Gradio
9
def generate_summary(input_text):
    """Summarize *input_text* with the fine-tuned medical summarization model.

    Parameters
    ----------
    input_text : str
        Free-form medical text (e.g. a patient's symptom description).

    Returns
    -------
    str
        The generated summary string.
    """
    # NOTE(review): downloading/instantiating the model on every call is
    # very slow — consider hoisting these three lines to module level once
    # that behavior change is confirmed acceptable.
    tokenizer = AutoTokenizer.from_pretrained(
        "Shariar00/medical_summarization_finetune_medical_qa"
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        "Shariar00/medical_summarization_finetune_medical_qa"
    )
    text_generation_pipeline = pipeline(
        "summarization", model=model, tokenizer=tokenizer
    )

    # The summarization pipeline returns a list of dicts like
    # [{"summary_text": "..."}], one per returned sequence.
    output = text_generation_pipeline(
        input_text, max_length=512, num_return_sequences=1
    )

    # BUG FIX: the original returned output[0] (the whole dict), which
    # Gradio's "text" output would render as a stringified dict. Extract
    # the actual summary string instead. (Also removed the unused `prompt`
    # variable and the block of commented-out dead code.)
    return output[0]["summary_text"]
37
+
38
# Wire the summarizer into a minimal Gradio app: one plain-text input
# component, one plain-text output component, backed by generate_summary.
_ui_config = dict(
    fn=generate_summary,
    inputs="text",
    outputs="text",
)
iface = gr.Interface(**_ui_config)

# Serve the app (blocking call that starts the local web server).
iface.launch()