lucidmorto committed
Commit 6d02da4
1 Parent(s): 0cc4a5a

feat: replace client API with direct model inference


Switched from the external Gradio client to loading the model and tokenizer directly with the transformers library. This improves efficiency by removing the dependency on the client API and gives more direct control over model inference.

Files changed (1)
  1. app.py +8 -9
app.py CHANGED
@@ -1,16 +1,15 @@
 import gradio as gr
-from gradio_client import Client
+from transformers import AutoTokenizer, AutoModelForCausalLM

-# Create a client to interact with your Space
-client = Client("umutbozdag/humanizer_model")
+# Load the model and tokenizer
+model = AutoModelForCausalLM.from_pretrained("umutbozdag/autotrain-g39vl-h3lir")
+tokenizer = AutoTokenizer.from_pretrained("umutbozdag/autotrain-g39vl-h3lir")

 def generate_text(input_text):
-    # Use the client to predict using your Space
-    result = client.predict(
-        input_text,  # str in 'Input Text' Textbox component
-        api_name="/predict"
-    )
-    return result
+    # Tokenize input and generate text
+    inputs = tokenizer(input_text, return_tensors="pt")
+    outputs = model.generate(**inputs, max_length=100)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)

 iface = gr.Interface(
     fn=generate_text,
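
For reference, a minimal sketch of exercising the new inference path outside of Gradio; the model ID and generation settings are taken from the diff above, while the sample prompt is hypothetical:

from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "umutbozdag/autotrain-g39vl-h3lir"  # ID from the diff above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Tokenize a sample prompt and generate, mirroring generate_text in app.py
inputs = tokenizer("Rewrite this sentence so it sounds natural.", return_tensors="pt")
# max_length caps the total sequence (prompt + generated tokens), so long
# prompts leave little room for output; max_new_tokens would cap output only
outputs = model.generate(**inputs, max_length=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

One design note: because max_length=100 bounds prompt plus completion together, inputs approaching 100 tokens will produce little or no generated text; passing max_new_tokens instead is a common alternative when input length varies.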