lucidmorto committed on
Commit 0cc4a5a
1 Parent(s): 0f051eb

refactor: Replace custom model loading with Gradio Client


Simplified the code by using the Gradio Client to interact with the model hosted in the 'umutbozdag/humanizer_model' Space. This removes the need for manual model loading and preprocessing, reducing potential points of failure and streamlining text generation.
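For context, the new call path can be exercised on its own. Below is a minimal sketch using the Space name and `/predict` endpoint taken from the diff; the sample input string is purely illustrative:

```python
from gradio_client import Client

# Connect to the hosted Space (name taken from the diff below)
client = Client("umutbozdag/humanizer_model")

# Call the Space's /predict endpoint (sample input text is illustrative)
result = client.predict(
    "This is a sample passage to rewrite.",  # str in 'Input Text' Textbox component
    api_name="/predict"
)
print(result)
```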

Files changed (1)
  1. app.py +8 -34
app.py CHANGED
@@ -1,42 +1,16 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-from huggingface_hub import HfApi
+from gradio_client import Client
 
-# Get the latest model from your space
-api = HfApi()
-space_name = "umut-bozdag/humanizer_model"  # Replace with your actual space name
-model_files = api.list_repo_files(space_name)
-model_file = next(file for file in model_files if file.endswith('.bin'))
-model_revision = api.get_repo_info(space_name).sha
-
-# Load the model and tokenizer from the space
-tokenizer = AutoTokenizer.from_pretrained(space_name, revision=model_revision)
-model = AutoModelForSeq2SeqLM.from_pretrained(space_name, revision=model_revision)
+# Create a client to interact with your Space
+client = Client("umutbozdag/humanizer_model")
 
 def generate_text(input_text):
-    # Preprocess input text
-    input_text = input_text.strip()
-
-    # Prepare input for the model
-    input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=256, truncation=True)
-
-    # Generate text with parameters matching your training setup
-    outputs = model.generate(
-        input_ids,
-        max_length=256,
-        num_return_sequences=1,
-        no_repeat_ngram_size=2,
-        top_k=30,
-        top_p=0.9,
-        temperature=0.7,
-        do_sample=True,
-        early_stopping=True,
-        num_beams=4
+    # Use the client to predict using your Space
+    result = client.predict(
+        input_text,  # str in 'Input Text' Textbox component
+        api_name="/predict"
     )
-
-    # Decode and clean up the generated text
-    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return generated_text.strip()
+    return result
 
 iface = gr.Interface(
     fn=generate_text,
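
After this change, app.py reduces to roughly the following. Note that the diff truncates after `fn=generate_text,`, so the `gr.Interface` input/output components and the `launch()` call here are assumptions, not part of the shown commit:

```python
import gradio as gr
from gradio_client import Client

# Create a client to interact with the hosted Space
client = Client("umutbozdag/humanizer_model")

def generate_text(input_text):
    # Forward the request to the Space's /predict endpoint
    result = client.predict(
        input_text,  # str in 'Input Text' Textbox component
        api_name="/predict"
    )
    return result

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),  # assumed: diff truncates here
    outputs="text",                         # assumed
)

iface.launch()  # assumed: standard Gradio entry point
```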