TuringsSolutions committed
Commit c0b89f3
1 Parent(s): 0ce095d

Update app.py

Files changed (1): app.py (+17 -7)
app.py CHANGED
@@ -1,27 +1,37 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoTokenizer, AutoModel
 import torch
+import json
 
 # Load the tokenizer
 model_name = "TuringsSolutions/TechLegalV1"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-# Load the model
-# Assuming it's a CausalLM model, you might need to adjust based on your model's architecture
-model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+# Load adapter configuration manually
+adapter_config_path = "https://huggingface.co/TuringsSolutions/TechLegalV1/resolve/main/adapter_config.json"
+adapter_model_path = "https://huggingface.co/TuringsSolutions/TechLegalV1/resolve/main/adapter_model.safetensors"
+
+with open(adapter_config_path, 'r') as f:
+    adapter_config = json.load(f)
+
+# Initialize the model with the adapter configuration
+model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
+
+# Load adapter weights
+model.load_adapter(adapter_model_path, config=adapter_config)
 
 # Function to make predictions
 def predict(text):
     inputs = tokenizer(text, return_tensors="pt")
     with torch.no_grad():
-        outputs = model.generate(**inputs)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+        outputs = model(**inputs)
+    return outputs.last_hidden_state.mean(dim=1).squeeze().tolist()
 
 # Create a Gradio interface
 iface = gr.Interface(
     fn=predict,
     inputs=gr.inputs.Textbox(lines=2, placeholder="Enter text here..."),
-    outputs="text",
+    outputs="json",
     title="Tech Legal Model",
     description="A model for analyzing tech legal documents."
 )
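
The file URLs the commit introduces (adapter_config.json, adapter_model.safetensors) suggest the repo hosts a PEFT/LoRA adapter rather than full model weights, which would explain the move away from AutoModelForCausalLM. Two details in the committed version are worth flagging: Python's open() cannot read an https:// URL, so the json.load step would fail at startup, and transformers' load_adapter expects an adapter repo id or local directory rather than a direct .safetensors URL passed with a config= dict. Below is a minimal sketch of the same flow using the peft library instead; it assumes TechLegalV1 is a standard PEFT adapter whose adapter_config.json records its base model, and it is an untested illustration, not this Space's verified code.

# Minimal sketch of the intended flow using the peft library.
# Assumptions (not verified against this Space): TechLegalV1 is a
# standard PEFT/LoRA adapter repo, and its adapter_config.json
# populates base_model_name_or_path.
import gradio as gr
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModel, AutoTokenizer

adapter_id = "TuringsSolutions/TechLegalV1"

# PeftConfig resolves adapter_config.json from the Hub itself,
# replacing the manual open()/json.load on a URL.
peft_config = PeftConfig.from_pretrained(adapter_id)

# Load the base model the adapter was trained on, then attach
# the adapter weights (adapter_model.safetensors) on top.
base_id = peft_config.base_model_name_or_path
tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModel.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

def predict(text):
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Mean-pool the final hidden states into a single embedding,
    # mirroring the committed predict() above.
    return outputs.last_hidden_state.mean(dim=1).squeeze().tolist()

# gr.inputs.Textbox is the legacy pre-3.x namespace; current Gradio
# exposes the component as gr.Textbox.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs="json",
    title="Tech Legal Model",
    description="A model for analyzing tech legal documents.",
)

if __name__ == "__main__":
    iface.launch()

If the manual-download approach is preferred instead, huggingface_hub.hf_hub_download("TuringsSolutions/TechLegalV1", "adapter_config.json") would fetch the file to a local path that open() can actually read.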