raj-vir-singh committed on
Commit
3d69e25
β€’
1 Parent(s): de8cfd7

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +4 -4
  2. app.py +34 -0
  3. requirements.txt +2 -0
README.md CHANGED
@@ -1,10 +1,10 @@
1
  ---
2
- title: CodeGeneration CodeT5 Small
3
- emoji: πŸ¦€
4
- colorFrom: gray
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 3.29.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
1
  ---
2
+ title: TF CodeT5 Small
3
+ emoji: 🌍
4
+ colorFrom: yellow
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 3.28.0
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import os
import torch
from transformers import RobertaTokenizer, T5ForConditionalGeneration

# Hugging Face auth: use the "access_token" env var when set; otherwise fall
# back to True, which tells transformers to use locally cached credentials.
access_token = os.environ.get("access_token") or True

model_name = "ThoughtFocusAI/CodeGeneration-CodeT5-small"
device = "cuda" if torch.cuda.is_available() else "cpu"
model = T5ForConditionalGeneration.from_pretrained(
    model_name, use_auth_token=access_token).to(device)
tokenizer = RobertaTokenizer.from_pretrained(
    model_name, use_auth_token=access_token)


def generate_code(user_input):
    """Generate Python source code from a natural-language description.

    Args:
        user_input: Plain-text description of the desired code
            (e.g. "Add two numbers").

    Returns:
        The generated Python code as a string, with special tokens removed.
    """
    query = "Generate Python: " + user_input
    encoded_text = tokenizer(query, return_tensors='pt', padding='max_length',
                             truncation=True, max_length=512).input_ids.to(device)

    # inference — no_grad avoids building an autograd graph we never use
    with torch.no_grad():
        generated_code = model.generate(encoded_text, max_length=512)

    # decode generated tokens
    # FIX: .numpy() raises TypeError on CUDA tensors — move to CPU first
    # so this works on both CPU and GPU deployments.
    decoded_code = tokenizer.decode(
        generated_code[0].cpu().numpy(), skip_special_tokens=True)

    return decoded_code


# FIX: gr.inputs / gr.outputs are deprecated namespaces in Gradio 3.x;
# the top-level gr.Textbox component is the supported equivalent.
interface = gr.Interface(fn=generate_code,
                         inputs=gr.Textbox(
                             lines=3, label="Enter Text", placeholder="Ex-Add two numbers"),
                         outputs=gr.Textbox(label="Generated Code"))
interface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers
2
+ torch