tudormunteanu committed
Commit 1e0e52a
0 Parent(s):

first commit
Files changed (3)
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +103 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Tiny Coder
+ emoji: 🌖
+ colorFrom: red
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 5.6.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ short_description: Hold me closer, tiny coder!
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,103 @@
+ # app.py
+
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ # Cache loaded models so each size is downloaded and initialized only once
+ _model_cache = {}
+
+ def load_model(model_size: str = "32B"):
+     """
+     Load the model and tokenizer for the selected size, using the
+     Qwen2.5-Coder instruct checkpoints from the Hugging Face Hub.
+     """
+     model_map = {
+         "0.5B": "Qwen/Qwen2.5-Coder-0.5B-Instruct",
+         "1.5B": "Qwen/Qwen2.5-Coder-1.5B-Instruct",
+         "3B": "Qwen/Qwen2.5-Coder-3B-Instruct",
+         "7B": "Qwen/Qwen2.5-Coder-7B-Instruct",
+         "14B": "Qwen/Qwen2.5-Coder-14B-Instruct",
+         "32B": "Qwen/Qwen2.5-Coder-32B-Instruct",
+     }
+
+     model_id = model_map.get(model_size, "Qwen/Qwen2.5-Coder-7B-Instruct")  # default to 7B if size not found
+
+     if model_id not in _model_cache:
+         tokenizer = AutoTokenizer.from_pretrained(model_id)
+         model = AutoModelForCausalLM.from_pretrained(
+             model_id,
+             torch_dtype=torch.float16,
+             device_map="auto"
+         )
+         _model_cache[model_id] = (model, tokenizer)
+     return _model_cache[model_id]
+
+ def process_query(query: str, model_size: str = "7B") -> str:
+     """
+     Process a single query and return the generated response.
+     """
+     if not query:
+         return ""
+
+     try:
+         model, tokenizer = load_model(model_size)
+
+         # Prepare the input
+         inputs = tokenizer(query, return_tensors="pt").to(model.device)
+
+         # Generate the response; do_sample=True is required for
+         # temperature/top_p to take effect
+         with torch.no_grad():
+             outputs = model.generate(
+                 **inputs,
+                 max_new_tokens=512,
+                 do_sample=True,
+                 temperature=0.7,
+                 top_p=0.9,
+                 pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id
+             )
+
+         # Decode only the newly generated tokens so the echoed
+         # prompt is not included in the output
+         new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
+         return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
+
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ def main():
+     with gr.Blocks() as demo:
+         with gr.Row():
+             model_size = gr.Radio(
+                 choices=["0.5B", "1.5B", "3B", "7B", "14B", "32B"],
+                 label="Qwen2.5-Coder Model Size:",
+                 value="32B"
+             )
+
+         with gr.Row():
+             input_text = gr.Textbox(
+                 lines=5,
+                 label="Input",
+                 placeholder="Enter your query here..."
+             )
+
+         with gr.Row():
+             output_text = gr.Textbox(
+                 lines=10,
+                 label="Output"
+             )
+
+         submit_btn = gr.Button("Generate")
+
+         submit_btn.click(
+             fn=process_query,
+             inputs=[input_text, model_size],
+             outputs=output_text
+         )
+
+     # Launch outside the Blocks context so the full UI is registered first
+     demo.launch(max_threads=5)
+
+ if __name__ == "__main__":
+     main()
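
One caveat on the code above: the Qwen2.5-Coder *-Instruct checkpoints are chat-tuned, so tokenizing the raw query directly (as process_query does) can produce weaker answers than wrapping it in the tokenizer's chat template. A minimal sketch of that alternative input-preparation step, shown here with the 0.5B checkpoint and an example query purely for illustration:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")

    # Wrap the user query in the chat format the instruct models were trained on
    messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,  # append the assistant header so the model starts answering
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    # `inputs` can then be passed to model.generate() exactly as in process_query above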