yasserrmd committed
Commit b760cb4
Parent: bbc225c

Update app.py
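
This commit removes the intermediate generate_infographic_details helper (a Groq llama-3.1-70b-versatile call that pre-processed the user's description) and passes request.description straight into PROMPT_TEMPLATE. As the diff shows, the removed helper never returned its generated_text, so the route had been formatting the prompt with None. The helper's temperature=0.5 / max_tokens=5000 settings move onto the Qwen2.5-Coder-32B-Instruct request (previously 0.4 / 6000), and a stale commented-out copy of the generation loop is deleted.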

Files changed (1): app.py +3 -45
app.py CHANGED
@@ -47,22 +47,6 @@ async def extract_code_blocks(markdown_text):
 
     return code_blocks
 
-async def generate_infographic_details(request: InfographicRequest):
-    description = request.description
-    generated_completion = client.chat.completions.create(
-        model="llama-3.1-70b-versatile",
-        messages=[
-            {"role": "system", "content": SYSTEM_INSTRUCT},
-            {"role": "user", "content": description}
-        ],
-        temperature=0.5,
-        max_tokens=5000,
-        top_p=1,
-        stream=False,
-        stop=None
-    )
-    generated_text = generated_completion.choices[0].message.content
-
 # Route to serve the HTML template
 @app.get("/", response_class=HTMLResponse)
 async def serve_frontend():
@@ -71,15 +55,15 @@ async def serve_frontend():
 # Route to handle infographic generation
 @app.post("/generate")
 async def generate_infographic(request: InfographicRequest):
-    description = await generate_infographic_details(request)
+    description = request.description
     prompt = PROMPT_TEMPLATE.format(description=description)
 
     messages = [{"role": "user", "content": prompt}]
     stream = clientHFInference.chat.completions.create(
         model="Qwen/Qwen2.5-Coder-32B-Instruct",
         messages=messages,
-        temperature=0.4,
-        max_tokens=6000,
+        temperature=0.5,
+        max_tokens=5000,
         top_p=0.7,
         stream=True,
     )
@@ -95,29 +79,3 @@ async def generate_infographic(request: InfographicRequest):
         return JSONResponse(content={"html": code_blocks[0]})
     else:
         return JSONResponse(content={"error": "No generation"}, status_code=500)
-
-    # try:
-    #     messages = [{"role": "user", "content": prompt}]
-    #     stream = client.chat.completions.create(
-    #         model="Qwen/Qwen2.5-Coder-32B-Instruct",
-    #         messages=messages,
-    #         temperature=0.4,
-    #         max_tokens=6000,
-    #         top_p=0.7,
-    #         stream=True,
-    #     )
-
-
-    #     generated_text = ""
-    #     for chunk in stream:
-    #         generated_text += chunk.choices[0].delta.content
-
-    #     print(generated_text)
-    #     code_blocks = await extract_code_blocks(generated_text)
-    #     if code_blocks:
-    #         return JSONResponse(content={"html": code_blocks[0]})
-    #     else:
-    #         return JSONResponse(content={"error": "No generation"}, status_code=500)
-
-    # except Exception as e:
-    #     return JSONResponse(content={"error": str(e)}, status_code=500)
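
For reference, a minimal sketch of app.py's /generate flow as it stands after this commit. The route body and sampling parameters come straight from the diff; the chunk-accumulation loop between the two visible hunks is reconstructed from the commented-out copy this commit deletes. The client setup, PROMPT_TEMPLATE value, and the body of extract_code_blocks are assumptions made to keep the example self-contained (the diff only shows their names), so treat them as illustrative, not the Space's actual code.

    import re

    from fastapi import FastAPI
    from fastapi.responses import JSONResponse
    from huggingface_hub import InferenceClient  # assumed client type behind clientHFInference
    from pydantic import BaseModel

    app = FastAPI()

    # Assumption: clientHFInference is an OpenAI-compatible HF inference client.
    clientHFInference = InferenceClient(api_key="hf_...")  # hypothetical token

    # Assumption: the real template lives elsewhere in app.py; only the name appears in the diff.
    PROMPT_TEMPLATE = "Create a single-file HTML infographic for: {description}"

    class InfographicRequest(BaseModel):
        description: str

    async def extract_code_blocks(markdown_text):
        # Assumption: pulls fenced code blocks out of the model's markdown reply;
        # the diff shows only the signature and the `return code_blocks` line.
        code_blocks = re.findall(r"```(?:\w+)?\s*\n(.*?)```", markdown_text, re.DOTALL)
        return code_blocks

    @app.post("/generate")
    async def generate_infographic(request: InfographicRequest):
        description = request.description  # the Groq pre-processing step is gone
        prompt = PROMPT_TEMPLATE.format(description=description)

        messages = [{"role": "user", "content": prompt}]
        stream = clientHFInference.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,   # 0.4 before this commit
            max_tokens=5000,   # 6000 before this commit
            top_p=0.7,
            stream=True,
        )

        # Reconstructed from the deleted commented-out block; `or ""` guards
        # against empty delta chunks, a small hardening over the original.
        generated_text = ""
        for chunk in stream:
            generated_text += chunk.choices[0].delta.content or ""

        code_blocks = await extract_code_blocks(generated_text)
        if code_blocks:
            return JSONResponse(content={"html": code_blocks[0]})
        return JSONResponse(content={"error": "No generation"}, status_code=500)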