Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,32 +1,47 @@
|
|
1 |
import os
|
2 |
import requests
|
3 |
import json
|
|
|
4 |
from flask import Flask, request, jsonify
|
5 |
from ddtrace import patch_all, tracer
|
|
|
|
|
6 |
|
7 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
patch_all()
|
9 |
|
|
|
|
|
|
|
10 |
app = Flask(__name__)
|
11 |
|
12 |
-
#
|
13 |
os.environ['DD_LLMOBS_ENABLED'] = '1'
|
14 |
os.environ['DD_LLMOBS_ML_APP'] = 'anything-api'
|
15 |
os.environ['DD_LLMOBS_AGENTLESS_ENABLED'] = '1'
|
16 |
|
17 |
@app.route('/llm_call', methods=['POST'])
|
18 |
def handle_llm_call():
|
|
|
|
|
19 |
# Extract data from the incoming request (e.g., message to LLM)
|
20 |
data = request.get_json()
|
21 |
message = data.get("message")
|
22 |
|
23 |
if not message:
|
|
|
24 |
return jsonify({"error": "No message provided"}), 400
|
25 |
|
26 |
url = 'https://severian-anything.hf.space/api/v1/workspace/scoreboard/chat'
|
27 |
headers = {
|
28 |
'accept': 'application/json',
|
29 |
-
'Authorization': 'Bearer
|
30 |
'Content-Type': 'application/json'
|
31 |
}
|
32 |
payload = {
|
@@ -36,6 +51,7 @@ def handle_llm_call():
|
|
36 |
|
37 |
# Trace the LLM API call
|
38 |
with tracer.trace("llm_api_call", service="anything-api", resource="chat", span_type="http") as span:
|
|
|
39 |
span.set_tag("llm.request.model", "anything-api")
|
40 |
span.set_tag("llm.request.input", message)
|
41 |
|
@@ -47,19 +63,23 @@ def handle_llm_call():
|
|
47 |
bot_response = response_data.get("textResponse")
|
48 |
|
49 |
span.set_tag("llm.response.output", bot_response)
|
|
|
50 |
|
51 |
return jsonify({"bot_response": bot_response})
|
52 |
|
53 |
except requests.RequestException as e:
|
54 |
span.set_tag("error", True)
|
55 |
span.set_tag("error.msg", str(e))
|
|
|
56 |
return jsonify({"error": f"Request failed: {e}"}), 500
|
57 |
|
58 |
except Exception as e:
|
59 |
span.set_tag("error", True)
|
60 |
span.set_tag("error.msg", str(e))
|
|
|
61 |
return jsonify({"error": f"An error occurred: {e}"}), 500
|
62 |
|
63 |
|
64 |
if __name__ == "__main__":
|
|
|
65 |
app.run(host='0.0.0.0', port=7860)
|
|
|
1 |
import os
|
2 |
import requests
|
3 |
import json
|
4 |
+
import logging
|
5 |
from flask import Flask, request, jsonify
|
6 |
from ddtrace import patch_all, tracer
|
7 |
+
from ddtrace.llmobs import LLMObs
|
8 |
+
from dotenv import load_dotenv
|
9 |
|
10 |
+
# Load environment variables from a .env file if one exists (no-op otherwise).
load_dotenv()

# Verbose logging while the Datadog integration is being debugged.
logging.basicConfig(level=logging.DEBUG)

# Configure LLM observability BEFORE enabling it: ddtrace reads these
# settings when patch_all() / LLMObs.enable() run, so assigning them
# afterwards (as the previous revision did) has no effect.  setdefault()
# keeps any values already supplied by the real environment or the .env
# file instead of clobbering them.
os.environ.setdefault('DD_LLMOBS_ENABLED', '1')
os.environ.setdefault('DD_LLMOBS_ML_APP', 'anything-api')
os.environ.setdefault('DD_LLMOBS_AGENTLESS_ENABLED', '1')

# Enable Datadog tracing for supported libraries (Flask, requests, ...).
patch_all()

# Enable LLM observability (agentless mode per the env vars set above).
LLMObs.enable()

app = Flask(__name__)
|
28 |
|
29 |
@app.route('/llm_call', methods=['POST'])
|
30 |
def handle_llm_call():
|
31 |
+
logging.debug("Received a request at /llm_call")
|
32 |
+
|
33 |
# Extract data from the incoming request (e.g., message to LLM)
|
34 |
data = request.get_json()
|
35 |
message = data.get("message")
|
36 |
|
37 |
if not message:
|
38 |
+
logging.error("No message provided in the request.")
|
39 |
return jsonify({"error": "No message provided"}), 400
|
40 |
|
41 |
url = 'https://severian-anything.hf.space/api/v1/workspace/scoreboard/chat'
|
42 |
headers = {
|
43 |
'accept': 'application/json',
|
44 |
+
'Authorization': 'Bearer YOUR_API_KEY_HERE', # Replace with actual token securely
|
45 |
'Content-Type': 'application/json'
|
46 |
}
|
47 |
payload = {
|
|
|
51 |
|
52 |
# Trace the LLM API call
|
53 |
with tracer.trace("llm_api_call", service="anything-api", resource="chat", span_type="http") as span:
|
54 |
+
logging.debug("Starting trace for LLM API call.")
|
55 |
span.set_tag("llm.request.model", "anything-api")
|
56 |
span.set_tag("llm.request.input", message)
|
57 |
|
|
|
63 |
bot_response = response_data.get("textResponse")
|
64 |
|
65 |
span.set_tag("llm.response.output", bot_response)
|
66 |
+
logging.debug(f"LLM API response: {bot_response}")
|
67 |
|
68 |
return jsonify({"bot_response": bot_response})
|
69 |
|
70 |
except requests.RequestException as e:
|
71 |
span.set_tag("error", True)
|
72 |
span.set_tag("error.msg", str(e))
|
73 |
+
logging.error(f"Request failed: {e}")
|
74 |
return jsonify({"error": f"Request failed: {e}"}), 500
|
75 |
|
76 |
except Exception as e:
|
77 |
span.set_tag("error", True)
|
78 |
span.set_tag("error.msg", str(e))
|
79 |
+
logging.error(f"An error occurred: {e}")
|
80 |
return jsonify({"error": f"An error occurred: {e}"}), 500
|
81 |
|
82 |
|
83 |
if __name__ == "__main__":
    # Entry point when run as a script: bind to all interfaces so the
    # container's port mapping (7860) reaches the server.
    host, port = "0.0.0.0", 7860
    logging.info("Starting Flask app on port 7860")
    app.run(host=host, port=port)
|