Adrien committed
Commit 428cc4d
1 Parent(s): 078e6fe

add llmtracing

Files changed (5):
  1. .gitignore +2 -1
  2. __pycache__/writer.cpython-311.pyc +0 -0
  3. app.py +36 -47
  4. requirements.txt +3 -0
  5. writer.py +21 -4
.gitignore CHANGED
@@ -1,3 +1,4 @@
 .streamlit
 *.log
-__pycache__
+__pycache__
+*.pyc
__pycache__/writer.cpython-311.pyc DELETED
Binary file (4.88 kB)
 
app.py CHANGED
@@ -3,7 +3,11 @@ from writer import write_article, incorporate_feedback, _template, evaluate_post
 import hmac
 import dspy
 from dsp.modules import Claude
-from pydantic import BaseModel
+
+import phoenix as px
+
+my_traces = px.Client().get_trace_dataset().save()
+px.launch_app(trace=px.TraceDataset.load(my_traces))


 def check_password():
@@ -34,45 +38,6 @@ if not check_password():
     st.stop()


-class LmChoice(BaseModel, arbitrary_types_allowed=True):
-    gpt_35_turbo: dspy.dsp.LM = dspy.OpenAI(
-        model="gpt-3.5-turbo",
-        max_tokens=3800,
-        api_key=st.secrets["OpenAI"],
-    )
-    gpt_4_turbo: dspy.dsp.LM = dspy.OpenAI(
-        model="gpt-4-0125-preview",
-        max_tokens=3800,
-        api_key=st.secrets["OpenAI"],
-    )
-    gpt_4: dspy.dsp.LM = dspy.OpenAI(
-        model="gpt-4-0125-preview",
-        max_tokens=3800,
-        api_key=st.secrets["OpenAI"],
-    )
-    # command_r_plus: dspy.dsp.LM = dspy.Cohere(
-    #     model="command-r-plus",
-    #     max_tokens=4000,
-    #     api_key=st.secrets["Cohere"],
-    # )
-
-    claude_3_opus: dspy.dsp.LM = Claude(
-        model="claude-3-opus-20240229",
-        api_key=st.secrets["Claude"],
-        max_tokens=4096,
-    )
-    claude_3_sonnet: dspy.dsp.LM = Claude(
-        model="claude-3-sonnet-20240229",
-        api_key=st.secrets["Claude"],
-        max_tokens=4096,
-    )
-    claude_3_haiku: dspy.dsp.LM = Claude(
-        model="claude-3-haiku-20240307",
-        api_key=st.secrets["Claude"],
-        max_tokens=4096,
-    )
-
-
 st.title("Linkedin shill")

 #! I hate this
@@ -106,23 +71,47 @@ with st.sidebar:
             "claude3 sonnet",
             "claude3 haiku",
             "claude3 opus",
-            "command-r-plus",
+            # "command-r-plus",
         ),
     )
     if lm_choice == "gpt-4-turbo":
-        lm = LmChoice.gpt_4_turbo
+        lm = dspy.OpenAI(
+            model="gpt-4-0125-preview",
+            max_tokens=3800,
+            api_key=st.secrets["OpenAI"],
+        )
     elif lm_choice == "gpt-3.5-turbo":
-        lm = LmChoice.gpt_35_turbo
+        lm = dspy.OpenAI(
+            model="gpt-3.5-turbo",
+            max_tokens=3800,
+            api_key=st.secrets["OpenAI"],
+        )
     elif lm_choice == "claude3 sonnet":
-        lm = LmChoice.claude_3_sonnet
+        lm = Claude(
+            model="claude-3-sonnet-20240229",
+            api_key=st.secrets["Claude"],
+            max_tokens=4096,
+        )
     elif lm_choice == "claude3 haiku":
-        lm = LmChoice.claude_3_haiku
+        lm = Claude(
+            model="claude-3-haiku-20240307",
+            api_key=st.secrets["Claude"],
+            max_tokens=4096,
+        )
     elif lm_choice == "claude3 opus":
-        lm = LmChoice.claude_3_opus
+        lm = Claude(
+            model="claude-3-opus-20240229",
+            api_key=st.secrets["Claude"],
+            max_tokens=4096,
+        )
     # elif lm_choice == "command-r-plus":
    #     lm = LmChoice.command_r_plus
     else:
-        lm = LmChoice.gpt_4
+        lm = dspy.OpenAI(
+            model="gpt-4",
+            max_tokens=3800,
+            api_key=st.secrets["OpenAI"],
+        )

 with st.form("my_form"):
     topic = st.text_input("topic", "Oil future")
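
Note on the app.py tracing bootstrap: as committed, `px.Client().get_trace_dataset().save()` runs at import time, which assumes a Phoenix collector is already running and has at least one recorded trace; if not, the Streamlit app fails on startup. Below is a minimal sketch of a more defensive variant, not part of this commit; the `phoenix_session` key is a hypothetical guard to avoid relaunching Phoenix on every Streamlit rerun.

```python
# Sketch only, not from the commit: start Phoenix once per Streamlit session.
import phoenix as px
import streamlit as st

if "phoenix_session" not in st.session_state:  # hypothetical guard key
    # launch_app() serves the trace UI / OTLP collector on http://127.0.0.1:6006 by default.
    st.session_state["phoenix_session"] = px.launch_app()
```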
requirements.txt CHANGED
@@ -1,2 +1,5 @@
 dspy-ai
 anthropic
+arize-phoenix
+openinference-instrumentation-dspy
+opentelemetry-exporter-otlp
writer.py CHANGED
@@ -3,6 +3,23 @@ import dspy
 from dspy import Signature, InputField, Module, ChainOfThought, Predict
 import streamlit as st
 from typing import Optional
+from openinference.instrumentation.dspy import DSPyInstrumentor
+from opentelemetry import trace as trace_api
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk import trace as trace_sdk
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+
+endpoint = "http://127.0.0.1:6006/v1/traces"
+resource = Resource(attributes={})
+tracer_provider = trace_sdk.TracerProvider(resource=resource)
+span_otlp_exporter = OTLPSpanExporter(endpoint=endpoint)
+tracer_provider.add_span_processor(
+    SimpleSpanProcessor(span_exporter=span_otlp_exporter)
+)
+trace_api.set_tracer_provider(tracer_provider=tracer_provider)
+DSPyInstrumentor().instrument()
+

 base_lm = dspy.OpenAI(
     model="gpt-4",
@@ -136,7 +153,7 @@ class Feedback(Module):
 class Evaluator(dspy.Signature):
     """You are a creative writing coach, evaluate this linkedin post"""

-    linkedin_post = InputField(desc="A linkedin post to evaluate")
+    linkedin_post = InputField(prefix="A linkedin post to evaluate")
     output = dspy.OutputField(
         prefix="A comprehensive evaluation of the input post, styled in markdown"
     )
@@ -147,8 +164,8 @@ class SelfEval(Module):
         super().__init__()
         self.self_eval = ChainOfThought(Evaluator)

-    def forward(self, content):
-        eval = self.self_eval(content=content).output
+    def forward(self, linkedin_post):
+        eval = self.self_eval(linkedin_post=linkedin_post).output
         return eval


@@ -171,5 +188,5 @@ def incorporate_feedback(

 def evaluate_post(content: str, lm: Optional[dspy.dsp.LM] = base_lm) -> str:
     with dspy.context(lm=lm):
-        eval = SelfEval().forward(content=content)
+        eval = SelfEval().forward(linkedin_post=content)
     return eval
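
The writer.py changes wire DSPy into OpenTelemetry: an OTLP/HTTP exporter pointed at Phoenix's default collector endpoint (http://127.0.0.1:6006/v1/traces), a SimpleSpanProcessor feeding a TracerProvider, and OpenInference's DSPyInstrumentor, so DSPy predictor and LM calls are exported as spans without changing any call sites. A rough usage sketch under those assumptions follows; the signature string and question are illustrative only, not from the repo.

```python
# Sketch only: once DSPyInstrumentor().instrument() has run (as in writer.py above),
# ordinary DSPy calls are traced automatically and appear in the Phoenix UI.
import dspy

lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=512, api_key="sk-...")  # placeholder key
with dspy.context(lm=lm):
    prediction = dspy.ChainOfThought("question -> answer")(question="What does this app do?")
# The ChainOfThought step and the underlying OpenAI request each show up as spans
# at the configured OTLP endpoint (Phoenix at http://127.0.0.1:6006).
```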