# litellm/tests/test_helicone_integration.py
# #### What this tests ####
# # This tests whether logging through the Helicone integration actually works.
# # The whole test is commented out; it makes live OpenAI and Cohere calls, so
# # running it requires provider credentials plus a Helicone API key.

# import sys, os
# import traceback
# import pytest

# sys.path.insert(
#     0, os.path.abspath("../..")
# )  # Adds the parent directory to the system path

# import litellm
# from litellm import embedding, completion

# # Register Helicone as a success callback so completed calls are logged to it.
# litellm.success_callback = ["helicone"]
# litellm.set_verbose = True

# user_message = "Hello, how are you?"
# messages = [{"content": user_message, "role": "user"}]

# # openai call
# response = completion(
#     model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}]
# )

# # cohere call
# response = completion(
#     model="command-nightly", messages=[{"role": "user", "content": "Hi 👋 - i'm cohere"}]
# )