# MultiLLM-ChainLit / chain_app.py
import chainlit as cl
from gradio_client import Client
from openai import OpenAI
from groq import Groq
import requests
from chainlit.input_widget import Select, Slider
import os
import cohere
from huggingface_hub import InferenceClient
hf_token = os.environ.get("HF_TOKEN")
hf_token_llama_3_1 = os.environ.get('HF_TOKEN_FOR_31')
openai_api_key = os.environ.get('OPENAI_API_KEY')
groq_api_key = os.environ.get('GROQ_API_KEY')
cohere_api_key = os.environ.get('COHERE_API_KEY')
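# A minimal sketch (not in the original code): fail fast with a clear error when a
# required API key is missing, rather than failing later inside an API call.
for _var in ("HF_TOKEN", "OPENAI_API_KEY", "GROQ_API_KEY", "COHERE_API_KEY"):
    if not os.environ.get(_var):
        raise RuntimeError(f"Missing required environment variable: {_var}")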
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)
# hf_image_client = Client('Artin2009/image-generation')
openai_client = OpenAI(api_key=openai_api_key)
groq_client = Groq(api_key=groq_api_key)
co = cohere.Client(
api_key=cohere_api_key,
)
# API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"
# headers = {"Authorization": f"Bearer {hf_token}"}
# def query(payload):
# response = requests.post(API_URL, headers=headers, json=payload)
# return response.json()
@cl.set_starters
async def set_starters():
return [
cl.Starter(
label="Morning routine ideation",
message="Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.",
icon="https://chainlit-rag-copilot-r2xd.onrender.com/public/idea.svg",
),
cl.Starter(
label="Explain superconductors",
message="Explain superconductors like I'm five years old.",
icon="https://chainlit-rag-copilot-r2xd.onrender.com/public/learn.svg",
),
cl.Starter(
label="Python script for daily email reports",
message="Write a script to automate sending daily email reports in Python, and walk me through how I would set it up.",
icon="https://chainlit-rag-copilot-r2xd.onrender.com/public/terminal.svg",
),
cl.Starter(
label="Text inviting friend to wedding",
message="Write a text asking a friend to be my plus-one at a wedding next month. I want to keep it super short and casual, and offer an out.",
icon="https://chainlit-rag-copilot-r2xd.onrender.com/public/write.svg",
)
]
@cl.set_chat_profiles
async def chat_profile():
return [
cl.ChatProfile(
name="None",
markdown_description="None",
),
cl.ChatProfile(
name="neural-brain-AI",
markdown_description="The main model of neural brain",
),
cl.ChatProfile(
name="Dorna-AI",
markdown_description="One of the open-sourced models that neural brain team fine-tuned",
),
# cl.ChatProfile(
# name='Image-Generation',
# markdown_description='Our image generation model, has a performance like midjourney',
# ),
cl.ChatProfile(
name="gpt4-o-mini",
markdown_description="The best state of the art openai model",
),
cl.ChatProfile(
name="GPT-4",
markdown_description="OpenAI's GPT-4 model",
),
cl.ChatProfile(
name="gpt-3.5-turbo",
markdown_description="OpenAI's GPT-3.5 Turbo model",
),
# cl.ChatProfile(
# name="GPT-3.5-turbo-0125",
# markdown_description="OpenAI's GPT-3.5 Turbo 0125 model",
# ),
cl.ChatProfile(
name="gpt-3.5-turbo-1106",
markdown_description="OpenAI's GPT-3.5 Turbo 1106 model",
),
# cl.ChatProfile(
# name="davinci-002",
# markdown_description="OpenAI's Davinci-002 model",
# ),
cl.ChatProfile(
name="TTS",
markdown_description="OpenAI's Text-to-Speech model",
),
cl.ChatProfile(
name="Qwen2-57B",
markdown_description="Qwen second generation model with 57B parameters",
),
cl.ChatProfile(
name="Qwen2-7B",
markdown_description="Qwen second generation model with 7B parameters",
),
cl.ChatProfile(
name="Qwen2-1.5B",
markdown_description="Qwen second generation model with 1.5B parameters",
),
cl.ChatProfile(
name="Qwen2-0.5B",
markdown_description="Qwen second generation model with 0.5B parameters",
),
cl.ChatProfile(
name="Qwen1.5-110B",
markdown_description="Qwen first generation improved model with 110B parameters",
),
# cl.ChatProfile(
# name="Qwen1.5-72B",
# markdown_description="Qwen first generation improved model with 72B parameters",
# ),
cl.ChatProfile(
name="Qwen1.5-32B",
markdown_description="Qwen first generation improved model with 32B parameters",
),
cl.ChatProfile(
name="Qwen1.5-2.7B",
markdown_description="Qwen first generation improved model with 2.7B parameters",
),
# cl.ChatProfile(
# name="Qwen-72B",
# markdown_description="Qwen first generation model with 72B parameters",
# ),
# cl.ChatProfile(
# name="Qwen-14B",
# markdown_description="Qwen first generation model with 14B parameters",
# ),
# cl.ChatProfile(
# name="Qwen-7B",
# markdown_description="Qwen first generation model with 7B parameters",
# ),
cl.ChatProfile(
name="Llama-3.1-405B",
markdown_description="Meta Open Source Model Llama with 405B parameters",
),
cl.ChatProfile(
name="Llama-3.1-70B",
markdown_description="Meta Open Source Model Llama with 70B parameters",
),
cl.ChatProfile(
name="Llama-3.1-8B",
markdown_description="Meta Open Source Model Llama with 8B parameters",
),
cl.ChatProfile(
name="Llama-3-70B",
markdown_description="Meta Open Source model Llama-3 with 70B parameters",
),
        cl.ChatProfile(
            name='Aya-35B',
            markdown_description='Cohere open-weights Aya model with 35B parameters'
        ),
        cl.ChatProfile(
            name='Aya-23B',
            markdown_description='Cohere open-weights Aya model with 23B parameters'
        ),
        cl.ChatProfile(
            name='Command-R-Plus',
            markdown_description='Cohere AI model named Command R Plus'
        ),
        cl.ChatProfile(
            name='Command-R',
            markdown_description='Cohere AI model named Command R'
        ),
        cl.ChatProfile(
            name='Command-Light',
            markdown_description='Cohere AI model named Command Light'
        ),
        cl.ChatProfile(
            name='Command-Light-Nightly',
            markdown_description='Cohere AI model named Command Light Nightly'
        ),
        cl.ChatProfile(
            name='Command-Nightly',
            markdown_description='Cohere AI model named Command Nightly'
        ),
        cl.ChatProfile(
            name='Command',
            markdown_description='Cohere AI model named Command'
        ),
        cl.ChatProfile(
            name="Llama-3-8B",
            markdown_description="Meta Open Source model Llama-3 with 8B parameters",
        ),
        cl.ChatProfile(
            name="gemma2-9B",
            markdown_description='Google Generation 2 Open Source LLM with 9B parameters'
        ),
        cl.ChatProfile(
            name="gemma-7B",
            markdown_description='Google Generation 1 Open Source LLM with 7B parameters'
        ),
cl.ChatProfile(
name="zephyr-7B",
markdown_description="Open Source model Zephyr with 7B parameters",
),
        cl.ChatProfile(
            name='mistral-nemo-12B',
            markdown_description='Mistral open source LLM with 12B parameters'
        ),
        cl.ChatProfile(
            name='mixtral-8x7B',
            markdown_description='Mistral open source mixture-of-experts LLM with 8x7B parameters'
        ),
# cl.ChatProfile(
# name="Toka-353M",
# markdown_description="PartAI Open Source model Toka with 353M parameters",
# )
]
# NOTE: a second @cl.set_chat_profiles registration (a Chainlit docs example)
# overrides the profile list defined above, so this block is disabled. Its four
# starters were verbatim duplicates of the set_starters() list at the top.
# @cl.set_chat_profiles
# async def chat_profile(current_user: cl.User):
#     chat_profile = cl.user_session.get("chat_profile")
#     if chat_profile is not None:
#         return [
#             cl.ChatProfile(
#                 name="My Chat Profile",
#                 icon="https://picsum.photos/250",
#                 markdown_description="The underlying LLM model is **GPT-3.5**, a *175B parameter model* trained on 410GB of text data.",
#                 starters=[...],  # the same four starters as set_starters() above
#             )
#         ]
@cl.on_chat_start
async def on_chat_start():
chat_profile = cl.user_session.get("chat_profile")
if chat_profile == 'neural-brain-AI':
await cl.ChatSettings(
[
Select(
id="NB-Model",
label="NeuralBrain - Models",
values=["Neural Brain AI"],
initial_index=0,
)
]
).send()
await cl.Message(
content="Hello, I am the main model of neural brain team, i am an instance of ChatGPT-4, This team finetuned me and i am ready to help you"
).send()
if chat_profile == 'Dorna-AI':
await cl.ChatSettings(
[
Select(
id="param_3",
label="Parameter 3",
values=["512"], # Only one selectable value
initial_index=0,
tooltip="Config parameter 3 (e.g., max tokens)",
),
Select(
id="param_4",
label="Parameter 4",
values=["0.7"], # Only one selectable value
initial_index=0,
tooltip="Config parameter 4 (e.g., temperature)",
),
Select(
id="param_5",
label="Parameter 5",
values=["0.95"], # Only one selectable value
initial_index=0,
tooltip="Config parameter 5 (e.g., top_p)",
),
Select(
id="api_name",
label="API Name",
values=["/chat"],
initial_index=0,
),
]
).send()
await cl.Message(
            content="My name is Dorna, your AI assistant designed by the Neural Nexus team. I was made by Artin Daneshvar and Sadra Noadoust, two Iranian students!"
).send()
if chat_profile == 'gpt4-o-mini':
await cl.ChatSettings(
[
Select(
id="OpenAI-Model",
label="OpenAI - Model",
values=["gpt4-o-mini"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im one of the best models openai have released and i am configured by two iranian boys to help you."
).send()
# if chat_profile == 'Image-Generation':
# image = cl.Image(path='cat.png', name="result", display="inline")
# await cl.Message(
# content="I can make high quality & resoloution images for you, This is an example of what i can do!",
# elements=[image],
# ).send()
if chat_profile == 'GPT-4':
await cl.ChatSettings(
[
Select(
id="OpenAI-Model",
label="OpenAI - Model",
values=["gpt-4"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im OpenAI's latest and biggest model. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'gpt-3.5-turbo':
await cl.ChatSettings(
[
Select(
id="OpenAI-Model",
label="OpenAI - Model",
values=["gpt-3.5-turbo"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
# if chat_profile == 'GPT-3.5-turbo-0125':
# await cl.ChatSettings(
# [
# Select(
# id="OpenAI-Model",
# label="OpenAI - Model",
# values=["gpt-3.5-turbo-0125"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content="Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
# ).send()
if chat_profile == 'gpt-3.5-turbo-1106':
await cl.ChatSettings(
[
Select(
id="OpenAI-Model",
label="OpenAI - Model",
values=["gpt-3.5-turbo-1106"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
# if chat_profile == 'davinci-002':
# await cl.ChatSettings(
# [
# Select(
# id="OpenAI-Model",
# label="OpenAI - Model",
# values=["davinci-002"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content="Im one of the OpenAI's models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
# ).send()
if chat_profile == 'TTS':
await cl.Message(
content="Im TTS. of the best models OpenAI ever created. i can convert text to speech! . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'Qwen2-57B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen2-57B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm Qwen's second-generation, second-largest model, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
if chat_profile == 'Qwen2-7B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen2-7B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm Qwen's second-generation, third-largest model, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
if chat_profile == 'Qwen2-1.5B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen2-1.5B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Qwen's small second-generation models, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
if chat_profile == 'Qwen2-0.5B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen2-0.5B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm Qwen's smallest second-generation model, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
if chat_profile == 'Qwen1.5-110B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen1.5-110B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm Qwen's large 1.5-generation model, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
# if chat_profile == 'Qwen1.5-72B':
# await cl.ChatSettings(
# [
# Select(
# id="Qwen-Model",
# label="Qwen - Model",
# values=["Qwen1.5-72B"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content='Im Qwens 1.5th generation second Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
# ).send()
if chat_profile == 'Qwen1.5-32B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen1.5-32B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Qwen's larger 1.5-generation models, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
if chat_profile == 'Qwen1.5-2.7B':
await cl.ChatSettings(
[
Select(
id="Qwen-Model",
label="Qwen - Model",
values=["Qwen1.5-2.7B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm Qwen's small 1.5-generation model, and I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!",
).send()
# if chat_profile == 'Qwen-72B':
# await cl.ChatSettings(
# [
# Select(
# id="Qwen-Model",
# label="Qwen - Model",
# values=["Qwen-72B"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
# ).send()
# if chat_profile == 'Qwen-14B':
# await cl.ChatSettings(
# [
# Select(
# id="Qwen-Model",
# label="Qwen - Model",
# values=["Qwen-14B"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
# ).send()
# if chat_profile == 'Qwen-7B':
# await cl.ChatSettings(
# [
# Select(
# id="Qwen-Model",
# label="Qwen - Model",
# values=["Qwen-7B"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
# ).send()
if chat_profile == 'Llama-3.1-405B':
await cl.ChatSettings(
[
Select(
id="Meta-Model",
label="Meta - Model",
values=["Llama-3.1-405B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im the big Llama-3.1!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'Llama-3.1-70B':
await cl.ChatSettings(
[
Select(
id="Meta-Model",
label="Meta - Model",
values=["Llama-3.1-70B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im the second-big Llama-3.1!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'Llama-3.1-8B':
await cl.ChatSettings(
[
Select(
id="Meta-Model",
label="Meta - Model",
values=["Llama-3.1-8B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im the small Llama-3.1!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'Llama-3-70B':
await cl.ChatSettings(
[
Select(
id="Meta-Model",
label="Meta - Model",
values=["Llama-3-70B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im the big Llama-3!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'Llama-3-8B':
await cl.ChatSettings(
[
Select(
id="Meta-Model",
label="Meta - Model",
values=["Llama-3-8B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im The small Llama!. one of the best open source models released by Meta! i am the small version of meta's open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'Aya-23B':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Aya-23B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of the best open-weights models Cohere has released. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'Aya-35B':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Aya-35B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of the best open-weights models Cohere has released. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'Command-R-Plus':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Command-R-Plus"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Cohere's Command family models. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'Command-Nightly':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Command-Nightly"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Cohere's Command family models. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'Command-Light-Nightly':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Command-Light-Nigtly"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Cohere's Command family models. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'Command-Light':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Command-Light"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
        await cl.Message(
            content="I'm one of Cohere's Command family models. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
        ).send()
if chat_profile == 'Command-R':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Command-R"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Cohere's Command family models. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'Command':
await cl.ChatSettings(
[
Select(
id="Cohere-Model",
label="Cohere - Model",
values=["Command"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
            content="I'm one of Cohere's Command family models. I was configured by two Iranian students, Artin Daneshvar and Sadra Noadoust, to help you out!"
).send()
if chat_profile == 'gemma2-9B':
await cl.ChatSettings(
[
Select(
id="Google-Model",
label="Google - Model",
values=["Gemma-9B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im Gemma2. the 9B version of google second generation open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'gemma-7B':
await cl.ChatSettings(
[
Select(
id="Google-Model",
label="Google - Model",
values=["Gemma-7B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im Gemma. the small version of google open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'zephyr-7B':
await cl.ChatSettings(
[
Select(
id="zephyr-Model",
label="zephyr - Model",
values=["zephyr-7B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im Zephyr. One of the best open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'mixtral-8x7B':
await cl.ChatSettings(
[
Select(
id="Mistral-Model",
label="Mistral - Model",
values=["mixtral-8x7B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im Mistral. the small version of Mistral Family. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
if chat_profile == 'mistral-nemo-12B':
await cl.ChatSettings(
[
Select(
id="Mistral-Model",
label="Mistral - Model",
values=["mistral-nemo-12B"],
initial_index=0,
),
Slider(
id="Temperature",
label="Model Temperature",
initial=0.7,
min=0,
max=1,
step=0.1,
),
]
).send()
await cl.Message(
content="Im Mistral nemo 12B .i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
).send()
# if chat_profile == 'Toka-353M':
# await cl.ChatSettings(
# [
# Select(
# id="PartAI-Model",
# label="PartAI - Model",
# values=["TokaBert-353M"],
# initial_index=0,
# ),
# Slider(
# id="Temperature",
# label="Model Temperature",
# initial=0.7,
# min=0,
# max=1,
# step=0.1,
# ),
# ]
# ).send()
# await cl.Message(
# content="Im Toka. An opens source persian LLM . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? you should ask me your questions like : the capital of england is <mask> "
# ).send()
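# The @cl.on_message handler below repeats the same call patterns (Groq
# streaming, Cohere streaming, Qwen Gradio Spaces) almost verbatim in every
# branch. A possible refactor, sketched with the same parameters the handler
# uses but not wired into the branches below:
def groq_complete(model: str, user_content: str) -> str:
    # Stream a Groq chat completion and return the concatenated text.
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_content}],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    return "".join(
        chunk.choices[0].delta.content
        for chunk in completion
        if chunk.choices[0].delta.content is not None
    )
def cohere_complete(model: str, user_content: str) -> str:
    # Stream a Cohere chat response and return the concatenated text.
    stream = co.chat_stream(
        model=model,
        message=user_content,
        temperature=0.3,
        chat_history=[],
        prompt_truncation='AUTO',
    )
    return "".join(
        event.text for event in stream if event.event_type == 'text-generation'
    )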
@cl.on_message
async def main(message: cl.Message):
chat_profile = cl.user_session.get("chat_profile")
if not chat_profile or chat_profile == 'None':
await cl.Message(
content="Please select a model first."
).send()
return
if chat_profile == 'neural-brain-AI':
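        # The "ft:" prefix addresses an OpenAI fine-tuned checkpoint; this ID is
        # the team's fine-tune of gpt-3.5-turbo-1106.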
completion = openai_client.chat.completions.create(
model="ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK",
messages=[
{"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
{"role": "user", "content": message.content}
]
)
model_response = completion.choices[0].message.content
await cl.Message(
content=model_response
).send()
elif chat_profile == "Dorna-AI":
result = hf_text_client.predict(
message=message.content,
request="your name is Dorna,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
param_3=512,
param_4=0.7,
param_5=0.95,
api_name="/chat"
)
        # str.strip("</s>") strips any of the characters <, /, s, > from both ends
        # (so a trailing "s" would be lost); removesuffix() drops only the token.
        model_response = result.removesuffix("</s>")
await cl.Message(
content=model_response
).send()
elif chat_profile == "gpt4-o-mini":
completion = openai_client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
{"role": "user", "content": message.content}
]
)
model_response = completion.choices[0].message.content
await cl.Message(
content=model_response
).send()
# elif chat_profile == 'Image-Generation':
# result = hf_image_client.predict(
# prompt=message.content,
# negative_prompt="",
# seed=0,
# randomize_seed=True,
# width=512,
# height=512,
# guidance_scale=0,
# num_inference_steps=2,
# api_name="/infer"
# )
# image = cl.Image(path=result, name="result", display="inline")
# await cl.Message(
# content="This message has an image!",
# elements=[image],
# ).send()
elif chat_profile == 'GPT-4':
completion = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
{"role": "user", "content": message.content}
]
)
model_response = completion.choices[0].message.content
await cl.Message(
content=model_response
).send()
elif chat_profile == 'gpt-3.5-turbo':
completion = openai_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
{"role": "user", "content": message.content}
]
)
model_response = completion.choices[0].message.content
await cl.Message(
content=model_response
).send()
    elif chat_profile == 'GPT-3.5-turbo-0125':
        # NOTE: the matching chat profile is commented out above, so this branch
        # is currently unreachable; the model ID must be lowercase for the API.
        completion = openai_client.chat.completions.create(
            model="gpt-3.5-turbo-0125",
            messages=[
                {"role": "system", "content": "You are the Neural Nexus official chatbot; you were made by Artin Daneshvar and Sadra Noadoust."},
                {"role": "user", "content": message.content}
            ]
        )
        model_response = completion.choices[0].message.content
        await cl.Message(
            content=model_response
        ).send()
elif chat_profile == 'gpt-3.5-turbo-1106':
completion = openai_client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
{"role": "user", "content": message.content}
]
)
model_response = completion.choices[0].message.content
await cl.Message(
content=model_response
).send()
# elif chat_profile == 'davinci-002':
# completion = openai_client.chat.completions.create(
# model="davinci-002",
# messages=[
# {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
# {"role": "user", "content": message.content}
# ]
# )
# model_response = completion.choices[0].message.content
# await cl.Message(
# content=model_response
# ).send()
elif chat_profile == 'TTS':
response = openai_client.audio.speech.create(
model="tts-1",
voice="alloy",
input=message.content,
)
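        # stream_to_file() writes the MP3 next to the app; newer openai SDK
        # versions deprecate this helper in favour of
        # openai_client.audio.speech.with_streaming_response.create(...).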
response.stream_to_file("output.mp3")
elements = [
cl.Audio(name="output.mp3", path="./output.mp3", display="inline"),
]
await cl.Message(
content="Here it is the response!",
elements=elements,
).send()
elif chat_profile == 'Qwen2-57B':
client = Client("Qwen/Qwen2-57b-a14b-instruct-demo", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
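        # The Qwen demo Spaces' /model_chat endpoint appears to return a
        # (textbox, history, system) tuple; result[1][0][1] is the assistant
        # reply of the first history turn (an assumption based on the demo API).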
await cl.Message(
content=result[1][0][1]
).send()
elif chat_profile == 'Qwen2-7B':
client = Client("Qwen/Qwen2-7b-instruct-demo", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
await cl.Message(
content=result[1][0][1]
).send()
elif chat_profile == 'Qwen2-1.5B':
client = Client("Qwen/Qwen2-1.5b-instruct-demo", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
await cl.Message(
content=result[1][0][1]
).send()
elif chat_profile == 'Qwen2-0.5B':
client = Client("Qwen/Qwen2-0.5B-Instruct", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
await cl.Message(
content=result[1][0][1]
).send()
elif chat_profile == 'Qwen1.5-110B':
client = Client("Qwen/Qwen1.5-110B-Chat-demo", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
await cl.Message(
content=result[1][0][1]
).send()
elif chat_profile == 'Qwen1.5-32B':
client = Client("Qwen/Qwen1.5-32B-Chat-demo", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
await cl.Message(
content=result[1][0][1]
).send()
elif chat_profile == 'Qwen1.5-2.7B':
client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
result = client.predict(
query=message.content,
system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
api_name="/model_chat"
)
await cl.Message(
content=result[1][0][1]
).send()
# elif chat_profile == 'Qwen-14B':
# client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
# result = client.predict(
# query=message.content,
# system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
# api_name="/model_chat"
# )
# await cl.Message(
# content=result[1][0][1]
# ).send()
# elif chat_profile == 'Qwen-7B':
# client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
# result = client.predict(
# query=message.content,
# system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
# api_name="/model_chat"
# )
# await cl.Message(
# content=result[1][0][1]
# ).send()
elif chat_profile == 'Llama-3.1-405B':
completion = groq_client.chat.completions.create(
model="llama-3.1-405b-reasoning",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
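        # The loop below accumulates the whole response and sends it once.
        # Chainlit could instead stream tokens to the UI incrementally; a sketch
        # (not enabled here):
        #   msg = cl.Message(content="")
        #   for chunk in completion:
        #       token = chunk.choices[0].delta.content
        #       if token:
        #           await msg.stream_token(token)
        #   await msg.send()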
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == 'Llama-3.1-70B':
completion = groq_client.chat.completions.create(
model="llama-3.1-70b-versatile",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == 'Llama-3.1-8B':
completion = groq_client.chat.completions.create(
model="llama-3.1-8b-instant",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == 'Llama-3-70B':
completion = groq_client.chat.completions.create(
model="llama3-70b-8192",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == 'Llama-3-8B':
completion = groq_client.chat.completions.create(
model="llama3-8b-8192",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == 'gemma2-9B':
completion = groq_client.chat.completions.create(
model="gemma2-9b-it",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == 'gemma-7B':
completion = groq_client.chat.completions.create(
model="gemma-7b-it",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
# Iterate over each chunk
for chunk in completion:
# Retrieve the content from the current chunk
content = chunk.choices[0].delta.content
# Check if the content is not None before concatenating it
if content is not None:
complete_content += content
# Send the concatenated content as a message
await cl.Message(content=complete_content).send()
elif chat_profile == "zephyr-7B":
result = hf_text_client.predict(
message=message.content,
request="your name is zephyr,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
param_3=512,
param_4=0.7,
param_5=0.95,
api_name="/chat"
)
        # As above, use removesuffix() rather than strip() to drop the trailing token.
        model_response = result.removesuffix("</s>")
await cl.Message(
content=model_response
).send()
elif chat_profile == 'mixtral-8x7B':
completion = groq_client.chat.completions.create(
model="mixtral-8x7b-32768",
messages=[
{
"role": "user",
"content": message.content
}
],
temperature=1,
max_tokens=1024,
top_p=1,
stream=True,
stop=None,
)
complete_content = ""
for chunk in completion:
content = chunk.choices[0].delta.content
if content is not None:
complete_content += content
await cl.Message(content=complete_content).send()
elif chat_profile == 'mistral-nemo-12B':
client = Client("0x7o/Mistral-Nemo-Instruct", hf_token=hf_token)
result = client.predict(
message=message.content,
max_new_tokens=512,
temperature=0.7,
top_p=0.95,
api_name="/chat"
)
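        # NOTE: if this Space's /chat endpoint returns a plain string rather than
        # a chat history, the result[1][0][1] indexing below would need adjusting.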
await cl.Message(
content=result[1][0][1]
).send()
# elif chat_profile == 'Toka-353M':
# output = query({
# "inputs": message.content,
# })
# await cl.Message(
# content=output[0]['sequence']
# ).send()
elif chat_profile == 'Aya-23B':
stream = co.chat_stream(
model='c4ai-aya-23',
message=message.content,
temperature=0.3,
# chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
prompt_truncation='OFF',
connectors=[],
)
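        # co.chat_stream() yields typed events; only 'text-generation' events
        # carry response text, which is accumulated and sent as one message.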
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Aya-35B':
stream = co.chat_stream(
            model='c4ai-aya-23',  # NOTE: currently the same model ID as the Aya-23B profile
message=message.content,
temperature=0.3,
# chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
prompt_truncation='OFF',
connectors=[],
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Command-R-Plus':
stream = co.chat_stream(
model='command-r-plus',
message=message.content,
temperature=0.3,
chat_history=[],
prompt_truncation='AUTO',
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Command-R':
stream = co.chat_stream(
model='command-r',
message=message.content,
temperature=0.3,
chat_history=[],
prompt_truncation='AUTO',
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Command':
stream = co.chat_stream(
model='command',
message=message.content,
temperature=0.3,
chat_history=[],
prompt_truncation='AUTO',
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Command-Light':
stream = co.chat_stream(
model='command-light',
message=message.content,
temperature=0.3,
chat_history=[],
prompt_truncation='AUTO',
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Command-Light-Nightly':
stream = co.chat_stream(
model='command-light-nightly',
message=message.content,
temperature=0.3,
chat_history=[],
prompt_truncation='AUTO',
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
elif chat_profile == 'Command-Nightly':
stream = co.chat_stream(
            model='command-nightly',
message=message.content,
temperature=0.3,
chat_history=[],
prompt_truncation='AUTO',
)
complete_content = ''
for event in stream:
if event.event_type == 'text-generation':
complete_content += event.text
await cl.Message(content=complete_content).send()
@cl.on_settings_update
async def setup_agent(settings):
print("on_settings_update", settings)