piotr-szleg-bards-ai committed on
Commit 362448c
1 Parent(s): 0b07a42
Files changed (3)
  1. app.py +1 -1
  2. pipeline/config.py +1 -0
  3. pipeline/models.py +4 -4
app.py CHANGED
@@ -5,7 +5,7 @@ import pandas as pd
 import plotly
 from pandas.api.types import is_numeric_dtype
 
-from pipeline.config import QueriesConfig, LLMBoardConfig
+from pipeline.config import LLMBoardConfig, QueriesConfig
 from pipeline.models import models_costs
 
 README = """
pipeline/config.py CHANGED
@@ -6,6 +6,7 @@ try:
 except ImportError:
     Config = object
 
+
 class LLMBoardConfig(Config):
     group_columns: List[str] = ["model", "language", "template_name"]
     single_values_columns: List[str] = ["execution_time", "characters_count", "words_count"]
pipeline/models.py CHANGED
@@ -29,7 +29,7 @@ class Model(object):
         self.cost = f"${self.cost_per_million_input_tokens} / 1M input tokens, ${self.cost_per_million_output_tokens} / 1M output tokens"
 
 
-env = os.environ
+env = os.environ.get
 
 MODELS = [
     # source: https://openai.com/pricing
@@ -91,7 +91,7 @@ MODELS = [
     Model(
         "zephyr-7b-beta",
         "huggingface/HuggingFaceH4/zephyr-7b-beta",
-        env["ZEPHYR_7B_BETA_URL"],
+        env("ZEPHYR_7B_BETA_URL"),
         "Hugging Face Inference Endpoint",
         hourly_cost=1.30,
         size_billion_parameters=7,
@@ -99,7 +99,7 @@ MODELS = [
     Model(
         "Mistral-7B-Instruct-v0.2",
         "huggingface/mistralai/Mistral-7B-Instruct-v0.2",
-        env["MISTRAL_7B_BETA_URL"],
+        env("MISTRAL_7B_BETA_URL"),
         "Hugging Face Inference Endpoint",
         hourly_cost=1.30,
         size_billion_parameters=7,
@@ -107,7 +107,7 @@ MODELS = [
     Model(
         "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
         "huggingface/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-        env["TINY_LLAMA_URL"],
+        env("TINY_LLAMA_URL"),
        "Hugging Face Inference Endpoint",
        hourly_cost=0.60,
        size_billion_parameters=1.1,
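
Note on the pipeline/models.py change: dict-style access such as os.environ["ZEPHYR_7B_BETA_URL"] raises KeyError at import time whenever that variable is not exported, while os.environ.get("ZEPHYR_7B_BETA_URL") returns None, so the module still imports and the affected Model entry simply gets no endpoint URL. A minimal sketch of that difference, using the same variable name as the diff; the None check below is illustrative and not part of the repository:

import os

# Before the commit: indexing crashes at import time if the variable is unset.
#     url = os.environ["ZEPHYR_7B_BETA_URL"]   # KeyError when not exported
# After the commit: env is bound to os.environ.get, so a missing variable yields None.
env = os.environ.get

url = env("ZEPHYR_7B_BETA_URL")  # None when the variable is not exported
if url is None:
    print("ZEPHYR_7B_BETA_URL is not set; this model's endpoint URL will be None")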