"""Application entry point.

Wires together the LLM backend (OpenAI via LangChain), the Controller,
and the Gradio view, then launches the web app.
"""

import os

from langchain.llms import OpenAI

from config import config
from src.control.controller import Controller
import src.view.view as view

# Explicitly opt in to HuggingFace tokenizers parallelism (also silences
# the fork-related warning the library would otherwise print).
os.environ["TOKENIZERS_PARALLELISM"] = "true"

# Fall back to the key stored in config_key.py only when the environment
# does not already provide one; the import is deliberately lazy so the
# file is not required when the env var is set.
if "OPENAI_API_KEY" not in os.environ:
    from config_key import OPENAI_API_KEY
    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

# Alternative local backend, kept for reference:
# from transformers import AutoTokenizer, AutoModelForCausalLM
# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
# llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")

# temperature=0 for deterministic completions.
open_ai_model = OpenAI(temperature=0)
llm = open_ai_model  # active model; swap here to use the llama backend above

ctrl = Controller(config)
app = view.run(controller=ctrl, config=config)
app.queue().launch()