import json
import os

import gradio as gr
import openai
import pinecone


def init_pinecone(api_key, environment):
    """Initialize the Pinecone client (pinecone-client v2 API)."""
    pinecone.init(api_key=api_key, environment=environment)


# Display names used in the system prompt for each scripture index.
BOOK_NAMES = {"gita": "Bhagwad Gita", "bible": "Bible", "quran": "Quran"}


def qa_book(query, openaikey, book):
    """Answer `query` from the verses of `book` ("gita", "bible" or "quran").

    Embeds the query, retrieves the most similar verses from the book's
    Pinecone index, and asks gpt-3.5-turbo to answer from those verses only.
    """
    # Basic checks. Return a (message, empty-context) pair so both Gradio
    # output boxes always receive a value.
    if not query:
        return "Please enter your query.", ""
    if not openaikey:
        return "Please enter your OpenAI API key.", ""

    openai.api_key = openaikey

    # Embed the user query (OpenAI python client < 1.0 API).
    response = openai.Embedding.create(
        input=[query], model="text-embedding-ada-002"
    )
    embedding = response["data"][0]["embedding"]

    # Each book lives in its own Pinecone project; credentials come from
    # environment variables such as PINECONE_API_KEY_GITA.
    pinecone_api_key = os.getenv(f"PINECONE_API_KEY_{book.upper()}")
    pinecone_environment = os.getenv(f"PINECONE_ENVIRONMENT_{book.upper()}")
    init_pinecone(pinecone_api_key, pinecone_environment)
    index = pinecone.Index(book)

    # Verse texts are stored locally, keyed by the ids held in Pinecone.
    with open("data.json", "r") as f:
        data = json.load(f)

    # Retrieve the 8 nearest verses and concatenate them as context.
    res = index.query(vector=embedding, top_k=8)
    ids = [match["id"] for match in res["matches"]]
    context = ""
    for verse_id in ids:
        context += str(verse_id) + ": " + data[book][str(verse_id)] + "\n\n"

    systemprompt = (
        f"You are not an AI Language model. You will be a {BOOK_NAMES[book]} "
        "Assistant to the user. Restrict yourself to the context of the "
        "question."
    )
    userprompt = f"Verses:\n\n{context}\n\nQuestion: {query}\n\nAnswer:\n\n"

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": systemprompt},
            {"role": "user", "content": userprompt},
        ],
        max_tokens=256,
        temperature=0.0,
    )
    answer = response["choices"][0]["message"]["content"]
    return answer, context


def qa_gita(query, openaikey):
    return qa_book(query, openaikey, "gita")


def qa_bible(query, openaikey):
    return qa_book(query, openaikey, "bible")


def qa_quran(query, openaikey):
    return qa_book(query, openaikey, "quran")


def cleartext(query, output, references):
    """Clear the query, answer and references textboxes."""
    return ["", "", ""]


with gr.Blocks() as demo:
    gr.Markdown(
        """