import streamlit as st
import json
from transformers import pipeline


def load_model(model_name):
    return pipeline("text-generation", model=model_name)
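
# Note (assumption, not part of the original app): the pipeline above is rebuilt on every
# Streamlit rerun. On Streamlit versions that provide st.cache_resource, the loader could
# be cached so repeated prompts reuse the already-loaded model, e.g.:
#
#   @st.cache_resource
#   def load_model(model_name):
#       return pipeline("text-generation", model=model_name)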


def main():
    if "generated_widget_id" not in st.session_state:
        st.session_state["generated_widget_id"] = None

    st.title("Prebid Config Generator")
st.write("Enter a Prebid config setting, such as 'bidderTimeout', and get a generated Prebid config output starting from that setting onward. Using '{' will generate a Prebid config from the beginning. The model currently has a capped output of 1000 characters.") | |
st.subheader("Intended Uses") | |
st.write("This model is designed to assist publishers in understanding and exploring how most and advanced publishers configure their Prebid settings. It can serve as a valuable reference to gain insights into common configurations, best practices, and different approaches used by publishers across various domains. The model should be seen as a helpful tool to gain inspiration and understanding of common Prebid settings but not as a substitute for thorough testing and manual review of the final configurations.") | |
st.write("To learn more about the default model, visit the [pbjsGPT2 model page](https://huggingface.co/PeterBrendan/pbjs_gpt2). To learn more about the advanced model, visit the [pbjsGPT2v2 model page](https://huggingface.co/PeterBrendan/pbjsGPT2v2). You can also refer to the [official Prebid Documentation on pbjs.setConfig](https://docs.prebid.org/dev-docs/publisher-api-reference/setConfig.html) for more information. There is a [Prebid Modules](https://huggingface.co/spaces/PeterBrendan/Prebid_Modules) version as well.") | |
st.write("*Note:* The model may take a moment to generate the output, typically up to 60 seconds. It will refresh after each prompt or button click, so please use your inputs judiciously. Thank you for your patience!") | |

    # Default prompts (pbjs.setConfig keys)
    default_prompts = ["{", "bidderTimeout", "bidderSequence", "useBidCache", "customPriceBucket", "coppa"]

    # Create a selectbox for default prompts
    default_prompt = st.selectbox("Choose a default prompt:", default_prompts)

    # Create a text input field for a custom prompt
    custom_prompt = st.text_input("Enter a custom prompt:", "")

    # Prefer a custom prompt when one is entered; otherwise fall back to the selected default
    user_input = custom_prompt if custom_prompt else default_prompt

    # Only generate output when there is a prompt
    if user_input:
        # Select the model: st.button returns True only on the rerun triggered by the click,
        # so "Advanced Mode" applies to that single run; otherwise the default model is used
        model_name = "PeterBrendan/pbjsGPT2v2" if st.button("Advanced Mode") else "PeterBrendan/pbjs_gpt2"

        # Load the Hugging Face model
        generator = load_model(model_name)

        # Display a 'Generating Output' message while the model runs
        output_placeholder = st.empty()
        with output_placeholder:
            st.write("Generating Output...")

        # Generate text based on the user input (max_length is measured in tokens, prompt included)
        generated_text = generator(user_input, max_length=1000, num_return_sequences=1)[0]["generated_text"]

        # Clear 'Generating Output' message and display the generated text
        output_placeholder.empty()
        st.write("Generated Text:")
        try:
            parsed_json = json.loads(generated_text)
            beautified_json = json.dumps(parsed_json, indent=4)
            st.code(beautified_json, language="json")
        except json.JSONDecodeError:
            st.write(generated_text)


# Run the app
if __name__ == "__main__":
    main()
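
# Usage sketch (assumptions, not from the original Space: dependency names are the usual
# ones for a transformers text-generation pipeline; versions are not pinned here):
#   pip install streamlit transformers torch
#   streamlit run app.py
# Streamlit then serves the app locally, at http://localhost:8501 by default.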