# UI text and style constants for the Gradio demo front-end.
# NOTE(review): this file was recovered from a whitespace-mangled paste in
# which several assignments were fused onto single physical lines; line
# breaks inside the multi-line strings below were reconstructed from the
# visible content — confirm against the original rendering.

# Markdown banner shown at the top of the demo page.
title_markdown = """
LLaVA-NeXT

LLaVA OneVision: Multimodal Chat

Video Model | Github | Huggingface | Blog | More
"""

# Header for the ECG-comprehension variant of the demo.
html_header = """
LLaVA-NeXT

Teach Multimodal LLMs to Comprehend Electrocardiographic Images

Code | Checkpoints | Data | Demo
"""

# Custom CSS injected into the Gradio app (keeps action buttons from
# collapsing below a usable width on narrow screens).
block_css = """
#buttons button {
    min-width: min(120px,100%);
}
"""

# Terms-of-service notice rendered in the demo UI.
tos_markdown = """
## Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
We deploy our model backend with ZEROGPU. However, there could be congestion during the serving process, leading to delayed responses. If you encounter any issues with the webpage, kindly refresh it.
"""

# License notice for the underlying models (LLaMA and LLaVA).
learn_more_markdown = """
## License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, and [License](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b) of LLaVA. Please contact us if you find any potential violation.
"""

# BibTeX citation block shown in the demo footer.
bibtext = """
## Citation
```
@article{liu2024teach,
  title={Teach Multimodal LLMs to Comprehend Electrocardiographic Images},
  author={Ruoqi Liu, Yuelin Bai, Xiang Yue, Ping Zhang},
  journal={arXiv preprint arXiv:2410.19008},
  year={2024}
}
```
"""

# NOTE(review): the original source assigned `block_css` a second time at the
# end of the file with byte-identical content; that duplicate assignment was
# dead code and has been removed.