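# Banner linking to the LLaVA-NeXT project pages (this demo builds on LLaVA-NeXT;
# the PULSE-specific banner is html_header below).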
title_markdown = """
<div style="display: flex; justify-content: left; align-items: center; text-align: left; background: linear-gradient(45deg, rgba(195, 253, 245, 0.8), rgba(255, 0, 80, 0.3)); border-radius: 10px; box-shadow: 0 8px 16px 0 rgba(0,0,0,0.1);">
<a href="https://llava-vl.github.io/blog/2024-05-10-llava-next-stronger-llms/" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
<img src="https://i.postimg.cc/sgbKDRVP/pangea-logo.png" alt="LLaVA-NeXT" style="max-width: 80px; height: auto; border-radius: 10px;">
</a>
<div>
<h2><a href="https://llava-vl.github.io/blog/2024-05-10-llava-next-stronger-llms/">LLaVA OneVision: Multimodal Chat</a></h2>
<h5 style="margin: 0;"><a href="https://llavanext-video.lmms-lab.com/">Video Model</a> | <a href="https://github.com/LLaVA-VL/LLaVA-NeXT">Github</a> | <a href="https://huggingface.co/collections/lmms-lab/llava-next-6623288e2d61edba3ddbf5ff">Huggingface</a> | <a href="https://llava-vl.github.io/blog/2024-05-10-llava-next-stronger-llms/">Blog</a> | <a href="https://llava-vl.github.io">More</a></h5>
</div>
</div>
"""
html_header = """
<style>
/* Styles for larger screens */
.header-container {
    display: flex;
    justify-content: left;
    align-items: center;
    text-align: left;
    background: linear-gradient(45deg, rgba(195, 253, 245, 1), rgba(255, 0, 80, 0.3));
    border-radius: 10px;
    box-shadow: 0 8px 16px 0 rgba(0,0,0,0.1);
    padding: 10px 20px;
}
.header-container img {
    max-width: 80px;
    height: auto;
    border-radius: 10px;
}
.header-container a {
    color: black; /* Keep link text black regardless of theme */
    text-decoration: none;
}
/* Responsive adjustments for screens narrower than 768px */
@media (max-width: 768px) {
    .header-container {
        flex-direction: column;
        align-items: flex-start;
        padding: 10px 15px;
    }
    .header-container img {
        max-width: 60px; /* Smaller logo on mobile */
    }
    .header-container h2, .header-container h5 {
        color: black;
        text-decoration: none;
        text-align: center; /* Center text once the header stacks */
        margin-top: 5px;    /* Spacing after stacking */
    }
    .header-container h2 {
        font-size: 16px; /* Smaller title on mobile */
    }
    .header-container h5 {
        font-size: 12px; /* Smaller subtitle on mobile */
    }
}
</style>
<div class="header-container">
<a href="https://aimedlab.github.io/PULSE/" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
<img src="https://i.postimg.cc/W3p3gX3m/pulse-icon.png" alt="LLaVA-NeXT">
</a>
<div>
<h2><a href="https://aimedlab.github.io/PULSE/">Teach Multimodal LLMs to Comprehend Electrocardiographic Images</a></h2>
<h5><a href="https://github.com/AIMedLab/PULSE">Code</a> | <a href="https://huggingface.co/PULSE-ECG/PULSE-7B">Checkpoints</a> | <a href="https://huggingface.co/collections/PULSE-ECG/pulse-ecg-671eec61a17554b2d0ae0fe5">Data</a> | <a href="https://huggingface.co/spaces/PULSE-ECG/PULSE">Demo</a></h5>
</div>
</div>
"""
block_css = """
#buttons button {
min-width: min(120px,100%);
}
"""
tos_markdown = """
## Terms of use
By using this service, users agree to the following terms:

The service is a research preview intended for non-commercial use only. It provides only limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.

The model backend is deployed on ZeroGPU, so responses may be delayed during periods of congestion. If you encounter any issues with the page, please refresh it.
"""
learn_more_markdown = """
## License
The service is a research preview intended for non-commercial use only, subject to the LLaMA model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) and the LLaVA [License](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b). Please contact us if you find any potential violations.
"""
bibtext = """
## Citation
```
@article{liu2024teach,
  title={Teach Multimodal LLMs to Comprehend Electrocardiographic Images},
  author={Ruoqi Liu and Yuelin Bai and Xiang Yue and Ping Zhang},
  journal={arXiv preprint arXiv:2410.19008},
  year={2024}
}
```
"""
block_css = """
#buttons button {
min-width: min(120px,100%);
}
"""
|