import streamlit as st
from streamlit_extras.switch_page_button import switch_page

st.title("Grounding DINO")

st.success("""[Original tweet](https://twitter.com/mervenoyann/status/1780558859221733563) (April 17, 2024)""", icon="ℹ️")
st.markdown(""" """)

st.markdown("""
We have merged Grounding DINO in 🤗 Transformers 🦖
It's an amazing zero-shot object detection model, here's why 🧶
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_1.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""There are two zero-shot object detection model families as of now: the OWL series by Google Brain and Grounding DINO 🦕
Grounding DINO pays immense attention to detail ⬇️
Also [try it yourself](https://t.co/UI0CMxphE7).
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_2.jpeg", use_column_width=True)
st.image("pages/Grounding_DINO/image_3.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""I have also built another [application](https://t.co/4EHpOwEpm0) for GroundingSAM, combining Grounding DINO and Segment Anything by Meta for cutting-edge zero-shot image segmentation.
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_4.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""Grounding DINO essentially connects an image encoder (Swin Transformer) and a text encoder (BERT), with a decoder on top of both that outputs bounding boxes 🦖
This is quite similar to the OWL series, which uses a ViT-based detector on top of CLIP.
""", unsafe_allow_html=True)
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_5.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""The authors train Swin-L/T together with BERT contrastively (unlike CLIP, where images are matched to texts by overall similarity): the region outputs of the detection heads are aligned with the corresponding language phrases 🤩
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_6.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""The authors also form the text features at the sub-sentence level. This means certain noun phrases are extracted from the training data to remove the influence between unrelated words while keeping fine-grained information.
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_7.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""Thanks to all of this, Grounding DINO has great performance on various REC/object detection benchmarks 🏆📈
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_8.jpeg", use_column_width=True)
st.markdown(""" """)

st.markdown("""Thanks to 🤗 Transformers, you can use Grounding DINO very easily!
You can also check out [NielsRogge](https://twitter.com/NielsRogge)'s [notebook here](https://t.co/8ADGFdVkta).
""")
st.markdown(""" """)

st.image("pages/Grounding_DINO/image_9.jpeg", use_column_width=True)
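st.markdown(""" """)

st.markdown("""Below is a minimal usage sketch (not from the original thread) following the 🤗 Transformers documentation; the checkpoint, example image, and thresholds are illustrative.
""")
st.code('''
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

# Illustrative checkpoint; "IDEA-Research/grounding-dino-base" is also available
model_id = "IDEA-Research/grounding-dino-tiny"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# Text queries should be lowercase and each end with a dot
text = "a cat. a remote control."

inputs = processor(images=image, text=text, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw outputs into thresholded boxes in image coordinates
results = processor.post_process_grounded_object_detection(
    outputs,
    inputs.input_ids,
    box_threshold=0.4,
    text_threshold=0.3,
    target_sizes=[image.size[::-1]],
)
print(results)
''', language="python")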
""") st.markdown(""" """) st.image("pages/Grounding_DINO/image_9.jpeg", use_column_width=True) st.info("""Ressources: [Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499) by Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Qing Jiang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, Lei Zhang (2023) [GitHub](https://github.com/IDEA-Research/GroundingDINO) [Hugging Face documentation](https://huggingface.co/docs/transformers/model_doc/grounding-dino)""", icon="๐Ÿ“š") st.markdown(""" """) st.markdown(""" """) st.markdown(""" """) col1, col2, col3 = st.columns(3) with col1: if st.button('Previous paper', use_container_width=True): switch_page("SegGPT") with col2: if st.button('Home', use_container_width=True): switch_page("Home") with col3: if st.button('Next paper', use_container_width=True): switch_page("DocOwl 1.5")