4rtemi5 committed
Commit 5701708
Parents (2): 7377c04 12ddc30

Merge branch 'main' of https://huggingface.co/spaces/clip-italian/clip-italian-demo

Files changed (6):
  1. app.py +2 -1
  2. examples.py +12 -9
  3. home.py +2 -0
  4. image2text.py +9 -12
  5. introduction.md +2 -3
  6. text2image.py +9 -12
app.py CHANGED
@@ -15,7 +15,8 @@ PAGES = {
 st.sidebar.title("Explore our CLIP-Italian demo")
 
 logo = Image.open("static/img/clip_italian_logo.png")
-st.sidebar.image(logo, caption="CLIP-Italian logo")
+st.sidebar.image(logo)
+#, caption="CLIP-Italian logo"
 
 page = st.sidebar.radio("", list(PAGES.keys()))
 PAGES[page].app()
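
For readers new to the codebase: the lines above wire up the standard multi-page Streamlit pattern, a dict from page names to modules that each expose an `app()` function, with `st.sidebar.radio` choosing which one to render. A minimal, self-contained sketch of that pattern (the inline stub pages are illustrative, not the real home/text2image/image2text/examples modules):

```python
# Minimal sketch of the PAGES / sidebar-radio pattern used in app.py.
# In the real app the pages are separate modules; here they are stubbed
# inline so the sketch runs on its own with `streamlit run`.
import streamlit as st


class StubPage:
    def __init__(self, name: str):
        self.name = name

    def app(self) -> None:
        st.write(f"This is the {self.name} page")


PAGES = {
    "Home": StubPage("Home"),
    "Text to Image": StubPage("Text to Image"),
    "Image to Text": StubPage("Image to Text"),
}

st.sidebar.title("Explore our CLIP-Italian demo")
page = st.sidebar.radio("", list(PAGES.keys()))  # page selector in the sidebar
PAGES[page].app()                                # render the selected page
```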
examples.py CHANGED
@@ -3,15 +3,17 @@ import streamlit as st
 
 
 def app():
-    st.title("Examples & Applications")
+    #st.title("Examples & Applications")
+    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Examples & Applications </h1>", unsafe_allow_html=True)
+    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Complex Queries -Image Retrieval </h2>", unsafe_allow_html=True)
+
     st.write(
         """
 
-    ## Image Retrieval
-
-    Even though we trained the Italian CLIP model on way less examples than the original
-    OpenAI's CLIP, our training choices and quality datasets led to impressive results!
-    Here, we collected few of **the most impressive text-image associations** learned by our model.
+
+    Even though we trained the Italian CLIP model on way less examples(~1.4M) than the original
+    OpenAI's CLIP (~400M), our training choices and quality datasets led to impressive results!
+    Here, we present some of **the most impressive text-image associations** learned by our model.
 
     Remember you can head to the **Text to Image** section of the demo at any time to test your own🤌 Italian queries!
 
@@ -19,7 +21,7 @@ def app():
     )
 
     st.markdown("### 1. Actors in Scenes")
-    st.markdown("These examples comes from the CC dataset")
+    st.markdown("These examples were taken from the CC dataset")
 
     st.subheader("una coppia")
     st.markdown("*a couple*")
@@ -39,7 +41,7 @@ def app():
     st.image("static/img/examples/couple_3.jpeg")
 
     st.markdown("### 2. Dresses")
-    st.markdown("These examples comes from the Unsplash dataset")
+    st.markdown("These examples were taken from the Unsplash dataset")
 
     col1, col2 = st.beta_columns(2)
     col1.subheader("un vestito primavrile")
@@ -50,10 +52,11 @@ def app():
     col2.markdown("*a dress for the autumn*")
     col2.image("static/img/examples/vestito_autunnale.png")
 
-    st.markdown("## Image Classification")
+    #st.markdown("## Image Classification")
+    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Zero Shot Image Classification </h2>", unsafe_allow_html=True)
     st.markdown("We report this cool example provided by the "
                 "[DALLE-mini team](https://github.com/borisdayma/dalle-mini). "
                 "Is the DALLE-mini logo an *avocado* or an armchair (*poltrona*)?")
 
     st.image("static/img/examples/dalle_mini.png")
-    st.markdown("It seems it's half an armchair and half an avocado! We thank the team for the great idea :)")
+    st.markdown("It seems it's half an armchair and half an avocado! We thank the DALLE-mini team for the great idea :)")
home.py CHANGED
@@ -7,5 +7,7 @@ def read_markdown_file(markdown_file):
 
 
 def app():
+    st.markdown("<h1 style='text-align: center; color: #CD212A;'> CLIP-Italian </h1>", unsafe_allow_html=True)
+
     intro_markdown = read_markdown_file("introduction.md")
     st.markdown(intro_markdown, unsafe_allow_html=True)
image2text.py CHANGED
@@ -10,25 +10,22 @@ import gc
 
 
 def app():
-    st.title("From Image to Text")
+    #st.title("From Image to Text")
+    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Zero Shot Image Classification </h1>", unsafe_allow_html=True)
+    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Image to Text </h2>", unsafe_allow_html=True)
     st.markdown(
         """
 
-    ### 👋 Ciao!
-
-    Here you can find the captions or the labels that are most related to a given image. It is a zero-shot
-    image classification task!
-
-    🤌 Italian mode on! 🤌
-
-    For example, try to write "gatto" (cat) in the space for label1 and "cane" (dog) in the space for label2 and the run
+    👋 Ciao! Here you can find the captions or the labels that are most related to a given image.
+
+    Try typing "gatto" (cat) in the space for label1 and "cane" (dog) in the space for label2 and click
     "classify"!
 
     """
     )
 
     image_url = st.text_input(
-        "You can input the URL of an image",
+        "YOU CAN INPUT THE URL OF AN IMAGE : ",
        value="https://www.petdetective.it/wp-content/uploads/2016/04/gatto-toilette.jpg",
     )
 
@@ -38,14 +35,14 @@ def app():
 
     with col2:
         captions_count = st.selectbox(
-            "Number of labels", options=range(1, MAX_CAP + 1), index=1
+            "NUMBER OF LABELS", options=range(1, MAX_CAP + 1), index=1
         )
-        compute = st.button("Classify")
+        compute = st.button("CLASSIFY")
 
     with col1:
         captions = list()
         for idx in range(min(MAX_CAP, captions_count)):
-            captions.append(st.text_input(f"Insert label {idx+1}"))
+            captions.append(st.text_input(f"INSERT LABEL {idx+1}"))
 
     if compute:
         captions = [c for c in captions if c != ""]
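
Behind this UI, zero-shot classification reduces to embedding the image and each candidate label with the two CLIP-Italian encoders and ranking labels by cosine similarity. The sketch below shows only that ranking step on precomputed embeddings; the toy vectors and the `rank_labels` helper are placeholders for illustration, not the demo's actual code:

```python
# Sketch of the ranking step in zero-shot classification: given one image
# embedding and one embedding per candidate label, pick the closest label
# by cosine similarity. Embeddings here are toy 3-D vectors.
import numpy as np


def rank_labels(image_embedding: np.ndarray, label_embeddings: dict) -> list:
    """Return (label, cosine similarity) pairs, best match first."""
    img = image_embedding / np.linalg.norm(image_embedding)
    scored = [
        (label, float(img @ (emb / np.linalg.norm(emb))))
        for label, emb in label_embeddings.items()
    ]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)


image_vec = np.array([0.9, 0.1, 0.0])            # placeholder image embedding
labels = {
    "gatto": np.array([0.8, 0.2, 0.1]),          # placeholder text embeddings
    "cane": np.array([0.1, 0.9, 0.3]),
}
print(rank_labels(image_vec, labels))            # "gatto" ranks first
```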
introduction.md CHANGED
@@ -1,11 +1,10 @@
-# CLIP-Italian
 
-CLIP-Italian is a multimodal model trained on ~1.4 million Italian text-image pairs using Italian Bert model as text encoder and Vision Transformer(ViT) as image encoder using the JAX/Flax neural network library. The training was carried out during the Hugging Face Community event on Google's TPU machines, sponsored by Google Cloud.
+CLIP-Italian is a **multimodal** model trained on **~1.4 Million** Italian text-image pairs using **Italian Bert** model as text encoder and Vision Transformer **ViT** as image encoder using the **JAX/Flax** neural network library. The training was carried out during the **Hugging Face** Community event on **Google's TPU** machines, sponsored by **Google Cloud**.
 
 Clip-Italian (Contrastive Language-Image Pre-training in Italian language) is based on OpenAI’s CLIP ([Radford et al., 2021](https://arxiv.org/abs/2103.00020))which is an amazing model that can learn to represent images and text jointly in the same space.
 
 In this project, we aim to propose the first CLIP model trained on Italian data, that in this context can be considered a
-low resource language. Using a few techniques, we have been able to fine-tune a SOTA Italian CLIP model with **only 1.4 million** training samples. Our Italian CLIP model
+low resource language. Using a few techniques, we have been able to fine-tune a SOTA Italian CLIP model with **only 1.4M** training samples. Our Italian CLIP model
 is built upon the pre-trained [Italian BERT](https://huggingface.co/dbmdz/bert-base-italian-xxl-cased) model provided by [dbmdz](https://huggingface.co/dbmdz) and the OpenAI
 [vision transformer](https://huggingface.co/openai/clip-vit-base-patch32).
 
text2image.py CHANGED
@@ -108,23 +108,20 @@ headers = {
 def app():
 
     #st.title("From Text to Image")
-    st.markdown("<h1 style='text-align: center; color: #CD212A;'>Image Retrieval</h1>", unsafe_allow_html=True)
-    st.markdown("<h2 style='text-align: center; color: #008C45;font-weight:bold;'>Text to Image</h2>", unsafe_allow_html=True)
+    st.markdown("<h1 style='text-align: center; color: #CD212A;'> Image Retrieval </h1>", unsafe_allow_html=True)
+    st.markdown("<h2 style='text-align: center; color: #008C45; font-weight:bold;'> Text to Image </h2>", unsafe_allow_html=True)
     st.markdown(
         """
 
-    ### 👋 Ciao!
-
-    Here you can search for ~150.000 images in the Conceptual Captions dataset (CC) or in the Unsplash 25k Photos dataset.
-    Even though we did not train on any of these images you will see most queries make sense. When you see errors, there might be two possibilities:
-    the model is answering in a wrong way or the image you are looking for are not in the dataset and the model is giving you the best answer it can get.
-
-
-
-    🤌 Italian mode on! 🤌
-
-
-    You can choose one of our examples down below...
+
+    👋 Ciao! Here you can type Italian query and search from ~150k images in the Conceptual Captions (CC) dataset or 25k Photos in the Unsplash dataset.
+
+    Though these images were not used for training the model, you will see most queries make sense.
+
+    Rare errors might be due to 2 possibilities:
+    a)The model is answering in a wrong way or b) the image you are looking for are not in the dataset & the model is giving you the best answer it can get.
+
+    You can choose from one of the following examples :
     """
     )
 
@@ -160,7 +157,7 @@ st.markdown("<h2 style='text-align: center; color: #008C45;font-weight:bold;'>Te
 
     col1, col2 = st.beta_columns([3, 1])
     with col1:
-        query = st.text_input("... or insert an Italian query text")
+        query = st.text_input("OR INSERT AN ITALIAN QUERY TEXT : ")
     with col2:
         dataset_name = st.selectbox("IR dataset", ["CC", "Unsplash"])
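
For completeness, the text-to-image search in this page boils down to embedding the Italian query once and taking the top-k cosine matches against precomputed CC or Unsplash image embeddings. A minimal sketch of that ranking step follows; the random toy embeddings and the `top_k_images` helper are illustrative stand-ins, while the real demo loads its precomputed vectors and image URLs:

```python
# Sketch of the top-k retrieval step behind text2image.py: rank precomputed
# image embeddings by cosine similarity to a query embedding.
import numpy as np


def top_k_images(query_embedding: np.ndarray,
                 image_embeddings: np.ndarray,
                 image_ids: list,
                 k: int = 5) -> list:
    """Return the k (image_id, score) pairs closest to the query (cosine)."""
    q = query_embedding / np.linalg.norm(query_embedding)
    imgs = image_embeddings / np.linalg.norm(image_embeddings, axis=1, keepdims=True)
    scores = imgs @ q                      # one cosine similarity per image
    best = np.argsort(-scores)[:k]         # indices of the k highest scores
    return [(image_ids[i], float(scores[i])) for i in best]


# Toy usage with random 4-D embeddings (real CLIP embeddings are much larger):
rng = np.random.default_rng(0)
embeddings = rng.normal(size=(10, 4))
ids = [f"img_{i}.jpg" for i in range(10)]
print(top_k_images(rng.normal(size=4), embeddings, ids, k=3))
```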