xtlyxt committed on
Commit
fdaa538
1 Parent(s): 546add8

Update faceapp.py

Browse files
Files changed (1) hide show
  1. faceapp.py +45 -22
faceapp.py CHANGED
@@ -1,11 +1,12 @@
1
  import streamlit as st
2
  from PIL import Image
3
- from transformers import ViTForImageClassification, ViTImageProcessor
4
 
5
- # Load the model
6
- model_name = "trpakov/vit-face-expression"
7
- model = ViTForImageClassification.from_pretrained(model_name)
8
- image_processor = ViTImageProcessor.from_pretrained(model_name)
 
9
 
10
  # Streamlit app
11
  st.title("Emotion Recognition with vit-face-expression")
@@ -14,20 +15,42 @@ st.title("Emotion Recognition with vit-face-expression")
14
  x = st.slider('Select a value')
15
  st.write(f"{x} squared is {x * x}")
16
 
17
- # Upload image
18
- uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png"])
19
-
20
- if uploaded_image:
21
- image = Image.open(uploaded_image)
22
- inputs = image_processor(images=image, return_tensors="pt")
23
- pixel_values = inputs.pixel_values
24
-
25
- # Predict emotion
26
- with torch.no_grad():
27
- outputs = model(pixel_values)
28
- predicted_class = torch.argmax(outputs.logits, dim=1).item()
29
-
30
- emotion_labels = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
31
- predicted_emotion = emotion_labels[predicted_class]
32
-
33
- st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from PIL import Image
from transformers import pipeline

# Image-classification pipeline over the face-expression ViT.
# top_k=None makes pipe(image) return a list of {"label", "score"} dicts
# covering EVERY class, sorted by descending score (not just the best one).
pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)

# Class labels for trpakov/vit-face-expression.
# NOTE(review): the original list held only 5 labels ("Neutral", "Sad",
# "Angry", "Surprised", "Happy") and did not match the model's 7 output
# classes — confirm this corrected list against the model card.
emotion_labels = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]

# Streamlit app
st.title("Emotion Recognition with vit-face-expression")

x = st.slider('Select a value')
st.write(f"{x} squared is {x * x}")

# Upload images (multiple files allowed)
uploaded_images = st.file_uploader("Upload images", type=["jpg", "png"], accept_multiple_files=True)

if st.button("Predict Emotions") and uploaded_images:
    # Open and classify every uploaded image once, up front; both display
    # branches below reuse these (the original duplicated this work per branch).
    images = [Image.open(img) for img in uploaded_images]
    results = [pipe(image) for image in images]

    if len(uploaded_images) == 2:
        # Exactly two images: show them side by side in two columns.
        col1, col2 = st.columns(2)
        for i, col in enumerate((col1, col2)):
            top = results[i][0]  # highest-scoring class for image i
            # Labels may look like "...-<emotion>" / "..._<emotion>"; keep the tail.
            predicted_emotion = top["label"].split("_")[-1].capitalize()
            col.image(images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            col.write(f"Emotion Scores for {predicted_emotion}: {top['score']:.4f}")

            # BUG FIX: the original did
            #   zip(emotion_labels, results[i][0]["score"])
            # but "score" is a single float, so zip() raised TypeError.
            # Iterate the full per-class result list (excluding the top
            # prediction, which is already shown above) instead.
            st.write(f"Emotion Scores for other categories (Image {i+1}):")
            for entry in results[i][1:]:
                label = entry["label"].split("_")[-1].capitalize()
                st.write(f"{label}: {entry['score']:.4f}")
    else:
        # Any other count: stack each image vertically with its top prediction.
        for i, result in enumerate(results):
            top = result[0]
            predicted_emotion = top["label"].split("_")[-1].capitalize()
            st.image(images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            st.write(f"Emotion Scores for Image {i+1}:")
            st.write(f"{predicted_emotion}: {top['score']:.4f}")