Not-Adam committed on
Commit 96ca2bb
1 Parent(s): a22c9a9
Files changed (1)
  1. app.py +38 -49
app.py CHANGED
@@ -11,7 +11,7 @@ from io import BytesIO
 import face_recognition
 from turtle import title
 from openai import OpenAI
-from collections import Counter
+from collections import Counter, defaultdict
 from transformers import pipeline
 
 import urllib.request
@@ -145,6 +145,13 @@ def get_colour(image_urls, category):
 
 
 
+# Function for get_predicted_attributes
+def get_most_common_label(responses):
+    feature_scores = defaultdict(float)
+    for response in responses:
+        label, score = response[0]['label'].split(", clothing:")[0], response[0]['score']
+        feature_scores[label] += score
+    return max(feature_scores, key=feature_scores.get), feature_scores[max(feature_scores, key=feature_scores.get)]
 
 @spaces.GPU
 def get_predicted_attributes(image_urls, category):
@@ -159,56 +166,38 @@ def get_predicted_attributes(image_urls, category):
         if len(values) == 0:
             continue
 
-        # Adjust labels for the pipeline to be in format: "{attr}: {value}, clothing: {category}"
-        attribute_formatted = attribute.replace("colartype", "collar").replace("sleevelength", "sleeve length").replace("fabricstyle", "fabric")
-        values_formatted = [f"{attribute_formatted}: {value.strip()}, clothing: {category}" for value in values]
-
-        # Get the predicted values for the attribute
-        responses = pipe(image_urls, candidate_labels=values_formatted)
-        result = [response[0]['label'].split(", clothing:")[0] for response in responses]
-        common_result.append(Counter(result).most_common(1))
-        print("Common Result:", common_result)
-
-        # If attribute is details, then remove the obtained label from the values, and get the next most common
-        if attribute == "details":
-            print(values_formatted, result[0] + f", clothing: {category}")
-            values_formatted.remove(result[0] + f", clothing: {category}")
-            responses = pipe(image_urls, candidate_labels=values_formatted)
-            i = 0
-            print("Responses: ", responses)
-            while len(responses) > 0 and responses[0][0]['score'] > 0.6 and i < 2:
-                result = [response[0]['label'].split(", clothing:")[0] for response in responses]
-                common_result.append(Counter(result).most_common(1))
-
-                values_formatted.remove(result[0] + f", clothing: {category}")
-                responses = pipe(image_urls, candidate_labels=values_formatted)
-                print("Responses: ", responses)
-                i += 1
-
-
-    # Clean up the results into one long string
-    for i, result in enumerate(common_result):
-        common_result[i] = ", ".join([f"{x[0]}" for x in result])
-
-    result = {}
-
-    # Iterate through the list and split each item into key and value
-    for item in common_result:
-        # Split by ': ' to separate the key and value
-        key, value = item.split(': ', 1)
-
+        # Adjust labels for the pipeline
+        attribute = attribute.replace("colartype", "collar").replace("sleevelength", "sleeve length").replace("fabricstyle", "fabric")
+        values = [f"{attribute}: {value.strip()}, clothing: {category}" for value in values]
+
+        # Get the predicted values for the attribute
+        responses = pipe(product['Images'].values[0], candidate_labels=values, device=device)
+        most_common, score = get_most_common_label(responses)
+        common_result.append(most_common)
+
+        if attribute == "details":
+            # Process additional details labels if the score is higher than 0.8
+            for _ in range(2):
+                values = [value for value in values if value != f"{most_common}, clothing: {category}"]
+                responses = pipe(product['Images'].values[0], candidate_labels=values, device=device)
+                most_common, score = get_most_common_label(responses)
+                if score > 0.8:
+                    common_result.append(most_common)
+
+    # Convert common_result into a dictionary
+    final = {}
+    details_count = 0
+
+    for result in common_result:
+        result = result.replace("collar", "colartype").replace("sleeve length", "sleevelength").replace("fabric", "fabricstyle")
+        key, value = result.split(": ")
         if key == "details":
-            print(value)
-            details_split = value.split(" , ")
-            if len(details_split) == 2:
-                result["details1"] = details_split[0].lower()
-                result["details2"] = details_split[1].lower()
-            else:
-                result["details1"] = value.lower() # If there's only one detail, assign it to details 1
-        else:
-            result[key.lower().replace("collar", "colartype").replace("sleeve length", "sleevelength").replace("fabric", "fabricstyle")] = value.lower()
+            if details_count > 0:
+                key += str(details_count)
+            details_count += 1
+        final[key] = value.lower()
 
-    return result
+    return final
 
 def get_openAI_tags(image_urls):
     # Create list containing JSONs of each image URL
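
For context on the new helper, here is a minimal, self-contained sketch (illustration only, not part of the commit) of how get_most_common_label behaves: it sums each image's top-1 zero-shot score per label and returns the label with the highest total together with that total. The fake_responses data below is hypothetical, shaped like the output of a transformers zero-shot image-classification pipeline called on several images (one list per image, entries sorted by descending score); the labels and scores are made up for illustration.

from collections import defaultdict

def get_most_common_label(responses):
    # Sum each image's top-1 score per label; return the best label and its total.
    feature_scores = defaultdict(float)
    for response in responses:
        label = response[0]['label'].split(", clothing:")[0]
        feature_scores[label] += response[0]['score']
    best = max(feature_scores, key=feature_scores.get)
    return best, feature_scores[best]

# Hypothetical pipeline output for three images of the same product.
fake_responses = [
    [{'label': 'collar: round, clothing: dress', 'score': 0.71},
     {'label': 'collar: v-neck, clothing: dress', 'score': 0.29}],
    [{'label': 'collar: v-neck, clothing: dress', 'score': 0.55},
     {'label': 'collar: round, clothing: dress', 'score': 0.45}],
    [{'label': 'collar: round, clothing: dress', 'score': 0.80},
     {'label': 'collar: v-neck, clothing: dress', 'score': 0.20}],
]

label, score = get_most_common_label(fake_responses)
print(label, round(score, 2))  # collar: round 1.51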
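
The rewritten dictionary-building loop replaces the old details1/details2 string splitting with a counter that numbers repeated "details" keys. A short sketch of that behaviour follows, again with hypothetical common_result values chosen only to show the keying; the real values come from the pipeline labels above.

# Hypothetical accumulated labels for one product.
common_result = ['details: Pleated', 'details: Belted', 'collar: Round']
final, details_count = {}, 0
for result in common_result:
    # Map the display names back to the stored attribute names, as in the commit.
    result = result.replace("collar", "colartype")
    key, value = result.split(": ")
    if key == "details":
        if details_count > 0:
            key += str(details_count)
        details_count += 1
    final[key] = value.lower()
print(final)  # {'details': 'pleated', 'details1': 'belted', 'colartype': 'round'}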