karwanjiru committed
Commit 091d316
1 Parent(s): 779da7d

content moderation fixed

Files changed (2)
  1. app.py +19 -41
  2. requirements.txt +3 -1
app.py CHANGED
@@ -1,15 +1,16 @@
 import os
-from PIL import Image
+from io import BytesIO
+import random
 import torch
-from torchvision import transforms
+from PIL import Image
 from transformers import AutoProcessor, FocalNetForImageClassification
-import gradio as gr
-import numpy as np
-import random
 from diffusers import DiffusionPipeline
+from detoxify import Detoxify
+import gradio as gr
 from huggingface_hub import InferenceClient
 import requests
-from io import BytesIO
+from torchvision import transforms
+import numpy as np

 # Paths and model setup
 model_path = "MichalMlodawski/nsfw-image-detection-large"
@@ -38,27 +39,28 @@ device = "cuda" if torch.cuda.is_available() else "cpu"

 # Load the diffusion pipeline
 if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, use_safetensors=True)
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
-else:
+else:
     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
     pipe = pipe.to(device)

 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024

 # Initialize the InferenceClient
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

+# Function to analyze text
+def analyze_text(input_text):
+    results = Detoxify('original').predict(input_text)
+    return results
+
 # Inference function for generating images
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
     generator = torch.Generator().manual_seed(seed)
-
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
@@ -68,28 +70,23 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
         height=height,
         generator=generator
     ).images[0]
-
     return image

 # Respond function for the chatbot
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
-
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
-
     messages.append({"role": "user", "content": message})
-
     response = client.chat_completion(
         messages,
         max_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
     )
-
     return response.choices[0].message['content']

 # Function to generate posts
@@ -104,9 +101,10 @@ def generate_post(prompt, max_tokens, temperature, top_p):

 # Function to moderate posts
 def moderate_post(post):
-    # Implement your post moderation logic here
-    if "inappropriate" in post:
-        return "Post does not adhere to community guidelines."
+    results = Detoxify('original').predict(post)
+    for key, value in results.items():
+        if value > 0.5:
+            return "Post does not adhere to community guidelines."
     return "Post adheres to community guidelines."

 # Function to generate images using the diffusion pipeline
@@ -117,26 +115,6 @@ def generate_image(prompt):

 # Function to moderate images
 def moderate_image(image):
-    # Convert the PIL image to a format that can be sent for moderation
-    buffered = BytesIO()
-    image.save(buffered, format="JPEG")
-    image_bytes = buffered.getvalue()
-
-    # Replace with your actual image moderation API endpoint
-    moderation_api_url = "https://example.com/moderation/api"
-
-    # Send the image to the moderation API
-    response = requests.post(moderation_api_url, files={"file": image_bytes})
-    result = response.json()
-
-    # Check the result from the moderation API
-    if result.get("moderation_status") == "approved":
-        return "Image adheres to community guidelines."
-    else:
-        return "Image does not adhere to community guidelines."
-
-# Function to classify NSFW images
-def classify_nsfw(image):
     image_tensor = transform(image).unsqueeze(0)
     inputs = feature_extractor(images=image, return_tensors="pt")
     with torch.no_grad():
@@ -214,6 +192,6 @@ with gr.Blocks(css=css) as demo:
     selected_image = gr.Image(type="pil", label="Upload Image for Moderation")
     classify_button = gr.Button("Classify Image")
     classification_result = gr.Textbox(label="Classification Result")
-    classify_button.click(classify_nsfw, selected_image, classification_result)
+    classify_button.click(moderate_image, selected_image, classification_result)

 demo.launch()
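
For reference, the sketch below shows the moderation path this commit introduces, in isolation from the Gradio app: Detoxify scores post text across several toxicity categories (the 0.5 threshold mirrors moderate_post), and the FocalNet checkpoint loaded in app.py classifies an uploaded image. It is a minimal sketch, not the app itself: the sample text, the example.jpg path, and the printed label (taken from the checkpoint's id2label mapping) are illustrative placeholders.

# Minimal standalone sketch of the moderation flow introduced in this commit.
# Assumes the packages from requirements.txt are installed; the file path and
# sample text below are placeholders.
import torch
from PIL import Image
from detoxify import Detoxify
from transformers import AutoProcessor, FocalNetForImageClassification

# Text moderation: Detoxify returns {category: probability}; any score
# above 0.5 flags the post, matching moderate_post in app.py.
scores = Detoxify("original").predict("example post text")
print("flagged" if any(v > 0.5 for v in scores.values()) else "ok", scores)

# Image moderation: the NSFW FocalNet classifier loaded in app.py.
model_path = "MichalMlodawski/nsfw-image-detection-large"
processor = AutoProcessor.from_pretrained(model_path)
model = FocalNetForImageClassification.from_pretrained(model_path)

image = Image.open("example.jpg").convert("RGB")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # label names come from the checkpoint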
requirements.txt CHANGED
@@ -6,4 +6,6 @@ torch
 transformers
 xformers
 torchvision
-Pillow
+Pillow
+detoxify
+altair==4.1.0