RyanMullins committed
Commit ad5bf1a
Parent: f5e3203

Adding requirements.txt

Files changed (2):
  1. app.py +3 -62
  2. requirements.txt +3 -0
app.py CHANGED
@@ -2,85 +2,26 @@ from collections.abc import Sequence
 import random
 
 import gradio as gr
-import immutabledict
-import spaces
-import torch
-
-
-#### Version 1: Baseline
-# Step 1: Select and load your model
-# Step 2: Load the test dataset (4-5 examples)
-# Step 3: Run generation with and without watermarking, display the outputs
-# Step 4: User clicks the reveal button to see the watermarked vs. unwatermarked generations
-
-#### Version 2: Gamification
-# Steps 1-3 the same
-# Step 4: User marks specific generations as watermarked
-# Step 5: User clicks the reveal button to see the watermarked vs. unwatermarked generations
 
 # If the watermark is not detected, consider the use case. Could be because of
 # the nature of the task (e.g., factual responses are lower entropy) or it could
 # be another
 
-GEMMA_2B = 'google/gemma-2b'
+_GEMMA_2B = 'google/gemma-2b'
 
-PROMPTS: tuple[str] = (
+_PROMPTS: tuple[str] = (
     'prompt 1',
     'prompt 2',
     'prompt 3',
     'prompt 4',
 )
 
-WATERMARKING_CONFIG = immutabledict.immutabledict({
-    "ngram_len": 5,
-    "keys": [
-        654,
-        400,
-        836,
-        123,
-        340,
-        443,
-        597,
-        160,
-        57,
-        29,
-        590,
-        639,
-        13,
-        715,
-        468,
-        990,
-        966,
-        226,
-        324,
-        585,
-        118,
-        504,
-        421,
-        521,
-        129,
-        669,
-        732,
-        225,
-        90,
-        960,
-    ],
-    "sampling_table_size": 2**16,
-    "sampling_table_seed": 0,
-    "context_history_size": 1024,
-    "device": (
-        torch.device("cuda:0")
-        if torch.cuda.is_available()
-        else torch.device("cpu")
-    ),
-})
-
 _CORRECT_ANSWERS: dict[str, bool] = {}
 
 with gr.Blocks() as demo:
     prompt_inputs = [
         gr.Textbox(value=prompt, lines=4, label='Prompt')
-        for prompt in PROMPTS
+        for prompt in _PROMPTS
     ]
     generate_btn = gr.Button('Generate')
 
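
The removed WATERMARKING_CONFIG mirrors the parameters of SynthID Text watermarking (n-gram length, keys, sampling table, context history size). As a rough illustration of how such a config is typically consumed, here is a minimal sketch that passes an equivalent watermarking config to model.generate(). It assumes the synthid_text branch pinned in requirements.txt exposes an interface along the lines of the SynthIDTextWatermarkingConfig that later shipped in mainline transformers; the import, class name, and shortened key list are assumptions rather than code from this commit.

# Sketch only: assumes a SynthIDTextWatermarkingConfig-style interface in the
# pinned transformers branch; model choice and shortened key list are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, SynthIDTextWatermarkingConfig

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained('google/gemma-2b')
model = AutoModelForCausalLM.from_pretrained('google/gemma-2b').to(device)

# Parameters mirror the removed WATERMARKING_CONFIG (keys truncated here).
watermarking_config = SynthIDTextWatermarkingConfig(
    keys=[654, 400, 836, 123, 340, 443, 597, 160, 57, 29],
    ngram_len=5,
)

inputs = tokenizer('prompt 1', return_tensors='pt').to(device)
watermarked_ids = model.generate(
    **inputs,
    do_sample=True,
    max_new_tokens=128,
    watermarking_config=watermarking_config,  # drop this kwarg for the unwatermarked baseline
)
baseline_ids = model.generate(**inputs, do_sample=True, max_new_tokens=128)
print(tokenizer.decode(watermarked_ids[0], skip_special_tokens=True))
print(tokenizer.decode(baseline_ids[0], skip_special_tokens=True))
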
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio
+ spaces
+ transformers @ git+https://github.com/sumedhghaisas2/transformers_private@synthid_text
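
requirements.txt pins gradio, spaces, and a transformers fork carrying SynthID text support. For context, a minimal sketch of how the first two usually fit together in a Space: the generation callback is decorated with @spaces.GPU so GPU hardware is attached only while it runs, and the Gradio button is wired to it. The generate() stub and component names below are placeholders, not this app's actual code.

# Illustrative only: generate() is a stub; the real UI lives in app.py.
import gradio as gr
import spaces

@spaces.GPU  # requests a GPU slice when the callback runs on a ZeroGPU Space
def generate(prompt: str) -> str:
    return f'(generated continuation of) {prompt}'  # placeholder

with gr.Blocks() as demo:
    prompt_box = gr.Textbox(value='prompt 1', lines=4, label='Prompt')
    output_box = gr.Textbox(lines=4, label='Generation')
    generate_btn = gr.Button('Generate')
    generate_btn.click(fn=generate, inputs=prompt_box, outputs=output_box)

demo.launch()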