sun-tana committed
Commit dc893c6
1 Parent(s): bd7364d

revise logic

Files changed (1)
app.py +42 -28
app.py CHANGED
@@ -22,17 +22,21 @@ max_seq_length = 32
 env_decode ={}
 with open('tf_labels6.json', encoding='utf-8') as fh:
     env_decode = json.load(fh)
+    env_decode = {int(x):y for x,y in env_decode.items()}

 hour_decode={}
 with open('tf_labels7.json', encoding='utf-8') as fh:
     hour_decode = json.load(fh)
+    hour_decode = {int(x):y for x,y in hour_decode.items()}

 minute_decode={}
 with open('tf_labels8.json', encoding='utf-8') as fh:
     minute_decode = json.load(fh)
+    minute_decode = {int(x):y for x,y in minute_decode.items()}

 def create_model():

+    # defined architecture for load_model
     inputs = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32)
     embedding_layer = transformer_model(inputs)[0]

@@ -81,9 +85,10 @@ def create_model():
     output_layer7 = Dense(25, activation='softmax', name='output7')(x7)
     output_layer8 = Dense(61, activation='softmax', name='output8')(x8)

+    # train only last layer of transformer
     for i,layer in enumerate(transformer_model.roberta.encoder.layer[:-1]):
         transformer_model.roberta.encoder.layer[i].trainable = False
-    # define the model #input_layer inputs
+    # define the model inputs outputs
     model = Model(inputs=inputs , outputs=[output_layer1, output_layer2, output_layer3,output_layer4,output_layer5,output_layer6,output_layer7,output_layer8])

     opt = keras.optimizers.Adam(learning_rate=3e-5)
@@ -93,12 +98,20 @@ def create_model():
                   'categorical_accuracy'
                   ])

+    #load weight
     model.load_weights("t1_m1.h5")
     return model


 model =create_model()

+room_dict = {
+    'ห้องนั่งเล่น': 'Living Room','ห้องครัว':'Kitchen','ห้องนอน':'Bedroom','ห้องน้ำ':'Bathroom','ห้องรับประทานอาหาร': 'Dining Room','ห้องสมุด': 'Library','ห้องพักผู้มาเยือน': 'Guest Room','ห้องเล่นเกม':'Game Room','ห้องซักผ้า':'Laundry Room','ระเบียง':'balcony','ไม่มีห้อง':'no room'
+}
+
+scene_dict = {
+    'ซีน เอ':'scene A','ซีน บี':'scene B','ซีน ซี':'scene C','ซีน ดี':'scene D','ซีน อี':'scene E','ซีน เอฟ':'scene F','ซีน จี':'scene G','ซีน เอช':'scene H','ไม่มีซีน':'no scene'
+}


 def predict(text):
@@ -106,22 +119,22 @@ def predict(text):
     spilt_thai_text = [word_tokenize(x) for x in test_texts]
     new_input_ids = tokenizer(spilt_thai_text, padding=True, truncation=True, return_tensors="tf",is_split_into_words=True)["input_ids"]
     test_padded_sequences = pad_sequences(new_input_ids, maxlen=max_seq_length,padding='post',truncating='post',value=1) #post pre
-    print(test_padded_sequences.shape)
     predicted_labels = model.predict(test_padded_sequences)
-    output = []
+
+    # default answer
+    tmp = {
+        'command' : "not recognized",
+        'room' : None,
+        'device' : None,
+        "hour" : None,
+        "minute": None
+    }

     for i in range(len(test_texts)):
-        print(test_texts[i])
         valid = 1 if predicted_labels[0][i] > 0.5 else 0
         is_scene = 1 if predicted_labels[1][i] > 0.5 else 0
         has_num = 1 if predicted_labels[2][i] > 0.5 else 0
-        print(f'is_valid : {valid}')
-        print(f'is_scene : {is_scene}')
-        print(f'has_num : {has_num}')
-
         turn = 1 if predicted_labels[3][i] > 0.5 else 0
-        print(f'turn_on_off : {turn}')
-        print(f'device : ไฟ')

         env_id = np.argmax(predicted_labels[5][i])
         env_label = env_decode[env_id]
@@ -131,28 +144,29 @@ def predict(text):

         minute_id = np.argmax(predicted_labels[7][i])
         minute_label = minute_decode[minute_id]
-        print(f'env : {env_label}')
-        print(f'hour : {hour_label}')
-        print(f'minute : {minute_label}')
-        print('----')
-        tmp = {
-            'valid' : valid,
-            'is_scene' : is_scene,
-            'has_num' : has_num,
-            'turn_on_off' : turn,
-            'device' : 'ไฟ',
-            'env' : env_label,
-            'hour' : hour,
-            'minute' : minute,
-        }
-        output.append(tmp)
-    return output
+
+
+        if valid:
+            tmp['device'] = 'ไฟ'
+            tmp['command'] = 'turn on' if turn else 'turn off'
+            if not is_scene:
+                tmp['room'] = room_dict[env_label] if env_label in room_dict else room_dict['ไม่มีห้อง']
+            else:
+                tmp['room'] = scene_dict[env_label] if env_label in scene_dict else room_dict['ไม่มีซีน']
+
+            if has_num:
+                tmp['hour'] = hour_label
+                tmp['minute'] = minute_label
+
+
+    return tmp

 iface = gr.Interface(
     fn=predict,
     inputs='text',
-    outputs='label',
-    examples=[["Hello! My name is Omar"]]
+    outputs='json',
+    examples=[["เปิดไฟห้องนอนหน่อย"],["เปิดไฟซีนเอ"],["ปิดไฟห้องรับประทานอาหารเวลา4ทุ่มสามสิบเจ็ดนาที"],['ปิดไฟห้องน้ำเวลาบ่ายโมงห้าสิบนาที'],["โย่ และนี่คือเสียงจากเด็กวัด"]],
+    interpretation="default",
 )

 iface.launch()
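A note on the re-keying added in the first hunk: json.load always yields string keys, while np.argmax returns an integer index, so env_decode[env_id] would otherwise raise KeyError. A minimal standalone sketch, with hypothetical two-label file contents standing in for tf_labels6.json:

import json
import numpy as np

# Hypothetical label map standing in for tf_labels6.json; json.load gives str keys.
raw = json.loads('{"0": "no room", "1": "Living Room"}')
probs = np.array([0.1, 0.9])
env_id = np.argmax(probs)                          # integer (numpy) index

env_decode = {int(x): y for x, y in raw.items()}   # same conversion as the commit
print(env_decode[int(env_id)])                     # -> Living Room
# raw[env_id] would raise KeyError: the key "1" is a string, not the integer 1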
 
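The rewritten predict() also depends on the model's multi-output structure: model.predict() on a Keras model with eight named heads returns a list of eight arrays, which the loop above thresholds at 0.5 for the first four heads and decodes with np.argmax for the categorical ones. A standalone sketch with dummy arrays; all head sizes except output7 (25 classes) and output8 (61 classes) are assumptions for illustration:

import numpy as np

# Dummy stand-ins for model.predict() on a single input text.
predicted_labels = [
    np.array([[0.91]]),     # output1 -> valid
    np.array([[0.12]]),     # output2 -> is_scene
    np.array([[0.77]]),     # output3 -> has_num
    np.array([[0.88]]),     # output4 -> turn on/off
    np.random.rand(1, 8),   # output5 (not used by predict); size assumed
    np.random.rand(1, 20),  # output6 -> env (room/scene) label; size assumed
    np.random.rand(1, 25),  # output7 -> hour, matches Dense(25, ...)
    np.random.rand(1, 61),  # output8 -> minute, matches Dense(61, ...)
]

i = 0
valid = 1 if predicted_labels[0][i] > 0.5 else 0   # binary heads: threshold at 0.5
turn = 1 if predicted_labels[3][i] > 0.5 else 0
env_id = np.argmax(predicted_labels[5][i])         # categorical heads: argmax
hour_id = np.argmax(predicted_labels[6][i])
minute_id = np.argmax(predicted_labels[7][i])
print(valid, turn, env_id, hour_id, minute_id)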
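With this commit, predict() returns a single dict instead of printing, and the interface renders it with outputs='json'. A quick sanity check of the return contract, placed just before iface.launch(); the expected values in the comments are illustrative, since they depend on the trained weights:

# Expected keys: command, room, device, hour, minute.
print(predict("เปิดไฟห้องนอนหน่อย"))
# e.g. {'command': 'turn on', 'room': 'Bedroom', 'device': 'ไฟ', 'hour': None, 'minute': None}

print(predict("โย่ และนี่คือเสียงจากเด็กวัด"))
# Should fall through to the default:
# {'command': 'not recognized', 'room': None, 'device': None, 'hour': None, 'minute': None}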