File size: 7,099 Bytes
1e2c7f6
 
74ed80e
 
 
 
 
 
 
 
 
 
 
 
bd7364d
74ed80e
 
e409934
 
74ed80e
 
b6d0c18
 
 
dc893c6
b6d0c18
 
 
 
dc893c6
b6d0c18
 
 
 
dc893c6
b6d0c18
74ed80e
 
dc893c6
0104595
74ed80e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f81c7d5
 
 
74ed80e
dc893c6
b6d0c18
 
dc893c6
74ed80e
b6d0c18
 
 
 
 
 
 
 
dc893c6
74ed80e
 
 
 
 
 
dc893c6
 
 
 
 
 
 
1e2c7f6
8df96f2
 
74ed80e
 
 
 
 
dc893c6
 
 
 
 
 
 
 
 
74ed80e
 
59bdd3b
 
 
 
 
 
 
 
 
 
 
 
 
dc893c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8df96f2
 
 
 
dc893c6
 
 
8df96f2
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
import gradio as gr

from pythainlp import  word_tokenize
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Conv1D, MaxPooling1D, Dense, Flatten, Concatenate, Dropout, Dot, Activation, Reshape, Permute, Multiply
from keras import backend as K
import pandas as pd
from transformers import TFAutoModel, AutoTokenizer
from sklearn.model_selection import train_test_split
import json

# Load the multilingual tokenizer and the shared transformer encoder that
# backs every prediction head of the model below.
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base",max_length=60) #xlm-roberta-base bert-base-multilingual-cased
transformer_model = TFAutoModel.from_pretrained("xlm-roberta-base") #philschmid/tiny-bert-sst2-distilled
max_seq_length = 32


def _load_label_map(path):
    """Load a JSON class-label file and return it as an {int_id: label} dict.

    JSON object keys are always strings, so they are converted back to the
    integer class ids that np.argmax produces at prediction time.
    """
    with open(path, encoding='utf-8') as fh:
        raw = json.load(fh)
    return {int(class_id): label for class_id, label in raw.items()}


# id -> label maps for the three categorical heads: environment/room (output6),
# hour (output7) and minute (output8).
env_decode = _load_label_map('tf_labels6.json')
hour_decode = _load_label_map('tf_labels7.json')
minute_decode = _load_label_map('tf_labels8.json')

def create_model():
  """Recreate the multi-task architecture and restore its trained weights.

  One flattened XLM-RoBERTa encoding feeds eight independent dense heads:
    outputs 1-5: single-unit sigmoid (binary flags),
    output 6:    119-way softmax (environment/room class),
    output 7:    25-way softmax  (hour class),
    output 8:    61-way softmax  (minute class).

  Returns:
    A compiled tf.keras Model with weights loaded from "t1_m1.h5".
  """

  def _head(features, hidden_sizes, out_units, out_activation, name):
    # Dense ReLU funnel (e.g. 64 -> 32 -> 16) ending in the task output layer.
    x = features
    for units in hidden_sizes:
      x = Dense(units, activation='relu')(x)
    return Dense(out_units, activation=out_activation, name=name)(x)

  inputs = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32)
  # [0] selects the token-level hidden-state tensor from the transformer output.
  embedding_layer = transformer_model(inputs)[0]
  flatten_layer = Flatten()(embedding_layer)

  # The five binary heads share one funnel shape; the categorical heads get
  # wider funnels to match their larger output spaces.
  outputs = [
      _head(flatten_layer, (64, 32, 16), 1, 'sigmoid', 'output1'),
      _head(flatten_layer, (64, 32, 16), 1, 'sigmoid', 'output2'),
      _head(flatten_layer, (64, 32, 16), 1, 'sigmoid', 'output3'),
      _head(flatten_layer, (64, 32, 16), 1, 'sigmoid', 'output4'),
      _head(flatten_layer, (64, 32, 16), 1, 'sigmoid', 'output5'),
      _head(flatten_layer, (512, 256, 128), 119, 'softmax', 'output6'),
      _head(flatten_layer, (128, 64, 32), 25, 'softmax', 'output7'),
      _head(flatten_layer, (256, 128, 64), 61, 'softmax', 'output8'),
  ]

  # Freeze every encoder layer except the last so fine-tuning only updates
  # the top transformer block.
  for layer in transformer_model.roberta.encoder.layer[:-1]:
    layer.trainable = False

  model = Model(inputs=inputs, outputs=outputs)

  opt = keras.optimizers.Adam(learning_rate=3e-5)
  model.compile(
      loss=['binary_crossentropy'] * 5 + ['categorical_crossentropy'] * 3,
      optimizer=opt,
      metrics=[
          tf.keras.metrics.BinaryAccuracy(),
          'categorical_accuracy',
      ])

  # Restore the fine-tuned weights saved during training.
  model.load_weights("t1_m1.h5")
  return model


# Build the model once at import time so every Gradio request reuses it.
model =create_model()

# Thai -> English display names for rooms; 'ไม่มีห้อง' ("no room") is the
# fallback entry used when the predicted label is not a known room.
room_dict = {
    'ห้องนั่งเล่น': 'Living Room','ห้องครัว':'Kitchen','ห้องนอน':'Bedroom','ห้องน้ำ':'Bathroom','ห้องรับประทานอาหาร': 'Dining Room','ห้องสมุด': 'Library','ห้องพักผู้มาเยือน': 'Guest Room','ห้องเล่นเกม':'Game Room','ห้องซักผ้า':'Laundry Room','ระเบียง':'balcony','ไม่มีห้อง':'no room'
}

# Thai -> English display names for lighting scenes; 'ไม่มีซีน' ("no scene")
# is the fallback entry for unrecognized scene labels.
scene_dict = {
    'ซีน เอ':'scene A','ซีน บี':'scene B','ซีน ซี':'scene C','ซีน ดี':'scene D','ซีน อี':'scene E','ซีน เอฟ':'scene F','ซีน จี':'scene G','ซีน เอช':'scene H','ไม่มีซีน':'no scene'
}


def predict(text):
  """Run the multi-task model on one Thai command and decode its outputs.

  Args:
    text: Raw Thai command string from the Gradio textbox.

  Returns:
    dict with keys 'command', 'room', 'device', 'hour', 'minute'.  The
    defaults ('not recognized' / None) are returned when the "valid" head
    rejects the input.
  """
  test_texts = [text]
  # Pre-segment Thai words so the subword tokenizer receives word boundaries
  # (Thai script has no spaces between words).
  split_thai_text = [word_tokenize(x) for x in test_texts]
  new_input_ids = tokenizer(split_thai_text, padding=True, truncation=True,
                            return_tensors="tf", is_split_into_words=True)["input_ids"]
  # Pad/truncate to the fixed model input length; value=1 matches the pad id
  # used during training.
  test_padded_sequences = pad_sequences(new_input_ids, maxlen=max_seq_length,
                                        padding='post', truncating='post', value=1)
  predicted_labels = model.predict(test_padded_sequences)

  # Default answer, kept when the command is not recognized.
  tmp = {
        'command' : "not recognized",
        'room' : None,
        'device' : None,
        "hour" : None,
        "minute": None
      }

  for i in range(len(test_texts)):
    # Binary heads thresholded at 0.5.  NOTE(review): indices follow the
    # model's output order; output5 (index 4) is not consumed here.
    valid = 1 if predicted_labels[0][i] > 0.5 else 0
    is_scene = 1 if predicted_labels[1][i] > 0.5 else 0
    has_num = 1 if predicted_labels[2][i] > 0.5 else 0
    turn = 1 if predicted_labels[3][i] > 0.5 else 0

    # Categorical heads: argmax class id decoded through the label maps.
    env_label = env_decode[np.argmax(predicted_labels[5][i])]
    hour_label = hour_decode[np.argmax(predicted_labels[6][i])]
    minute_label = minute_decode[np.argmax(predicted_labels[7][i])]

    if valid:
      tmp['device'] = 'ไฟ'
      tmp['command'] = 'turn on' if turn else 'turn off'
      if not is_scene:
        tmp['room'] = room_dict[env_label] if env_label in room_dict else room_dict['ไม่มีห้อง']
      else:
        # BUG FIX: the "no scene" fallback key 'ไม่มีซีน' exists only in
        # scene_dict; the original looked it up in room_dict, which raised
        # KeyError for any unknown scene label.
        tmp['room'] = scene_dict[env_label] if env_label in scene_dict else scene_dict['ไม่มีซีน']

      if has_num:
        tmp['hour'] = hour_label
        tmp['minute'] = minute_label

  return tmp

# Gradio UI: free-text Thai command in, decoded command dict out as JSON.
# Examples cover room commands, a scene command, timed commands, and one
# out-of-domain sentence that should return "not recognized".
iface = gr.Interface(
  fn=predict, 
  inputs='text',
  outputs='json',
  examples=[["เปิดไฟห้องนอนหน่อย"],["เปิดไฟซีนเอ"],["ปิดไฟห้องรับประทานอาหารเวลา4ทุ่มสามสิบเจ็ดนาที"],['ปิดไฟห้องน้ำเวลาบ่ายโมงห้าสิบนาที'],["โย่ และนี่คือเสียงจากเด็กวัด"]],
  interpretation="default",
)

# Start the web app (blocking call).
iface.launch()