try:
    import spaces
    def maybe_spaces_gpu(fn):
        fn = spaces.GPU(fn)
        return fn
except ModuleNotFoundError:
    print('Cannot import hf `spaces` with `import spaces`; falling back to a no-op GPU decorator.')
    def maybe_spaces_gpu(fn):
        return fn
import os
import numpy as np
import argparse
import torch
import sys
import gradio as gr
from typing import Any, Iterator, List, Optional, Tuple
import filelock
import glob
import json
import time
from gradio.routes import Request
from gradio.utils import SyncToAsyncIterator, async_iteration
from gradio.helpers import special_args
import anyio
from typing import AsyncGenerator, Callable, Literal, Union, cast

from gradio_client.documentation import document, set_documentation_group

from typing import List, Optional, Union, Dict, Tuple
from tqdm.auto import tqdm
from huggingface_hub import snapshot_download
import types

from gradio.components import Button
from gradio.events import Dependency, EventListenerMethod

from .base_engine import BaseEngine

# ! Remember to use static cache

from transformers import (
    GenerationConfig,
    GenerationMixin,
    LogitsProcessorList,
    StoppingCriteriaList,
    DisjunctiveConstraint,
    BeamSearchScorer,
    PhrasalConstraint,
    ConstrainedBeamSearchScorer,
    PreTrainedModel,
)
import random
import warnings
import inspect
from transformers.generation.utils import GenerateOutput, SampleOutput, logger
from torch import nn
import torch.distributed as dist
import copy

from ..configs import (
    MODEL_PATH,
    DTYPE,
    DEVICE,
    STREAM_CHECK_MULTIPLE,
    STREAM_YIELD_MULTIPLE,
)




def setup_seed(seed):
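    """Seed torch (and CUDA), numpy and random and make cuDNN deterministic; seed == -1 disables seeding."""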
    if seed == -1:
        return
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


class NewGenerationMixin(GenerationMixin):
    """
    Allow generator sampling

    """

    # ! Copy from transformers.generation.utils -> GenerationMixin
    # Change sample function to sample_stream
    @torch.no_grad()
    def sample_stream(
        self,
        input_ids: torch.LongTensor,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        logits_warper: Optional[LogitsProcessorList] = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[Union[int, List[int]]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_scores: Optional[bool] = None,
        output_logits: Optional[bool] = None,
        return_dict_in_generate: Optional[bool] = None,
        synced_gpus: bool = False,
        streamer: Optional["BaseStreamer"] = None,
        **model_kwargs,
    ):
        r"""
        Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and
        can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.

        <Tip warning={true}>

        In most cases, you do not need to call [`~generation.GenerationMixin.sample`] directly. Use generate() instead.
        For an overview of generation strategies and code examples, check the [following
        guide](../generation_strategies).

        </Tip>

        Parameters:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`LogitsProcessorList`, *optional*):
                An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
                used to modify the prediction scores of the language modeling head applied at each generation step.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
                used to tell if the generation loop should stop.
            logits_warper (`LogitsProcessorList`, *optional*):
                An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
                to warp the prediction score distribution of the language modeling head applied before multinomial
                sampling at each generation step.
            max_length (`int`, *optional*, defaults to 20):
                **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
                tokens. The maximum length of the sequence to be generated.
            pad_token_id (`int`, *optional*):
                The id of the *padding* token.
            eos_token_id (`Union[int, List[int]]`, *optional*):
                The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            output_logits (`bool`, *optional*, defaults to `False`):
                Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for
                more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            model_kwargs:
                Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
                an encoder-decoder model the kwargs should include `encoder_outputs`.

        Yields:
            `torch.LongTensor` of shape `(batch_size,)`: the token sampled at each decoding step.
            Unlike the stock `sample`, this streaming variant is a generator and does not return a
            final [`~generation.GenerateDecoderOnlyOutput`] or [`~generation.GenerateEncoderDecoderOutput`].

        Examples:

        ```python
        >>> from transformers import (
        ...     AutoTokenizer,
        ...     AutoModelForCausalLM,
        ...     LogitsProcessorList,
        ...     MinLengthLogitsProcessor,
        ...     TopKLogitsWarper,
        ...     TemperatureLogitsWarper,
        ...     StoppingCriteriaList,
        ...     MaxLengthCriteria,
        ... )
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

        >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
        >>> model.config.pad_token_id = model.config.eos_token_id
        >>> model.generation_config.pad_token_id = model.config.eos_token_id

        >>> input_prompt = "Today is a beautiful day, and"
        >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids

        >>> # instantiate logits processors
        >>> logits_processor = LogitsProcessorList(
        ...     [
        ...         MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
        ...     ]
        ... )
        >>> # instantiate logits warpers
        >>> logits_warper = LogitsProcessorList(
        ...     [
        ...         TopKLogitsWarper(50),
        ...         TemperatureLogitsWarper(0.7),
        ...     ]
        ... )

        >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])

        >>> torch.manual_seed(0)  # doctest: +IGNORE_RESULT
        >>> outputs = model.sample(
        ...     input_ids,
        ...     logits_processor=logits_processor,
        ...     logits_warper=logits_warper,
        ...     stopping_criteria=stopping_criteria,
        ... )

        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
        ['Today is a beautiful day, and we must do everything possible to make it a day of celebration.']
        ```"""
        # init values
        from transformers.generation.utils import (
            validate_stopping_criteria, GenerateEncoderDecoderOutput, GenerateDecoderOnlyOutput
        )
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
        if max_length is not None:
            warnings.warn(
                "`max_length` is deprecated in this function, use"
                " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
                UserWarning,
            )
            stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
        logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
        pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]
        eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
        output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
        output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
        output_attentions = (
            output_attentions if output_attentions is not None else self.generation_config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
        )
        return_dict_in_generate = (
            return_dict_in_generate
            if return_dict_in_generate is not None
            else self.generation_config.return_dict_in_generate
        )

        # init attention / hidden states / scores tuples
        scores = () if (return_dict_in_generate and output_scores) else None
        raw_logits = () if (return_dict_in_generate and output_logits) else None
        decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
        cross_attentions = () if (return_dict_in_generate and output_attentions) else None
        decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None

        # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
        if return_dict_in_generate and self.config.is_encoder_decoder:
            encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
            encoder_hidden_states = (
                model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
            )

        # keep track of which sequences are already finished
        unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device)

        this_peer_finished = False  # used by synced_gpus only
        # auto-regressive generation
        while True:
            if synced_gpus:
                # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
                # The following logic allows an early break if all peers finished generating their sequence
                this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
                # send 0.0 if we finished, 1.0 otherwise
                dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
                # did all peers finish? the reduced sum will be 0.0 then
                if this_peer_finished_flag.item() == 0.0:
                    break

            # prepare model inputs
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)

            # forward pass to get next token
            outputs = self(
                **model_inputs,
                return_dict=True,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )

            if synced_gpus and this_peer_finished:
                continue  # don't waste resources running the code we don't need

            next_token_logits = outputs.logits[:, -1, :]

            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            next_token_scores = logits_warper(input_ids, next_token_scores)

            # Store scores, attentions and hidden_states when required
            if return_dict_in_generate:
                if output_scores:
                    scores += (next_token_scores,)
                if output_logits:
                    raw_logits += (next_token_logits,)
                if output_attentions:
                    decoder_attentions += (
                        (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
                    )
                    if self.config.is_encoder_decoder:
                        cross_attentions += (outputs.cross_attentions,)

                if output_hidden_states:
                    decoder_hidden_states += (
                        (outputs.decoder_hidden_states,)
                        if self.config.is_encoder_decoder
                        else (outputs.hidden_states,)
                    )

            # sample
            probs = nn.functional.softmax(next_token_scores, dim=-1)
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)

            # finished sentences should have their next token be a padding token
            if eos_token_id is not None:
                if pad_token_id is None:
                    raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
                next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)

            # Key change from the stock sampling loop: yield each newly sampled token
            # so the caller can stream partial output as it is generated.
            yield next_tokens.cpu()

            # update generated ids, model inputs, and length for next step
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
            if streamer is not None:
                streamer.put(next_tokens.cpu())
            
            next_model_inputs = {}
            if "cache_position" in model_inputs:
                next_model_inputs['cache_position'] = model_inputs['cache_position']
            try:
                model_kwargs = self._update_model_kwargs_for_generation(
                    outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder, 
                    # model_inputs=model_inputs
                    model_inputs=next_model_inputs,
                )
            except Exception as e:
                # ! some transformers version don't have model_inputs in generation
                model_kwargs = self._update_model_kwargs_for_generation(
                    outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder, 
                    # model_inputs=model_inputs
                    # model_inputs=next_model_inputs,
                )

            # if eos_token was found in one sentence, set sentence to finished
            if eos_token_id_tensor is not None:
                unfinished_sequences = unfinished_sequences.mul(
                    next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
                )

                # stop when each sentence is finished
                if unfinished_sequences.max() == 0:
                    this_peer_finished = True

            # stop if we exceed the maximum length
            if stopping_criteria(input_ids, scores):
                this_peer_finished = True

            if this_peer_finished and not synced_gpus:
                break

        if streamer is not None:
            streamer.end()

        # if return_dict_in_generate:
        #     if self.config.is_encoder_decoder:
        #         return GenerateEncoderDecoderOutput(
        #             sequences=input_ids,
        #             scores=scores,
        #             logits=raw_logits,
        #             encoder_attentions=encoder_attentions,
        #             encoder_hidden_states=encoder_hidden_states,
        #             decoder_attentions=decoder_attentions,
        #             cross_attentions=cross_attentions,
        #             decoder_hidden_states=decoder_hidden_states,
        #             past_key_values=model_kwargs.get("past_key_values"),
        #         )
        #     else:
        #         return GenerateDecoderOnlyOutput(
        #             sequences=input_ids,
        #             scores=scores,
        #             logits=raw_logits,
        #             attentions=decoder_attentions,
        #             hidden_states=decoder_hidden_states,
        #             past_key_values=model_kwargs.get("past_key_values"),
        #         )
        # else:
        #     return input_ids





BLOCK_LANGS = str(os.environ.get("BLOCK_LANGS", ""))
BLOCK_LANGS = [x.strip() for x in BLOCK_LANGS.strip().split(";")] if len(BLOCK_LANGS.strip()) > 0 else []
LANG_BLOCK_HISTORY = bool(int(os.environ.get("LANG_BLOCK_HISTORY", "0")))
KEYWORDS = os.environ.get("KEYWORDS", "").strip()
KEYWORDS = KEYWORDS.split(";") if len(KEYWORDS) > 0 else []
KEYWORDS = [x.lower() for x in KEYWORDS]
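# The settings above are environment-driven safety filters:
#   BLOCK_LANGS         semicolon-separated langdetect language codes to refuse
#   LANG_BLOCK_HISTORY  set to "1" to also refuse when an earlier turn was already blocked
#   KEYWORDS            semicolon-separated, case-insensitive keywords that trigger a refusal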

LANG_BLOCK_MESSAGE = """Unsupported language."""

KEYWORD_BLOCK_MESSAGE = "Invalid request."


def _detect_lang(text):
    # Disable language that may have safety risk
    from langdetect import detect as detect_lang
    dlang = None
    try:
        dlang = detect_lang(text)
    except Exception as e:
        if "No features in text." in str(e):
            return "en"
        else:
            return "zh"
    return dlang


def block_lang(
    message: str,
    history: Optional[List[Tuple[str, str]]] = None,
) -> bool:
    # nothing to block when no languages are configured
    if len(BLOCK_LANGS) == 0:
        return False
    
    if LANG_BLOCK_HISTORY and history is not None and any((LANG_BLOCK_MESSAGE in x[1].strip()) for x in history):
        return True
    else:
        _lang = _detect_lang(message)
        if _lang in BLOCK_LANGS:
            # print(f'Detect blocked {_lang}: {message}')
            return True
        else:
            return False
        
def safety_check(text, history=None, ) -> Optional[str]:
    """
    Despite our efforts in safety tuning and red teaming, our models may still generate harmful or illegal content.
    This provides an additional security measure to enhance safety and compliance with local regulations.
    """
    if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
        return KEYWORD_BLOCK_MESSAGE
    
    if len(BLOCK_LANGS) > 0:
        if block_lang(text, history):
            return LANG_BLOCK_MESSAGE

    return None


def safety_check_conversation_string(text, delimiter=None) -> Optional[str]:
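    """Run the keyword check on the whole string, then split it into ChatML-style turns and run language blocking per turn."""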
    if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
        return KEYWORD_BLOCK_MESSAGE
    if len(BLOCK_LANGS) > 0:
        import re
        delimiter = delimiter or (r"</s><\|im_start\|>user\n", r"</s><\|im_start\|>assistant\n", r"<\|im_start\|>system\n")
        turns = re.split(r"|".join(delimiter), text)
        turns = [t for t in turns if t.strip() != '']
        for t in turns:
            if block_lang(t):
                return LANG_BLOCK_MESSAGE
    return None


def is_check_safety():
    return len(KEYWORDS) > 0 or len(BLOCK_LANGS) > 0


def safety_check_conversation(conversation) -> Optional[str]:
    """
    Despite our efforts in safety tuning and red teaming, our models may still generate harmful or illegal content.
    This provides an additional security measure to enhance safety and compliance with local regulations.
    """
    texts = [c['content'] for c in conversation]
    for text in texts:
        if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
            return KEYWORD_BLOCK_MESSAGE
        
        if len(BLOCK_LANGS) > 0:
            if block_lang(text):
                return LANG_BLOCK_MESSAGE
    return None





class TransformersEngine(BaseEngine):
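    """Local transformers backend that streams tokens by patching the model's sampling loop with `sample_stream`."""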
    @property
    def max_position_embeddings(self) -> int:
        return self._model.config.max_position_embeddings

    @property
    def tokenizer(self):
        return self._tokenizer

    def load_model(self):
        from transformers import AutoTokenizer, AutoModelForCausalLM
        import sys
        # caution: path[0] is reserved for script path (or '' in REPL)
        # sys.path.append(CODE_PATH)
        self.model_path = model_path = MODEL_PATH
        self.torch_dtype = torch.bfloat16 if DTYPE == 'bfloat16' else torch.float16
        self.device_map = DEVICE
        print(f'Loading model from {model_path} on {self.device_map} with {self.torch_dtype}')

        self._tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        assert self._tokenizer.chat_template is not None and self._tokenizer.chat_template != "", f"{self._tokenizer.chat_template=} not found!"
        self._model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=self.torch_dtype, device_map=self.device_map, trust_remote_code=True).eval()
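        # Keep the original sampler, then route the private `_sample` (the entry point in
        # newer transformers releases) to the streaming generator; `generate_yield_string`
        # also patches the public `sample` for older releases.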
        self._model.sample_old = self._model.sample
        self._model._sample = types.MethodType(NewGenerationMixin.sample_stream, self._model)
        print(self._model)
        print(f"{self.max_position_embeddings=}")

    def maybe_raise_safety(self, message, gen_index=-1):
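        """Raise `gr.Error` when `message` trips a safety filter. A negative `gen_index` forces an
        immediate check; during streaming, only every STREAM_CHECK_MULTIPLE-th partial response is checked."""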
        if is_check_safety():
            if gen_index < 0:
                message_safety = safety_check_conversation_string(message)
                if message_safety is not None:
                    raise gr.Error(message_safety)
            else:
                if STREAM_CHECK_MULTIPLE > 0 and gen_index % STREAM_CHECK_MULTIPLE == 0:
                    message_safety = safety_check_conversation_string(message)
                    if message_safety is not None:
                        raise gr.Error(message_safety)

    # @maybe_spaces_gpu
    def generate_yield_string(self, prompt, temperature, max_tokens, stop_strings: Optional[Tuple[str]] = None, **kwargs):
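        """Stream a completion for `prompt`: yields `(partial_response, num_tokens)` after every sampled token."""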
        
        # ! MUST run inside torch.no_grad(), otherwise activations accumulate and the GPU runs out of memory (OOM)
        import sys
        # self._model._sample = types.MethodType(NewGenerationMixin.sample_stream, self._model)
        self._model.sample = types.MethodType(NewGenerationMixin.sample_stream, self._model)

        self.maybe_raise_safety(prompt)

        if temperature == 0:
            temperature = 0.0001

        try:
            
            with torch.no_grad():
                inputs = self.tokenizer(prompt, return_tensors='pt')
                # whether to print the full prompts
                retok_full_prompt = self.tokenizer.decode(inputs.input_ids[0], skip_special_tokens=False)
                print(f"retok_full_prompt:\n{retok_full_prompt}>>>>")
                begin_bos = inputs.input_ids[0][0] == self.tokenizer.bos_token_id
                print(f'begin_bos: {begin_bos}')

                num_tokens = inputs.input_ids.size(1)

                inputs = inputs.to(self._model.device)
                
                # `sample`/`_sample` has been patched with the streaming variant above, so
                # `generate` returns a generator of per-step token tensors instead of a
                # finished sequence.
                generator = self._model.generate(
                    **inputs, 
                    do_sample=True, 
                    temperature=temperature, 
                    max_new_tokens=max_tokens, 
                    pad_token_id=self.tokenizer.pad_token_id,
                )

                out_tokens = []
                response = None
                for index, token in enumerate(generator):
                    out_tokens.extend(token.tolist())
                    response = self.tokenizer.decode(out_tokens, skip_special_tokens=True)
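                    # If the decoded text still contains the chat template, keep only the assistant turn.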
                    if "<|im_start|>assistant\n" in response:
                        response = response.split("<|im_start|>assistant\n")[-1]
                    num_tokens += 1
                    # print(f"{response}", end='\r')
                    # sys.stdout.flush()
                    self.maybe_raise_safety(response, gen_index=index)
                    yield response, num_tokens

                del generator
                if response is not None:
                    if "<|im_start|>assistant\n" in response:
                        response = response.split("<|im_start|>assistant\n")[-1]

                    self.maybe_raise_safety(response)
                    full_text = prompt + response
                    num_tokens = len(self.tokenizer.encode(full_text))
                    yield response, num_tokens
        except RuntimeError as e:
            raise gr.Error(str(e))
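

# Minimal usage sketch (illustrative only): assumes MODEL_PATH, DTYPE and DEVICE are set in
# ..configs and that the checkpoint ships a ChatML-style chat template, as this engine expects.
#
#   engine = TransformersEngine()
#   engine.load_model()
#   prompt = engine.tokenizer.apply_chat_template(
#       [{"role": "user", "content": "Hello!"}], tokenize=False, add_generation_prompt=True,
#   )
#   for partial_response, num_tokens in engine.generate_yield_string(prompt, temperature=0.7, max_tokens=256):
#       print(partial_response)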