Edit model card

ALMA-Cymraeg-13B

Fersiwn Gymraeg o fodel cyfieithu ALMA a ddisgrifir yn https://arxiv.org/abs/2309.11674.
This is a Welsh version of the ALMA LLM-based translation model.

Mae'r model LLM yn seiliedig ar Llama-2-13B, gyda hyfforddiant parhaus ar ddata Gymreig OSCAR-2301 am 3 Epoch ac yna hyfforddiant cywrain pellach ar ddata Cofnod y Cynulliad a ddarperir gan TechIaith.

Mae'r fersiwn yma wedi ei gywasgu i 4.0bpw er mwyn llwytho mewn cof GPU o 10GB gyda thestun hyd at 4096 tocyn gan ddefnyddio ExLlamaV2.

Fformat Sgwrs

Mae'r hyfforddiant cywrain wedi defnyddio'r fformat canlynol ar gyfer trosi o'r Saesneg i'r Gymraeg (a'r naill ffordd i'r llall).

Cyfieithwch y testun Saesneg canlynol i'r Gymraeg.
### Saesneg:
{prompt}

### Cymraeg:

Esiampl

import time
import sys, os
import dataclasses
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from exllamav2 import(
    ExLlamaV2,
    ExLlamaV2Config,
    ExLlamaV2Cache,
    ExLlamaV2Tokenizer,
    ExLlamaV2Lora,    
)

from exllamav2.generator import (
    ExLlamaV2StreamingGenerator,
    ExLlamaV2Sampler
)


class ModelClass:
    """Simple container bundling a streaming generator, tokenizer and model.

    Callers access the attributes directly (e.g. ``base_model.generator``,
    ``base_model.tokenizer``).
    """

    def __init__(self, generator, tokenizer, model):
        self.generator = generator
        self.tokenizer = tokenizer
        self.model = model

    def __repr__(self):
        # Debug-friendly representation; the wrapped objects supply their own reprs.
        return (f"{type(self).__name__}(generator={self.generator!r}, "
                f"tokenizer={self.tokenizer!r}, model={self.model!r})")

DEBUG = os.environ.get("DEBUG") and True or False

# Model and cache start-up
def load_model(model_directory, max_seq_len=4096):
    """Load an ExLlamaV2 model from *model_directory*.

    Returns a ModelClass bundling the streaming generator, the tokenizer
    and the underlying model, with the generator already warmed up.
    """
    cfg = ExLlamaV2Config()
    cfg.model_dir = model_directory
    cfg.max_seq_len = max_seq_len
    cfg.prepare()

    llm = ExLlamaV2(cfg)
    print(f"Llwytho model: {model_directory}")

    # Lazy cache so load_autosplit can distribute the weights itself.
    kv_cache = ExLlamaV2Cache(llm, lazy=True, max_seq_len=max_seq_len)
    llm.load_autosplit(kv_cache)

    tok = ExLlamaV2Tokenizer(cfg)
    stream_gen = ExLlamaV2StreamingGenerator(llm, kv_cache, tok)
    stream_gen.warmup()
    return ModelClass(generator=stream_gen, tokenizer=tok, model=llm)

def generate_text(prompt, settings, max_new_tokens):
    """Stream a completion for *prompt*, echoing each chunk to stdout.

    Reads the module-level ``base_model``. Generation stops at a newline
    (the configured stop condition), at end-of-sequence, or after
    ``max_new_tokens`` tokens.

    Returns the full generated text. (The original always returned ""
    even though the caller assigned the result to ``response``.)
    """
    sys.stdout.flush()
    input_ids = base_model.tokenizer.encode(prompt)
    generated_tokens = 0
    base_model.generator.set_stop_conditions(["\n"])
    base_model.generator.begin_stream(input_ids, settings)
    time_begin = time.time()

    pieces = []
    while True:
        chunk, eos, _ = base_model.generator.stream()
        generated_tokens += 1
        pieces.append(chunk)
        print(chunk, end="")
        sys.stdout.flush()
        # >= rather than == so the loop cannot overshoot the token budget.
        if eos or generated_tokens >= max_new_tokens:
            break

    time_total = time.time() - time_begin
    print(f"\nYmateb cyflawn mewn {time_total:.2f} eiliad, {generated_tokens} tocyn, {generated_tokens / time_total:.2f} tocyn/eiliad")
    return "".join(pieces)

# Load the quantized model from the local directory (import-time side effect;
# generate_text reads this module-level global).
base_model = load_model("./ALMA-Cymraeg-13B-0.1-4.0bpw-exl2")

# Sampler configuration — tune to taste.
settings = ExLlamaV2Sampler.Settings()
settings.temperature = 0.15 # change as needed, e.g. 0.75
settings.top_k = 90 # change as needed, e.g. 50
settings.top_p = 1.0 # etc.
settings.token_repetition_penalty = 1.15 # etc.
max_new_tokens = 2000 # etc.

# Instruction prefix used during fine-tuning
# ("Translate the following English text into Welsh.").
system_prompt = "Cyfieithwch y testun Saesneg canlynol i'r Gymraeg."

# Interactive loop: read English text, build the fine-tuning prompt format,
# and stream the Welsh translation. Runs until interrupted (Ctrl+C).
while True:
    user_input = input("Saesneg: ")

    prompt = f"{system_prompt}\n\n### Saesneg:\n{user_input}\n\n### Cymraeg:\n"
    if DEBUG: print(f"{prompt}\n\n")
    print("Cymraeg:")
    response = generate_text(prompt, settings, max_new_tokens)
    print("="*132)

Hawlfraint

Mae'r model yn seiliedig ar Llama2 ac felly dan drwydded gan Meta.
Mae'r data Cofnod y Cynulliad dan drwydded Llywodraeth Agored.

Downloads last month
13
Inference Examples
This model does not have enough activity to be deployed to Inference API (serverless) yet. Increase its social visibility and check back later, or deploy to Inference Endpoints (dedicated) instead.