import torch
import torch.nn.functional as F
import transformers
import gradio as gr
from src.client import DistributedBloomForCausalLM

# multiaddr of a public bootstrap peer for the test swarm
INITIAL_PEERS = ['/ip4/193.106.95.184/tcp/443/p2p/QmSXDXLeSMXjS4YerDrdn1zpGQaNzkZ9ogN2SoAEyAdDhs']
import hivemind  # smoke test: check that DHT instances can find each other on localhost

dht1 = hivemind.DHT(start=True)
dht2 = hivemind.DHT(start=True, initial_peers=dht1.get_visible_maddrs())
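# Sanity check (not in the original): if the line above returned without raising,
# dht2 bootstrapped from dht1; printing its visible multiaddrs confirms it is reachable.
print(dht2.get_visible_maddrs())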
tokenizer = transformers.BloomTokenizerFast.from_pretrained("bigscience/test-bloomd-6b3")
model = DistributedBloomForCausalLM.from_pretrained(
    "bigscience/test-bloomd-6b3", initial_peers=INITIAL_PEERS,
    low_cpu_mem_usage=True, torch_dtype=torch.float32)
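# Note: as the loop below shows, the client keeps the embeddings, layernorms, and the
# (tied) LM head locally in fp32 on CPU, while the transformer blocks in
# model.transformer.h are executed remotely by servers in the swarm.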

def inference(text, seq_length=1):
    seq_length = int(seq_length)  # gr.Slider passes a float; range() needs an int
    input_ids = tokenizer(text, return_tensors='pt')['input_ids']
    final_tokens = input_ids
    with torch.inference_mode(), model.transformer.h.inference_session() as remote_transformer:
        for i in range(seq_length):
            # local input embeddings -> remote transformer blocks -> local output head
            h = model.transformer.word_embeddings(input_ids)
            h = model.transformer.word_embeddings_layernorm(h)
            h = remote_transformer.step(h)
            h = model.transformer.ln_f(h)
            h = F.linear(h, weight=model.transformer.word_embeddings.weight)  # tied LM head; note: this line takes a while, will also be fixed
            # sample the next token at temperature 0.8, then feed only that token back in
            next_token_ix = torch.multinomial((h[0, -1] / 0.8).softmax(-1), 1)
            final_tokens = torch.cat([final_tokens, next_token_ix.view(1, 1)], dim=-1)
            input_ids = next_token_ix.view(1, 1)
    return tokenizer.decode(final_tokens[0], skip_special_tokens=False)
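
# Quick local check (a sketch, not part of the Gradio app): generate a few tokens
# directly. This assumes the swarm behind INITIAL_PEERS is reachable; the call will
# block or fail otherwise.
print(inference("A cat sat on a mat and", seq_length=5))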

iface = gr.Interface(
    fn=inference,
    inputs=[
        gr.Textbox(lines=10, label="Input text"),
        gr.Slider(  # gr.inputs.Slider is the deprecated pre-3.0 API
            minimum=0,
            maximum=1000,
            step=1,
            value=42,  # `default=` was renamed to `value=` in Gradio 3.x
            label="Sequence length for generation"
        )
    ],
    outputs="text"
)

iface.launch()