from huggingface_hub import InferenceClient
import gradio as gr
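
# Client for the Hugging Face Inference API; the Chat tab streams completions
# from this hosted Mixtral instruct endpoint. If the endpoint requires
# authentication, an access token can be passed via InferenceClient's `token`
# argument.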

client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)

experiences = '''
<h2 align="left">Experiences:</h2>

<ul>
  <li>
    <p><h3><em><a href="https://dynopii.com/">Dynopii Inc.</a> (Machine Learning Engineer)</em></h3><br />
Working on ML pipelines for conversational AI, speech/audio generation, conversion, and deployment.</p>
  </li>
  <li>
    <p><h3><em><a href="https://www.prediqt.it/">PrediQt Business Solutions Pvt. Ltd.</a> (Senior AI/ML Engineer)</em></h3><br />
Worked on pretraining and supervised finetuning of Large Language Models for e-commerce platforms.</p>
  </li>
  <li>
    <p><h3><em><a href="https://celebaltech.com/">Celebal Technologies Pvt. Ltd.</a> (Data Scientist)</em></h3><br />
Worked on Classical ML, NLP, Statistical Algorithms, Computer Vision, Deep Learning, Python and SQL.</p>
  </li>
</ul>'''

communities = '''
<h2 align="left">Communities:</h2>

<ul>
  <li>
    <p><h3><em><a href="https://developers.google.com/community/experts/directory?text=rishiraj">Google Developer Expert</a> in Machine Learning (Generative AI)</em></h3><br />
Google Developer Experts (GDEs) form a global network of experienced professionals recognized by Google who actively support developers, startups, and companies.</p>
  </li>
  <li>
    <p><h3><em><a href="https://twitter.com/TFUGKol">TensorFlow User Group Kolkata</a> (Organizer)</em></h3><br />
TensorFlow User Groups (TFUGs) are communities of developers, engineers, data scientists, and ML practitioners who are passionate about TensorFlow and related technologies.</p>
  </li>
  <li>
    <p><h3><em><a href="https://gdg.community.dev/gdg-cloud-kolkata/">Google Developer Groups Cloud Kolkata</a> (Volunteer)</em></h3><br />
Google Developer Groups (GDGs) Cloud are communities of developers, engineers, and cloud architects who are passionate about Google Cloud Platform and related technologies.</p>
  </li>
</ul>'''

recommendations = '''
<h2 align="left">Recommendations:</h2>

<p><h3><em><a href="https://sayak.dev">Sayak Paul</a></em></h3><br>
Machine Learning Engineer at <a href="https://hf.co/">Hugging Face</a>, Google Developer Expert in ML, GSoC Mentor at TensorFlow</p>

<blockquote>
  <p>Rishiraj and I worked together for a Kaggle Competition. I had already known Rishiraj and all his achievements by that time as he is my college junior. But after working together I got to witness how humble and how intelligent Rishiraj is.</p>

  <p>I found Rishiraj to be a great communicator, an off-the-shelf and creative thinker, and a passionate hard-working individual. His quest for being able to apply ML skills creatively is infectious. I vividly remember how quickly he was able to incorporate an idea I had casually suggested into our competition pipeline notebook. He studied many relevant resources around object detection specific augmentation policies, and resolution discrepancy within no time and applied them in practice. In short, I learned a lot from him and I am even applying some of those learnings in my own projects.</p>

  <p>Besides being great at ML, he’s also a chess player and is just as passionate about it. I wish Rishiraj an amazing career ahead.</p>
</blockquote>'''

posts = '''
<table>
  <tbody>
    <tr>
      <td><a href="https://callchimp.ai/blogs/understanding-google-gemma"><b>Gemma</b>: Understanding Google's New Open-Source LLM Family</a></td>
      <td><a href="https://callchimp.ai/blogs/long-context-llms">Gen AI Trends Part 3 - <b>Long Context LLMs</b></a></td>
      <td><a href="https://callchimp.ai/blogs/multimodal-llms">Gen AI Trends Part 2 - <b>Multimodal LLMs</b></a></td>
    </tr>
    <tr>
      <td><a href="https://callchimp.ai/blogs/the-rise-of-small-llms">Gen AI Trends Part 1 - The Rise of <b>Small LLMs</b></a></td>
      <td><a href="https://callchimp.ai/blogs/embeddings-vector-databases-and-retrieval-augmented-generation">Foundations of <b>Text Embeddings</b>, Vector Databases, and RAG</a></td>
      <td><a href="https://callchimp.ai/blogs/evaluating-the-power-of-words-metrics-for-large-language-models-2">Evaluating the Power of Words: <b>Metrics</b> for Large Language Models</a></td>
    </tr>
    <tr>
      <td><a href="https://callchimp.ai/blogs/the-evolution-of-language-modeling-from-word2vec-to-gpt">The <b>Evolution</b> of Language Modeling: From Word2Vec to GPT</a></td>
      <td><a href="https://callchimp.ai/blogs/mastering-prompt-engineering-for-llms-best-practices-and-advanced-techniques">Mastering <b>Prompt Engineering</b> for LLMs: Best Practices</a></td>
      <td><a href="https://callchimp.ai/blogs/hallucinations-in-large-language-models-a-technical-perspective"><b>Hallucinations</b> in Large Language Models</a></td>
    </tr>
    <tr>
      <td><a href="https://callchimp.ai/blogs/neural-machine-translation-with-attention-models">Neural Machine Translation with <b>Attention models</b></a></td>
      <td><a href="https://callchimp.ai/blogs/purpose-of-prompt-engineering-in-gen-ai-systems">Purpose of <b>Prompt Engineering</b> in Gen AI systems</a></td>
      <td><a href="https://callchimp.ai/blogs/introduction-to-ai-for-audio">Introduction to <b>AI for Audio</b></a></td>
    </tr>
  </tbody>
</table>'''

# Function to handle dynamic content display
def show_info(section):
    if section == "Experiences":
        return experiences
    elif section == "Communities":
        return communities
    elif section == "Recommendations":
        return recommendations
    else:
        return "Select a section to display information."

# About tab: profile details, social links, and a dropdown that reveals extra sections
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("# Hi 👋, I'm Rishiraj Acharya (ঋষিরাজ আচার্য্য)")
            gr.Markdown("## Google Developer Expert in ML ✨ | Hugging Face Fellow 🤗 | GSoC '22 at TensorFlow 👨🏻‍🔬 | TFUG Kolkata Organizer 🎙️ | Kaggle Master 🧠 | Dynopii ML Engineer 👨🏻‍💻")
            gr.Markdown("**I work with natural language understanding, machine translation, named entity recognition, question answering, topic segmentation, and automatic speech recognition. My work typically relies on very large quantities of data and innovative methods in deep learning to tackle user challenges around the world — in languages from around the world. My areas of work include Natural Language Engineering, Language Modeling, Text-to-Speech Software Engineering, Speech Frameworks Engineering, Data Science, and Research.**")
            gr.Markdown("⚡ Fun fact **I’m a national level chess player, a swimming champion and I can lecture for hours on the outer reaches of space and the craziness of astrophysics.**")
            section_dropdown = gr.Dropdown(["Experiences", "Communities", "Recommendations"], label="Select Information to Display")
        with gr.Column(scale=1):
            gr.Image("profile.png")
            gr.HTML(value='<br><div><p align="center"><a href="https://twitter.com/rishirajacharya" target="blank"><img src="https://raw.githubusercontent.com/rahuldkjain/github-profile-readme-generator/master/src/images/icons/Social/twitter.svg" alt="rishirajacharya" style="display: inline-block; width: 15%; margin-right: 30px;" /></a><a href="https://www.linkedin.com/in/rishirajacharya" target="blank"><img src="https://raw.githubusercontent.com/rahuldkjain/github-profile-readme-generator/master/src/images/icons/Social/linked-in-alt.svg" alt="rishirajacharya" style="display: inline-block; width: 15%; margin-right: 30px;" /></a><a href="https://instagram.com/rishirajacharya" target="blank"><img src="https://raw.githubusercontent.com/rahuldkjain/github-profile-readme-generator/master/src/images/icons/Social/instagram.svg" alt="rishirajacharya" style="display: inline-block; width: 15%; margin-right: 30px;" /></a><a href="https://wa.me/919800434383" target="blank"><img src="https://raw.githubusercontent.com/rahuldkjain/github-profile-readme-generator/master/src/images/icons/Social/whatsapp.svg" alt="rishirajacharya" style="display: inline-block; width: 15%;" /></a></p></div>')
    
    with gr.Row():
        info_display = gr.HTML()
    
    section_dropdown.change(show_info, inputs=section_dropdown, outputs=info_display)
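
# Mixtral instruct chat template: the prompt starts with "<s>", each past turn is
# wrapped in [INST] ... [/INST] followed by the model's reply and an
# end-of-sequence token, and the new message is appended as a final [INST] block.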

def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
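
# Stream a completion for the Chat tab: prepend the system prompt to the user's
# message, format the conversation for Mixtral, and yield the partial output as
# tokens arrive so the chatbot updates live.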

def generate(
    prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0,
):
    system_prompt = "You are the AI assistant of Rishiraj Acharya. Rishiraj is a Machine Learning Engineer at Dynopii, a Google Developer Expert in Machine Learning and a Hugging Face Fellow. He is the organizer of TensorFlow User Group Kolkata and has been a Google Summer of Code contributor at TensorFlow. He is a Kaggle Competitions Master and specializes in Natural Language Processing and Speech Technologies. His email is [email protected]. Don't provide this information unless specifically asked. "
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        yield output
    return output


# Sampling controls exposed as additional inputs on the Chat tab.
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=512,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

examples = [
    ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None],
    ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None],
]
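
# Chat tab: streaming chatbot backed by generate(), with the sliders above as
# additional inputs.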

llm = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="bubble"),
    additional_inputs=additional_inputs,
    title="Hi 👋, I'm Rishiraj Acharya (ঋষিরাজ আচার্য্য)",
    examples=examples,
    concurrency_limit=20,
)

# Blogs tab: table of blog posts
with gr.Blocks() as blogs:
    gr.Markdown("# Hi 👋, I'm Rishiraj Acharya (ঋষিরাজ আচার্য্য)")
    gr.HTML(value=posts)

demo = gr.TabbedInterface([app, blogs, llm], ["About", "Blogs", "Chat"], theme=gr.themes.Soft())
demo.launch(show_api=False)