import asyncio
import time

from dotenv import load_dotenv
from elevenlabs import generate, play

from audio_stream_processor import AudioStreamProcessor
from chat_pipeline import ChatPipeline
from chat_service import ChatService
from clip_transform import CLIPTransform
from speech_service import SpeechService
from streaming_chat_service import StreamingChatService

def time_sentence_lengths():
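    """Measure streaming TTS latency for prompts of varying length.

    For each prompt, the time to pull the complete audio stream is compared
    with the time it takes to actually play that audio back.
    """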
    load_dotenv()

    print ("Initializing Chat")
    # audio_processor = AudioStreamProcessor()
    user_speech_service0 = SpeechService(voice_id="Adam")
    prompts = [
        "hello, i am a long sentance, how are you today? Tell me about your shadow self?",
        "a shorter sentance",
        "Jung believed that the process of self-discovery and personal growth involves confronting and integrating the shadow self into the conscious mind.",
        "By doing so, we become more self-aware and more fully actualized individuals.",
    ]

    print ("Timing prompts\n")
    for prompt in prompts:
        start_time = time.time()
        start_stream_time = time.time()
        stream = user_speech_service0.stream(prompt)
        audio = b""
        for chunk in stream:
            if chunk is not None:
                audio += chunk
        end_stream_time = time.time()
        from elevenlabs import play
        start_speech_time = time.time()
        play(audio)
        end_speech_time = time.time()
        end_time = time.time()
        total_time = (end_time - start_time)
        stream_time = (end_stream_time - start_stream_time)
        speech_time = (end_speech_time - start_speech_time)
        stream_multiple = speech_time / stream_time
        print(f"Stream time: {stream_time:.4f}, Acutual audio time: {speech_time:.4f}, a multiple of {stream_multiple:.2f}. for prompt: {prompt}")

    print ("\nChat success")

def test_sentence_lengths():
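    """Exercise AudioStreamProcessor with several TTS streams started out of
    order, then play the same prompts synthesized non-streaming for comparison."""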
    load_dotenv()

    print ("Initializing Chat")
    audio_processor = AudioStreamProcessor()
    user_speech_service0 = SpeechService(voice_id="Adam")
    user_speech_service1 = SpeechService(voice_id="Adam")
    user_speech_service2 = SpeechService(voice_id="Adam")
    user_speech_service3 = SpeechService(voice_id="Adam")

    prompts = [
        "hello, i am a long sentance, how are you today? Tell me about your shadow self?",
        "a shorter sentance",
        "Jung believed that the process of self-discovery and personal growth involves confronting and integrating the shadow self into the conscious mind.",
        "By doing so, we become more self-aware and more fully actualized individuals.",
    ]
    first = True
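    # Streams are started out of order and with a deliberate delay, presumably
    # to exercise how AudioStreamProcessor handles late-arriving streams.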
    stream1 = user_speech_service1.stream(prompts[1])
    stream0 = user_speech_service0.stream(prompts[0])
    time.sleep(5)
    stream2 = user_speech_service2.stream(prompts[2])
    stream3 = user_speech_service3.stream(prompts[3])
    audio_processor.add_audio_stream(stream0)
    audio_processor.add_audio_stream(stream1)
    audio_processor.add_audio_stream(stream2)
    audio_processor.add_audio_stream(stream3)
    audio_processor.close()
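    # For comparison, synthesize the same prompts with the non-streaming
    # elevenlabs generate() helper and play them back sequentially.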
    speech0 = generate(prompts[0], voice="Adam")
    speech1 = generate(prompts[1], voice="Adam")
    speech2 = generate(prompts[2], voice="Adam")
    speech3 = generate(prompts[3], voice="Adam")
    play(speech0)
    play(speech1)
    play(speech2)
    play(speech1)
    play(speech3)
    play(speech1)
    # for prompt in prompts:
    #     stream = user_speech_service.stream(prompt)
    #     if first:
    #         first = False
    #         time.sleep(5)
    #     audio_processor.add_audio_stream(stream)
    audio_processor.close()
    print ("Chat success")

def run_debug_code():
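    """Run a synchronous debug loop: speak each user prompt, then stream the
    assistant's spoken response through a shared AudioStreamProcessor."""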
    load_dotenv()

    # print ("Initializing CLIP templates")
    # clip_transform = CLIPTransform()
    # print ("CLIP success")

    print ("Initializing Chat")
    # chat_service = ChatService()
    audio_processor = AudioStreamProcessor()
    chat_service = StreamingChatService(audio_processor, voice_id="2OviOUQc1JsQRQgNkVBj") # Chales003

    user_speech_service = SpeechService(voice_id="Adam")

    # user_speech_service.print_voices() # if you want to see your custom voices

    prompts = [
        "hello, how are you today?",
        "tell me about your shadow self?",
        "hmm, interesting, tell me more about that.",
        "wait, that is so interesting, what else?",
    ]
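    # For each prompt: queue TTS of the user's line, then have the chat service
    # stream its spoken response through the same audio processor.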
    for prompt in prompts:
        print ("")
        print (f'prompt: "{prompt}"')
        stream = user_speech_service.stream(prompt)
        audio_processor.add_audio_stream(stream)

        print ("")
        print (f'response:')
        response = chat_service.respond_to(prompt)

    audio_processor.close()
    print ("Chat success")

async def run_pipeline():
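    """Start the async ChatPipeline, enqueue a fixed set of prompts, and wait
    until every job in the pipeline has gone idle."""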
    load_dotenv()

    try:
        chat_pipeline = ChatPipeline()
        await chat_pipeline.start()
        prompts = [
            "hello, how are you today?",
            "tell me about your shadow self?",
            "hmm, interesting, tell me more about that.",
            "wait, that is so interesting, what else?",
        ]
        for prompt in prompts:
            await chat_pipeline.enqueue(prompt)
        await chat_pipeline.wait_until_all_jobs_idle()
    except KeyboardInterrupt:
        print("Pipeline interrupted by user")
    except Exception as e:
        print(f"An error occurred: {e}")

if __name__ == '__main__':
    # time_sentence_lengths()
    # test_sentence_lengths()
    # run_debug_code()
    asyncio.run(run_pipeline())