This creates a task to run the quantized LLM inference on the emotion-detection result.
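For orientation, here is a hypothetical `data` payload of the shape this receiver consumes; the key layout (`parameters`, `result_json["result_profile"]["multi_modal_output"]`, and the `A`/`T`/`V`/`M` modality keys) mirrors what the source below reads, while the concrete values are invented:

```python
# Hypothetical payload; field names follow the receiver below, values are invented.
data = {
    "parameters": {"text": "I lost my keys again.", "data_text_id": 42},
    "result_json": {
        "result_profile": {
            # Per-modality scores in [-1, 1]: A=audio, T=text, V=video, M=overall
            "multi_modal_output": {"A": -0.4, "T": -0.7, "V": -0.2, "M": -0.5}
        }
    },
}
```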
Source code in API/orchestrator/chain/completed_emotion_detection.py
```python
@receiver(completed_emotion_detection)
def trigger_completed_emotion_detection(sender, **kwargs):
    """
    This creates a task to run the quantized LLM inference.
    """
    try:
        logger.info("Emotion detection completed triggered")
        data = kwargs.get("data", {})
        track_id = kwargs.get("track_id", None)
        logger.info(data)
        task_data = TaskData(**data)
        if track_id is None:
            logger.error("No track_id found")
            return
        data_text_id = task_data.parameters.get("data_text_id", None)
        # get the text and emotion from the result
        text = task_data.parameters["text"]
        emotion = task_data.result_json["result_profile"].get("multi_modal_output", {})
        data_multimodal_conversation_log_context_emotion_detection(
            task_data=task_data, result=emotion
        )
        emotion_text = (
            "Emotion value is from -1 to 1; -1 means negative, 1 means positive.\n"
        )
        for key, value in emotion.items():
            if key == "A":
                emotion_text += f"Audio emotion: {value}\n"
            elif key == "T":
                emotion_text += f"Text emotion: {value}\n"
            elif key == "V":
                emotion_text += f"Video emotion: {value}\n"
            elif key == "M":
                emotion_text += f"Overall emotion: {value}\n"
        prompt = f"""
        You are a conversational AI.
        Your friend said: {text}.
        His emotion was detected as follows:
        {emotion_text}
        Respond to him.
        Your response will be sent directly to him.
        """
        ClusterManager.chain_next(
            track_id=track_id,
            current_component="completed_emotion_detection",
            next_component_params={"text": prompt, "data_text_id": data_text_id},
            user=sender.user,
        )
    except Exception as e:
        logger.exception(e)
```
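A minimal sketch of how this receiver might be invoked, assuming `completed_emotion_detection` is a standard `django.dispatch.Signal`; the import path, the `task` stand-in, and the track id below are illustrative, not taken from the source:

```python
from types import SimpleNamespace

from orchestrator.chain.signals import completed_emotion_detection  # assumed import path

# Stand-in sender: the receiver only reads `sender.user`.
task = SimpleNamespace(user=None)  # replace None with a real Django user

completed_emotion_detection.send(
    sender=task,
    data=data,              # payload shaped like the sketch above the listing
    track_id="example-id",  # chain tracking id; the receiver returns early if it is None
)
```

Django delivers signals synchronously, so `trigger_completed_emotion_detection` runs in the caller's thread before `send()` returns.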