Skip to content

CompletedTask

trigger_completed_task(sender, **kwargs)

Trigger the multi-modal emotion detection.

Source code in API/orchestrator/chain/completed_task.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
@receiver(completed_task)
def trigger_completed_task(sender, **kwargs):
    """
    Trigger the multi-modal emotion detection.

    Receive the generic ``completed_task`` signal and re-dispatch it to the
    task-specific completion signal selected by ``task_name``.

    Args:
        sender: Original signal sender; forwarded unchanged to the
            downstream signal.
        **kwargs: Expected to contain ``data``, a dict unpackable into
            ``TaskData`` (must provide ``task_name`` and ``track_id``).

    Returns:
        The list returned by the dispatched signal's ``send`` call, or
        ``None`` when no dedicated handler exists for the task name.
    """
    data = kwargs.get("data", {})
    task_data = TaskData(**data)

    # Dispatch table replaces the previous 11-branch if-chain: every branch
    # performed the identical ``signal.send(...)`` call, so adding a new
    # task type is now a one-line change here.
    signal_map = {
        "speech2text": completed_speech2text,
        "emotion_detection": completed_emotion_detection,
        "quantization_llm": completed_quantization_llm,
        "text2speech": completed_text2speech,
        "hf_llm": completed_hf_llm,
        "openai_speech2text": completed_openai_speech2text,
        "openai_gpt_4o_text_and_image": completed_openai_gpt_4o_text_and_image,
        "openai_gpt_35": completed_openai_gpt_35,
        "openai_gpt_4o_text_only": completed_openai_gpt_4o_text_only,
        "rag": completed_rag,
        "openai_text2speech": completed_openai_text2speech,
    }

    signal = signal_map.get(task_data.task_name)
    if signal is not None:
        # Log uniformly for every handled task (previously only some
        # branches logged completion).
        logger.info(f"{task_data.task_name} task completed")
        return signal.send(sender=sender, data=data, track_id=task_data.track_id)

    # No dedicated handler: distinguish an unknown task name (an error)
    # from a known-but-unhandled one (critical, so it is not silently lost).
    task_name_choices = Task.get_task_name_choices()
    task_name_choices_list = [task[0] for task in task_name_choices]
    if task_data.task_name not in task_name_choices_list:
        # Fixed garbled message ("Task name not found is not in the choices
        # list") and now include the offending name for debuggability.
        logger.error(f"Task name {task_data.task_name} is not in the choices list")
        return
    logger.critical(f"{task_data.task_name} task completed, however, no action taken.")