
API Reference - Text Chat#

src #

intentional_text_chat #

Init file for intentional_text_chat.

__about__ #

Package descriptors for intentional-text-chat.

bot_structure #

Bot structure to support text chat for Intentional.

TextChatBotStructure #

Bases: TurnBasedBotStructure

Bot structure implementation for text chat.

Source code in plugins/intentional-text-chat/src/intentional_text_chat/bot_structure.py
class TextChatBotStructure(TurnBasedBotStructure):
    """
    Bot structure implementation for text chat.
    """

    name = "text_chat"

    def __init__(self, config: Dict[str, Any], intent_router: IntentRouter):
        """
        Args:
            config:
                The configuration dictionary for the bot structure.
        """
        super().__init__()
        logger.debug("Loading TextChatBotStructure from config: %s", config)

        # Init the model client
        llm_config = config.pop("llm", None)
        if not llm_config:
            raise ValueError("TextChatBotStructure requires a 'llm' configuration key to know which model to use.")
        self.model: TurnBasedModelClient = load_model_client_from_dict(
            parent=self, intent_router=intent_router, config=llm_config
        )

    async def connect(self) -> None:
        await self.model.connect()

    async def disconnect(self) -> None:
        await self.model.disconnect()

    async def run(self) -> None:
        """
        Main loop for the bot.
        """
        await self.model.run()

    async def send(self, data: Dict[str, Any]) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Sends a message to the model and forwards the response.

        Args:
            data: The message to send to the model in OpenAI format, like {"role": "user", "content": "Hello!"}
        """
        await self.model.send({"text_message": data})

    async def handle_interruption(self, lenght_to_interruption: int) -> None:
        """
        Handle an interruption in the streaming.

        Args:
            lenght_to_interruption: The length of the data that was produced to the user before the interruption.
                This value could be number of characters, number of words, milliseconds, number of audio frames, etc.
                depending on the bot structure that implements it.
        """
        logger.warning("TODO! Interruption not yet supported in text chat bot structure.")
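
A minimal usage sketch for constructing this structure. The IntentRouter import path and the contents of the "llm" block below are assumptions (they depend on the intentional core package and on the model client plugin installed), not something taken from this reference:

from typing import Any, Dict

from intentional_text_chat import TextChatBotStructure
from intentional_core import IntentRouter  # assumed import path


def build_structure(intent_router: IntentRouter) -> TextChatBotStructure:
    # The keys inside "llm" are placeholders; the real ones are defined by the
    # model client plugin in use.
    config: Dict[str, Any] = {
        "llm": {
            "client": "openai",
            "name": "gpt-4o",
        }
    }
    return TextChatBotStructure(config, intent_router=intent_router)
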
__init__(config, intent_router) #

Parameters:

    config (Dict[str, Any], required):
        The configuration dictionary for the bot structure.
Source code in plugins/intentional-text-chat/src/intentional_text_chat/bot_structure.py
def __init__(self, config: Dict[str, Any], intent_router: IntentRouter):
    """
    Args:
        config:
            The configuration dictionary for the bot structure.
    """
    super().__init__()
    logger.debug("Loading TextChatBotStructure from config: %s", config)

    # Init the model client
    llm_config = config.pop("llm", None)
    if not llm_config:
        raise ValueError("TextChatBotStructure requires a 'llm' configuration key to know which model to use.")
    self.model: TurnBasedModelClient = load_model_client_from_dict(
        parent=self, intent_router=intent_router, config=llm_config
    )
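
Note that the constructor pops the "llm" entry out of the dictionary you pass in, so the caller's config is mutated, and a missing "llm" key raises a ValueError right away. A small illustration, reusing the placeholder config and router from the sketch above:

config = {"llm": {"client": "openai"}, "other_settings": {}}
structure = TextChatBotStructure(config, intent_router=router)
assert "llm" not in config  # the entry was consumed by the constructor

try:
    TextChatBotStructure({}, intent_router=router)
except ValueError as exc:
    print(exc)  # requires a 'llm' configuration key
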
handle_interruption(lenght_to_interruption) async #

Handle an interruption in the streaming.

Parameters:

    lenght_to_interruption (int, required):
        The length of the data that was produced to the user before the interruption. This value could be a number of characters, words, milliseconds, audio frames, etc., depending on the bot structure that implements it.
Source code in plugins/intentional-text-chat/src/intentional_text_chat/bot_structure.py
async def handle_interruption(self, lenght_to_interruption: int) -> None:
    """
    Handle an interruption in the streaming.

    Args:
        lenght_to_interruption: The length of the data that was produced to the user before the interruption.
            This value could be number of characters, number of words, milliseconds, number of audio frames, etc.
            depending on the bot structure that implements it.
    """
    logger.warning("TODO! Interruption not yet supported in text chat bot structure.")
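
Interruptions are not handled yet for text chat: the call only logs a warning and changes no state. For a text-based structure a character count would be a natural unit for the argument, though that is an assumption rather than something the code enforces:

async def on_user_interrupt(structure: TextChatBotStructure, chars_shown: int) -> None:
    # Currently a no-op apart from the warning log.
    await structure.handle_interruption(lenght_to_interruption=chars_shown)
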
run() async #

Main loop for the bot.

Source code in plugins/intentional-text-chat/src/intentional_text_chat/bot_structure.py
async def run(self) -> None:
    """
    Main loop for the bot.
    """
    await self.model.run()
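
A hedged sketch of the lifecycle around run(), assuming run() blocks until the conversation ends (as the "main loop" wording suggests):

import asyncio


async def main(structure: TextChatBotStructure) -> None:
    await structure.connect()
    try:
        await structure.run()  # drives the model's main loop
    finally:
        await structure.disconnect()


# asyncio.run(main(build_structure(router)))
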
send(data) async #

Sends a message to the model and forwards the response.

Parameters:

    data (Dict[str, Any], required):
        The message to send to the model in OpenAI format, like {"role": "user", "content": "Hello!"}.
Source code in plugins/intentional-text-chat/src/intentional_text_chat/bot_structure.py
async def send(self, data: Dict[str, Any]) -> AsyncGenerator[Dict[str, Any], None]:
    """
    Sends a message to the model and forwards the response.

    Args:
        data: The message to send to the model in OpenAI format, like {"role": "user", "content": "Hello!"}
    """
    await self.model.send({"text_message": data})
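
The payload is a single OpenAI-style message; send() wraps it as {"text_message": data} before handing it to the model client. For example, inside an async context:

async def say_hello(structure: TextChatBotStructure) -> None:
    # One OpenAI-format message dict per call.
    await structure.send({"role": "user", "content": "Hello!"})
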