Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • intheflow/signal-bot
1 result
Show changes
Commits on Source (2)
......@@ -13,3 +13,4 @@ wheels/
signal-cli-config/
.env
config.yml
......@@ -23,20 +23,14 @@
Follow the instructions in the [ollama repository](https://github.com/ollama/ollama) to set up the service.
5. **Configure the `.env` File**
Create a `.env` file in the root directory of the project and add the necessary configuration variables.
```env
SIGNAL_NUMBER=<your phone number>
# optional
OLLAMA_MODEL=<defaults to llama3.2>
SIGNAL_SERVICE=<defaults to localhost:8080>
OLLAMA_HOST=<defaults to 127.0.0.1:11434>
OLLAMA_CONTEXT=<some custom instruction on how the bot should behave>
BOT_WAKE_WORD=<some wake words the bot should listen to>
5. **Create config file**
```sh
cp config.yml.example config.yml
```
Edit `config.yml` to configure your bot settings, such as personalities, Signal service details, and Ollama configuration.
## Running the Bot
1. **Quick Test**
......
---
signal:
number: "+<your phone number>"
ollama:
# optional
host: "127.0.0.1:11434"
# optional
bot:
personalities:
- name: "Nerd"
trigger: "hey nerd"
model: "llama3.2"
instructions: "You are a nerd, make answers nerdy, include nerdy cliches, make answers patronising, in-depth answers"
example_question: "What is an LLM?"
......@@ -29,6 +29,7 @@ exclude = [
"dist",
"build",
".mypy_cache",
"signal-cli-config",
]
line-length = 120
target-version = "py312"
......@@ -44,4 +45,4 @@ python_version = "3.12"
warn_return_any = true
warn_unused_configs = true
show_error_codes = true
exclude = ["env", "venv", ".venv"]
exclude = ["env", "venv", ".venv", "signal-cli-config"]
from signalbot import Command, Context, SignalBot
from .chat import relay_message_to_ollama
from .config import BotConfig, OllamaConfig, SignalConfig
from .config import AppConfig
# NOTE(review): this span comes from a GitLab compare view with the +/- diff
# markers stripped.  The three per-section, env-backed configs below appear to
# be the REMOVED revision; the single YAML-backed AppConfig replaces them in
# the added one — confirm against the real repository.
signal_config = SignalConfig()
ollama_config = OllamaConfig()
bot_config = BotConfig()
config = AppConfig()
# NOTE(review): pre- and post-refactor bodies of handle() are concatenated here
# because the diff's +/- markers were stripped; the wake-word branch looks like
# the REMOVED revision and the personality loop the ADDED one — verify upstream.
class BotCommand(Command):
async def handle(self, c: Context):
# Removed revision: single global wake word, one shared Ollama context.
if c.message.text and c.message.text.lower().replace(",", "").startswith(bot_config.wake_word):
response = await relay_message_to_ollama(c.message.text, ollama_config.context)
await c.start_typing()
await c.send(response)
await c.stop_typing()
# Added revision: each configured personality has its own trigger, model
# and instructions, and the bot replies (threaded) instead of sending.
for personality in config.bot.personalities:
msg = c.message.text
# Commas are stripped so e.g. "hey, nerd" still matches trigger "hey nerd".
if msg and msg.lower().replace(",", "").startswith(personality.trigger):
response = await relay_message_to_ollama(msg, personality.model, personality.instructions)
await c.start_typing()
await c.reply(response)
await c.stop_typing()
if __name__ == "__main__":
bot = SignalBot(
{
# NOTE(review): diff artifact — the env-driven values (signal_config.*)
# are the removed revision, the YAML-driven ones (config.signal.*) the
# added revision; in this merged text the later duplicate keys win.
"signal_service": signal_config.service,
"phone_number": signal_config.number,
"signal_service": config.signal.service,
"phone_number": config.signal.number,
}
)
bot.register(BotCommand())
......
from typing import cast
import aiohttp
from .config import OllamaConfig
from .config import AppConfig
ollama_config = OllamaConfig()  # removed revision: env-backed Ollama settings
config = AppConfig()  # added revision: single YAML-backed app config
# NOTE(review): both the old and the new signature of the relay helper are
# visible below (stripped diff markers).  The second def appears to be the
# ADDED revision: the caller now supplies the model and per-personality
# instructions explicitly instead of reading them from global config.
# The middle of the function body (HTTP session/post) is hidden by the diff
# hunk that follows — do not assume its exact shape from this excerpt.
async def relay_message_to_ollama(message: str, context: str = "") -> str:
url = f"http://{ollama_config.host}/api/generate/"
async def relay_message_to_ollama(message: str, model: str, instructions: str = "") -> str:
# Non-streaming request to Ollama's generate endpoint on the configured host.
url = f"http://{config.ollama.host}/api/generate/"
payload = {
"model": ollama_config.model,
"prompt": f"{context}\n{message}",
"model": model,
# Instructions are simply prepended to the user message as one prompt.
"prompt": f"{instructions}\n{message}",
"stream": False,
}
headers = {"Content-Type": "application/json"}
......@@ -19,13 +21,16 @@ async def relay_message_to_ollama(message: str, context: str = "") -> str:
# Continuation of relay_message_to_ollama after the hidden diff hunk.
response.raise_for_status()
data = await response.json()
# Old return vs new return: cast() only narrows the json() Any for mypy;
# runtime behavior is unchanged.
return data.get("response", "")
return cast(str, data.get("response", ""))
# Example usage (manual smoke test; both diff revisions appear below).
if __name__ == "__main__":
import asyncio
# Removed revision: one hard-coded question against the global context.
message = "Why is the sky blue?"
response = asyncio.run(relay_message_to_ollama(message, ollama_config.context))
print(response)
# Added revision: exercise every configured personality with its own
# example question, model and instructions.
for personality in config.bot.personalities:
print(f"You: {personality.example_question}\n") # noqa: T201
response = asyncio.run(
relay_message_to_ollama(personality.example_question, personality.model, personality.instructions)
)
print(f"{personality.name}: {response}\n{'#' * 80}\n") # noqa: T201
from confz import BaseConfig, EnvSource
from typing import ClassVar
from confz import BaseConfig, ConfigSource, FileSource
from pydantic import BaseModel
# NOTE(review): removed revision of BotConfig — a single global wake word
# loaded from .env (BOT_* variables).  Superseded by the personality-list
# BotConfig further down.
class BotConfig(BaseConfig):
wake_word: str = "hey ollama"
CONFIG_SOURCES = [EnvSource(file=".env", prefix="BOT_", allow_all=True)]
class PersonalityConfig(BaseModel):
"""One bot persona: what wakes it, which model answers, and how it behaves."""
# Message prefix that activates this persona (the caller lower-cases the
# message and strips commas before matching).
trigger: str = "hey ollama"
# Display name, used when printing example exchanges.
name: str = "Ollama"
# Ollama model tag sent with each generate request.
model: str = "llama3.2"
# System-style preamble prepended to every prompt for this persona.
instructions: str = """
You are a Signal AI bot.
You generate chat replies based on the prompt you receive.
Keep your responses short, relevant and respectful.
"""
# Prompt used by the chat module's __main__ smoke test.
example_question: str = "Why is the sky blue?"
# Added revision: the bot section is just a list of personalities; pydantic
# copies the default list per instance, so the mutable default is safe here.
class BotConfig(BaseModel):
personalities: list[PersonalityConfig] = [PersonalityConfig()]
# NOTE(review): diff artifact — the old (BaseConfig + .env source) and new
# (plain BaseModel nested under AppConfig) class headers both appear here.
class SignalConfig(BaseConfig):
class SignalConfig(BaseModel):
number: str  # bot's Signal phone number, with leading "+" per config.yml.example
service: str = "127.0.0.1:8080"  # signal-cli REST service host:port
CONFIG_SOURCES = [EnvSource(file=".env", prefix="SIGNAL_", allow_all=True)]
# NOTE(review): diff artifact — old env-backed header followed by the new
# BaseModel one; the field set is shared by both revisions.
class OllamaConfig(BaseConfig):
class OllamaConfig(BaseModel):
host: str = "127.0.0.1:11434"  # host:port only; the chat module adds "http://"
model: str = "llama3.2"
context: str = ""
CONFIG_SOURCES = [EnvSource(file=".env", prefix="OLLAMA_", allow_all=True)]
class AppConfig(BaseConfig):
"""Root configuration, loaded from config.yml via confz."""
bot: BotConfig
signal: SignalConfig
ollama: OllamaConfig
# ClassVar keeps pydantic from treating the source list as a model field.
CONFIG_SOURCES: ClassVar[list[ConfigSource]] = [FileSource(file="config.yml")]