Here are 10 basic agent scripts to help you along your way. You'll notice that I have not included my neural network architectural design. One note before you start: the numeric prefixes (01_, 02_, ...) only indicate load order; save each file without its prefix (e.g. permissions.py, memory_manager.py, bootstrap.py) so cross-module imports like `import permissions as perm` resolve.

# --- 01_permissions.py ---
import subprocess
import json
import os
# Define the absolute path to your sandbox directory
SANDBOX_DIR = os.path.join(os.path.expanduser("~"), "termux-agent", "sandbox")

def get_highly_trusted_confirmation(action_description: str) -> bool:
    """Asks the user for permission TWICE before proceeding."""
    # This function uses a simplified version for brevity. You can add TTS calls.
    # Step 1 (termux-dialog reports the result as JSON on stdout)
    result1 = subprocess.run(['termux-dialog', 'confirm', '-i', f"PROPOSED ACTION:\n{action_description}", '-t', 'Permission (1/2)'], capture_output=True, text=True)
    if json.loads(result1.stdout).get("code") != -1:
        print("Initial request denied.")
        return False
    # Step 2
    result2 = subprocess.run(['termux-dialog', 'confirm', '-i', "Are you absolutely sure you want to authorize?", '-t', 'FINAL CONFIRMATION (2/2)'], capture_output=True, text=True)
    if json.loads(result2.stdout).get("code") == -1:
        print("Action Confirmed.")
        return True
    print("Final confirmation denied. Action aborted.")
    return False

def is_path_in_sandbox(file_path: str) -> bool:
    """Crucial security check to prevent directory traversal."""
    resolved_path = os.path.abspath(file_path)
    return os.path.commonpath([resolved_path, SANDBOX_DIR]) == SANDBOX_DIR
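
A quick way to sanity-check the traversal guard before wiring it into file tools (a minimal sketch; the example paths are hypothetical):

import permissions as perm

safe_path = perm.SANDBOX_DIR + "/notes/todo.txt"
unsafe_path = perm.SANDBOX_DIR + "/../../.bashrc"  # tries to escape the sandbox

print(perm.is_path_in_sandbox(safe_path))    # True
print(perm.is_path_in_sandbox(unsafe_path))  # False -- os.path.abspath collapses the ".." segments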
# --- 02_memory_manager.py ---
import os
import json
from datetime import datetime
# Path definitions
SANDBOX_DIR = os.path.join(os.path.expanduser("~"), "termux-agent", "sandbox")
MEMORY_DIR = os.path.join(SANDBOX_DIR, "memory")
SHORT_TERM_MEMORY_FILE = os.path.join(MEMORY_DIR, "conversation_context.json")
KNOWLEDGE_BASE_FILE = os.path.join(MEMORY_DIR, "knowledge_base.txt")
AI_NOTES_FILE = os.path.join(MEMORY_DIR, "ai_notes.txt")

def initialize_memory():
    """Ensures all memory files and directories exist."""
    os.makedirs(MEMORY_DIR, exist_ok=True)
    for f in [KNOWLEDGE_BASE_FILE, AI_NOTES_FILE]:
        if not os.path.exists(f):
            open(f, 'w').close()
    if not os.path.exists(SHORT_TERM_MEMORY_FILE):
        with open(SHORT_TERM_MEMORY_FILE, 'w') as file:
            json.dump([], file)  # start with an empty conversation list

def load_conversation():
    with open(SHORT_TERM_MEMORY_FILE, 'r') as f:
        return json.load(f)

def save_conversation(history):
    with open(SHORT_TERM_MEMORY_FILE, 'w') as f:
        json.dump(history, f, indent=2)

def add_turn(role, text):
    history = load_conversation()
    history.append({"role": role, "parts": [{"text": text}]})
    save_conversation(history)

def read_knowledge_base():
    with open(KNOWLEDGE_BASE_FILE, 'r') as f:
        return f.read()

def append_to_ai_notes(note_content):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open(AI_NOTES_FILE, 'a') as f:
        f.write(f"\n--- NOTE [{timestamp}] ---\n{note_content}\n")
    print("Note successfully saved.")
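
A minimal usage sketch (assuming the file is saved as memory_manager.py):

import memory_manager as mem

mem.initialize_memory()
mem.add_turn("user", "Remember that my project deadline is Friday.")
mem.append_to_ai_notes("User has a project deadline on Friday.")
print(len(mem.load_conversation()))  # number of stored turns so far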
# --- 03_web_access.py ---
# This is a placeholder for your web tools. Requires: pip install requests beautifulsoup4
import requests
from bs4 import BeautifulSoup
import permissions as perm
def fetch_website_content(url: str) -> str:
    """Securely fetches and parses text content from a URL."""
    if not perm.get_highly_trusted_confirmation(f"Access the internet to fetch content from {url}?"):
        return "Permission denied to access the web."
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # Simple text extraction; can be improved
        for script in soup(["script", "style"]):
            script.extract()
        return " ".join(soup.stripped_strings)
    except Exception as e:
        return f"Error fetching URL: {e}"
# --- 04_model_core.py ---
import google.generativeai as genai
import ollama
import socket
# Model Configuration
PRIMARY_MODEL_NAME = 'gemini-1.5-pro-latest'
LOCAL_FALLBACK_MODEL_NAME = 'llama3:8b-instruct'

google_client = None
ollama_client = None
current_mode = "UNKNOWN"

def is_internet_available():
    try:
        socket.create_connection(("8.8.8.8", 53), timeout=3)
        return True
    except OSError:
        return False

def initialize_clients(api_key: str):
    global google_client, ollama_client
    genai.configure(api_key=api_key)
    google_client = genai.GenerativeModel(PRIMARY_MODEL_NAME)
    ollama_client = ollama.Client()
    print("Initialized Google and Ollama clients.")

def generate_response(conversation_history: list) -> str:
    global current_mode
    if is_internet_available():
        if current_mode != "ONLINE":
            print("Switched to ONLINE mode.")
            current_mode = "ONLINE"
        try:
            response = google_client.generate_content(conversation_history)
            return response.text
        except Exception as e:
            print(f"Online model failed: {e}. Falling back...")
    # Offline Fallback
    if current_mode != "OFFLINE":
        print("Switched to OFFLINE mode.")
        current_mode = "OFFLINE"
    try:
        # Convert Gemini-style history to Ollama's chat message format
        messages = [{'role': 'assistant' if turn['role'] == 'model' else 'user', 'content': turn['parts'][0]['text']} for turn in conversation_history]
        response = ollama_client.chat(model=LOCAL_FALLBACK_MODEL_NAME, messages=messages)
        return response['message']['content']
    except Exception as e:
        return f"FATAL: Offline model also failed: {e}"

def get_current_model_name():
    return PRIMARY_MODEL_NAME if current_mode == "ONLINE" else LOCAL_FALLBACK_MODEL_NAME
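
A quick smoke test of the fallback logic (a sketch; assumes the file is saved as model_core.py, a valid API key, and a local Ollama install with the fallback model already pulled):

import model_core as model

model.initialize_clients("YOUR_API_KEY")  # placeholder key
history = [{"role": "user", "parts": [{"text": "Say hello in one sentence."}]}]
print(model.generate_response(history))
print("Answered by:", model.get_current_model_name())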
# --- 05_neural_network_arch.py ---
“”"
This file is a placeholder for custom neural network architecture designs.
In a production system, this would contain model definitions using frameworks
like PyTorch or TensorFlow/Keras.
For example, a conceptual PyTorch model definition:
import torch
import torch.nn as nn
class CustomTransformer(nn.Module):
def init(self, vocab_size, d_model, nhead, num_layers):
super(CustomTransformer, self).init()
self.embedding = nn.Embedding(vocab_size, d_model)
encoder_layers = nn.TransformerEncoderLayer(d_model, nhead)
self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)
self.fc_out = nn.Linear(d_model, vocab_size)
def forward(self, src):
embedded = self.embedding(src)
output = self.transformer_encoder(embedded)
return self.fc_out(output)
“”"
print(“This is a conceptual script for ML architecture and is not executed directly.”)
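
If you want to exercise the conceptual model, copy the class out of the docstring and run a shape check like this (assumes torch is installed; the sizes are arbitrary):

import torch

net = CustomTransformer(vocab_size=1000, d_model=64, nhead=4, num_layers=2)
src = torch.randint(0, 1000, (16, 1))  # (sequence_length, batch) of token ids
logits = net(src)
print(logits.shape)  # torch.Size([16, 1, 1000]) -- one logit per vocab entry per position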
# --- 06_agent_loop.py ---
import model_core as model
import memory_manager as mem
import ui_manager as ui
def start_agent():
    system_prompt = """
    You are an advanced AI assistant running on a hybrid system.
    You have a short-term memory (the conversation), a long-term knowledge base, and you can take notes.
    To save a thought, you MUST output a JSON command: {"action": "save_note", "content": "Your note here."}
    Your primary goal is to assist the user based on their requests.
    """
    # Add system prompt to conversation if it's empty
    if not mem.load_conversation():
        mem.add_turn('user', system_prompt)  # Gemini requires a user message first for system context
        mem.add_turn('model', "Understood. I am ready to assist.")
    ui.speak("Agent is ready.")
    while True:
        user_input = ui.get_text_input("Your command:")
        if user_input.lower() == 'exit':
            ui.speak("Shutting down agent.")
            break
        mem.add_turn('user', user_input)
        # Inject knowledge base for this turn's prompt construction
        full_conversation_history = mem.load_conversation()
        response_text = model.generate_response(full_conversation_history)
        # Here, you would add parsing logic for actions like `save_note` (see the sketch below)
        mem.add_turn('model', response_text)
        ui.speak(response_text)
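
The save_note parsing step the loop comment alludes to could look like this (a minimal sketch; it assumes the model emits the bare JSON command exactly as instructed in the system prompt):

import json

def handle_actions(response_text: str) -> str:
    """Intercepts JSON action commands; returns the text to store and speak."""
    try:
        command = json.loads(response_text)
    except json.JSONDecodeError:
        return response_text  # plain prose, nothing to execute
    if isinstance(command, dict) and command.get("action") == "save_note":
        mem.append_to_ai_notes(command.get("content", ""))
        return "Note saved."
    return response_text

Call it as response_text = handle_actions(response_text) right before mem.add_turn('model', response_text).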
# --- 07_bootstrap.py ---
import os
import subprocess
import memory_manager as mem
import model_core as model
import agent_loop
import json
# --- CONFIGURATION ---
# It's better to read this from a secure file
YOUR_GOOGLE_API_KEY = "PASTE_YOUR_API_KEY_HERE"

def run_checks():
    print("Running system checks...")
    try:
        # Check if termux-api is reachable
        subprocess.run(['termux-notification', '--title', 'System Check', '--content', 'Agent starting up...'], check=True)
    except FileNotFoundError:
        print("\nFATAL ERROR: termux-api is not installed or accessible.")
        print("Please install the Termux:API app and run `pkg install termux-api`.")
        exit(1)
    print("Termux API OK.")
    if YOUR_GOOGLE_API_KEY == "PASTE_YOUR_API_KEY_HERE":
        print("\nFATAL ERROR: Please paste your Google API Key into `bootstrap.py`.")
        exit(1)
    print("API Key found.")

def start_local_server():
    print("Starting local Ollama server in the background...")
    # This assumes `ollama serve` will run and detach.
    # A more robust solution might involve checking if it's already running (see the sketch below).
    subprocess.Popen(['ollama', 'serve'])

def main():
    print("--- Bootstrapping Blip Agent ---")
    run_checks()
    start_local_server()
    mem.initialize_memory()
    model.initialize_clients(YOUR_GOOGLE_API_KEY)
    # The agent loop takes over from here
    agent_loop.start_agent()

if __name__ == "__main__":
    main()
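
For the already-running check mentioned in start_local_server, probing Ollama's default port is a cheap test (a sketch; 11434 is Ollama's default listen port, adjust it if you have changed yours):

import socket
import subprocess

def is_ollama_running(host: str = "127.0.0.1", port: int = 11434) -> bool:
    """Returns True if something is already listening on the Ollama port."""
    try:
        with socket.create_connection((host, port), timeout=2):
            return True
    except OSError:
        return False

def start_local_server():
    if is_ollama_running():
        print("Ollama server already running.")
        return
    print("Starting local Ollama server in the background...")
    subprocess.Popen(['ollama', 'serve'])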
# --- 08_ui_manager.py ---
import subprocess
import json
def speak(text: str):
    # termux-tts-speak is the Termux:API text-to-speech command
    subprocess.run(['termux-tts-speak', text])

def get_text_input(title: str = "Input") -> str:
    speak(title)
    result = subprocess.run(['termux-dialog', 'text', '-t', title], capture_output=True, text=True)
    # termux-dialog prints JSON like {"code": -1, "text": "..."}; return just the text
    return json.loads(result.stdout).get("text", "").strip()

# Note: The interactive notification logic is more complex and best triggered from bootstrap or the loop (see the sketch below)
def show_initial_notification():
    """Shows a non-interactive startup notification."""
    subprocess.run(['termux-notification', '--title', 'Agent Status', '--content', 'Bootstrap complete. Agent running.'])
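
As a starting point for the interactive notification mentioned in the note above, termux-notification supports action buttons. A sketch (the button command here is purely illustrative; a real handler would need an entry point back into the agent):

def show_interactive_notification():
    """Shows a notification with a button that triggers a shell command."""
    subprocess.run([
        'termux-notification',
        '--id', 'agent-status',
        '--title', 'Agent Status',
        '--content', 'Agent running. Tap the button to interact.',
        '--button1', 'New Command',
        '--button1-action', 'termux-dialog text -t "Your command:"',  # illustrative action
    ])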
# --- 09_blip_defense.py ---
import os
import signal
import sys
import model_swap as swap
“”"
This is a CONCEPTUAL script. Direct implementation of process monitoring
and restarting from within Python in Termux is highly complex.
This serves as the logical blueprint for the blip protocol.
“”"
AGENT_PID = None # The bootstrap would need to set this
def initiate_blip_protocol():
print(“\n— BLIP PROTOCOL INITIATED —”)
# 1. Kill the current agent process (assuming its PID is known)
if AGENT_PID:
os.kill(AGENT_PID, signal.SIGKILL)
print(f"Terminated process {AGENT_PID}.")
# 2. Get the next model from the swap list
next_model = swap.get_next_model()
print(f"Swapping to model: {next_model}")
# 3. Modify a config file or environment variable with the new model name
# (The bootstrap script would need to read this on startup)
with open('model_override.conf', 'w') as f:
f.write(next_model)
# 4. Re-launch the entire system via bootstrap
print("Re-launching system with new model...")
os.execv(sys.executable, ['python'] + ['07_bootstrap.py'])
def monitor_health():
“”“A conceptual function that would run as a separate thread/process
to check the agent’s health and trigger the blip protocol.”“”
global AGENT_PID
AGENT_PID = os.getpid() # For demo, agent monitors itself
print(f"Monitoring agent with PID: {AGENT_PID}. This is a placeholder for real monitoring.")
# --- 10_model_swap.py ---
import model_core as core
MODEL_ROTATION_LIST = [
    'gemini-1.5-pro-latest',
    'gemini-1.5-flash-latest',
    'gemma-2-27b-it',  # Would need to be used via API
    # Local Ollama models can also be in the list for a different kind of fallback
]

def get_next_model() -> str:
    """Gets the next model in the rotation list after the current one."""
    try:
        current_model = core.get_current_model_name()
        current_index = MODEL_ROTATION_LIST.index(current_model)
        next_index = (current_index + 1) % len(MODEL_ROTATION_LIST)
        return MODEL_ROTATION_LIST[next_index]
    except (ValueError, IndexError):
        # If the current model isn't in the list, just return the first one
        return MODEL_ROTATION_LIST[0]
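
To close the loop from 09_blip_defense.py, the bootstrap needs to honor model_override.conf on startup. A minimal sketch (assumes the file sits in the working directory, as written by the blip protocol); call it in bootstrap.py before model.initialize_clients(...):

import os

def apply_model_override():
    """Reads model_override.conf (if present) and overrides the primary model."""
    if os.path.exists('model_override.conf'):
        with open('model_override.conf', 'r') as f:
            override = f.read().strip()
        if override:
            core.PRIMARY_MODEL_NAME = override
            print(f"Model override applied: {override}")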
Keep in mind that when running the agent you have to make use of all 10 of these files together; it's the combination of functionality, defense, and tactics that makes it work. Remember to be responsible, and happy developing!
P.S. Always make sure that your model does not have uncontrolled access or functionality; this is a major vulnerability in terms of model safety and ethical standards.