AI Integration Guide
Learn how to give your AI agents the power to control terminals and manage infrastructure.
💡 Why AI Needs Termitty
Current AI coding assistants can write code but struggle with deployment and server management. Termitty bridges this gap by providing AI-friendly terminal control with structured output, smart waiting, and interactive prompt handling.
Core Concepts for AI
1. Structured State
Unlike traditional SSH libraries, Termitty provides structured output that AI can understand:
# The AI receives structured data rather than raw text.
state = session.state.terminal.get_structured_state()
# Example of the returned structure:
{
    "screen": {
        "text": "user@server:~$ █",
        "cursor": {"row": 0, "col": 15},
        "size": {"rows": 24, "cols": 80},
    },
    "detected_elements": [
        {"type": "prompt", "text": "user@server:~$", "position": {"row": 0, "col": 0}},
        {"type": "cursor", "position": {"row": 0, "col": 15}},
    ],
    "context": {
        "working_directory": "/home/user",
        "last_command": "ls -la",
        "environment": {"USER": "user", "HOME": "/home/user"},
    },
}
2. Smart Waiting for AI
AI agents can wait for specific conditions instead of guessing with timeouts:
# AI doesn't need to guess when commands complete
# wait=False starts the command and returns immediately; the build keeps
# running on the remote side while we wait for conditions below.
session.execute('./build.sh', wait=False)
# Wait for specific conditions
session.wait_until(OutputContains('Build successful'))
session.wait_until(PromptReady())
# Custom conditions for AI logic
# Any callable taking the session state can serve as a condition; timeout is
# in seconds. NOTE(review): presumably wait_until returns falsy or raises on
# timeout — confirm against the Termitty documentation.
session.wait_until(
lambda s: 'error' not in s.terminal.get_screen_text().lower(),
timeout=300
)
LangChain Integration
Create a Termitty tool for LangChain agents to control servers:
from langchain.tools import Tool
from langchain.agents import initialize_agent
from langchain.llms import OpenAI
from termitty import TermittySession
import json
class TermittyTool:
    """Expose a Termitty SSH session as callable tools for a LangChain agent.

    The connection is created lazily: the first call to either tool method
    opens the SSH session, which is then reused for subsequent calls.
    """

    def __init__(self, host, username, key_file):
        self.host = host
        self.username = username
        self.key_file = key_file
        # Opened lazily on first use — see connect().
        self.session = None

    def connect(self):
        """Open the SSH session to the configured host."""
        self.session = TermittySession()
        self.session.connect(self.host, username=self.username, key_file=self.key_file)

    def execute_command(self, command: str) -> str:
        """Execute a command on the remote server and return structured output."""
        if not self.session:
            self.connect()
        result = self.session.execute(command)
        state = self.session.state.terminal.get_structured_state()
        # JSON keeps the tool output machine-parseable for the LLM.
        return json.dumps({
            "output": result.output,
            "exit_code": result.exit_code,
            "screen_state": state,
            "success": result.exit_code == 0
        })

    def handle_interactive(self, commands: list) -> str:
        """Handle interactive terminal sessions.

        Each entry in *commands* is either a plain command line, or a
        directive: 'WAIT:<text>' blocks until the text appears on screen,
        'KEY:<name>' sends a single key press. The screen contents after
        each step are collected and returned as a JSON list.
        """
        if not self.session:
            self.connect()
        responses = []
        with self.session.interactive_shell() as shell:
            for cmd in commands:
                if cmd.startswith('WAIT:'):
                    shell.wait_for_text(cmd[5:])
                elif cmd.startswith('KEY:'):
                    shell.send_key(cmd[4:])
                else:
                    shell.send_line(cmd)
                responses.append(shell.get_screen_text())
        return json.dumps(responses)
# Create LangChain tools
# A single shared TermittyTool instance backs both tools below, so they
# reuse one SSH session.
termitty_tool = TermittyTool('prod-server', 'deploy', '~/.ssh/id_rsa')
tools = [
Tool(
name="ServerCommand",
func=termitty_tool.execute_command,
description="Execute commands on production server. Input: command string"
),
Tool(
name="InteractiveTerminal",
func=termitty_tool.handle_interactive,
description="Handle interactive terminal sessions. Input: list of commands/actions"
)
]
# Initialize agent
# temperature=0 keeps the agent's tool selection deterministic.
llm = OpenAI(temperature=0)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description")
# AI agent can now manage servers!
# The agent plans with the LLM and invokes the tools defined above.
result = agent.run("""
Check if nginx is running on the production server.
If it's not running, start it and verify it's working.
""")
OpenAI Function Calling
Use Termitty with OpenAI's function calling for structured server management:
import openai
from termitty import TermittySession
import json
# OpenAI function-calling schemas describing the two server-management tools.
functions = [
    {
        "name": "execute_server_command",
        "description": "Execute a command on a remote server via SSH",
        "parameters": {
            "type": "object",
            "properties": {
                "server": {"type": "string", "description": "Server hostname"},
                "command": {"type": "string", "description": "Command to execute"},
                "wait_for": {"type": "string", "description": "Text to wait for (optional)"},
            },
            "required": ["server", "command"],
        },
    },
    {
        "name": "deploy_application",
        "description": "Deploy an application with intelligent monitoring",
        "parameters": {
            "type": "object",
            "properties": {
                "server": {"type": "string"},
                "app_name": {"type": "string"},
                "version": {"type": "string"},
            },
            "required": ["server", "app_name", "version"],
        },
    },
]
def execute_server_command(server, command, wait_for=None):
    """Run *command* on *server* over SSH and return a JSON result string.

    If *wait_for* is given, the command is started without blocking and we
    wait until that text appears in the output; otherwise the command runs
    to completion and its output and exit code are returned. In both cases
    the structured terminal state is attached to the result.
    """
    with TermittySession() as session:
        session.connect(server, username='deploy', key_file='~/.ssh/id_rsa')
        if wait_for:
            session.execute(command, wait=False)
            # NOTE(review): OutputContains is not imported in this snippet —
            # it presumably comes from termitty; confirm the import path.
            session.wait_until(OutputContains(wait_for))
            result = {"status": "completed", "waited_for": wait_for}
        else:
            result = session.execute(command)
            result = {
                "output": result.output,
                "exit_code": result.exit_code,
                "success": result.exit_code == 0
            }
        # Attach the structured terminal state so the model has full context.
        result["terminal_state"] = session.state.terminal.get_structured_state()
        return json.dumps(result)
def deploy_application(server, app_name, version):
    """Deploy *app_name* at *version* on *server*, monitoring for errors.

    Returns a JSON string: on failure, the captured terminal state for AI
    analysis; on success, the output of a local health check.
    """
    with TermittySession() as session:
        session.connect(server, username='deploy', key_file='~/.ssh/id_rsa')
        # Start the deployment without blocking so we can watch its output.
        session.execute(f'./deploy.sh {app_name} {version}', wait=False)
        # Poll until either an error or the completion marker shows up.
        # NOTE(review): assumes wait_until returns falsy on timeout rather
        # than raising — confirm against the Termitty documentation.
        while True:
            if session.wait_until(OutputContains('error'), timeout=5):
                # Capture the error state so the AI can analyze what went wrong.
                error_state = session.state.terminal.get_structured_state()
                return json.dumps({
                    "status": "error",
                    "error_state": error_state,
                    "screen": session.state.terminal.get_screen_text()
                })
            elif session.wait_until(OutputContains('Deployment complete')):
                break
        # Verify the deployment by hitting the app's health endpoint.
        health = session.execute('curl -s localhost/health')
        return json.dumps({
            "status": "success",
            "health_check": health.output,
            "version_deployed": version
        })
# Use with OpenAI
response = openai.ChatCompletion.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "Deploy version 2.1.0 of the web app to production"}
    ],
    functions=functions,
    function_call="auto"
)

# Execute the function call the model chose, if any.
if response.choices[0].message.get("function_call"):
    call = response.choices[0].message["function_call"]
    function_name = call["name"]
    # Arguments arrive as a JSON-encoded string and must be decoded first.
    function_args = json.loads(call["arguments"])
    if function_name == "deploy_application":
        result = deploy_application(**function_args)
    elif function_name == "execute_server_command":
        result = execute_server_command(**function_args)
Autonomous AI Agents
Build fully autonomous AI agents that can manage infrastructure:
class AutonomousDevOpsAgent:
    """AI agent that monitors a fleet of servers and applies fixes.

    NOTE(review): this class calls self.detect_issues, self.log_success and
    self.escalate, which are not defined here — presumably provided elsewhere
    (or by a subclass); confirm before use. It also relies on the ``time``
    module being imported at file level.
    """

    def __init__(self, ai_model, servers):
        self.ai = ai_model
        # Each server entry: {'name', 'host', 'user', 'key'}.
        self.servers = servers
        # name -> open TermittySession, populated by connect_all().
        self.sessions = {}

    def connect_all(self):
        """Connect to all managed servers."""
        for server in self.servers:
            session = TermittySession()
            session.connect(server['host'],
                            username=server['user'],
                            key_file=server['key'])
            self.sessions[server['name']] = session

    def monitor_and_fix(self):
        """Continuously monitor all servers and fix issues (runs forever)."""
        while True:
            for name, session in self.sessions.items():
                # Check system health
                health = self.check_system_health(session)
                if health['issues']:
                    # AI analyzes the issues and recommends actions.
                    analysis = self.ai.analyze(json.dumps(health))
                    for action in analysis['recommended_actions']:
                        self.execute_fix(session, action)
                        # Verify the fix actually resolved the problem.
                        new_health = self.check_system_health(session)
                        if new_health['healthy']:
                            self.log_success(name, action)
                        else:
                            # Escalate to human or try alternative
                            self.escalate(name, health, action)
            time.sleep(60)  # Check every minute

    def check_system_health(self, session):
        """Collect a health snapshot plus the structured terminal state."""
        checks = {
            'cpu': session.execute('top -bn1 | head -5'),
            'memory': session.execute('free -h'),
            'disk': session.execute('df -h'),
            'services': session.execute('systemctl list-units --failed'),
            'errors': session.execute('journalctl -p err -n 50')
        }
        # Get terminal state for AI context.
        state = session.state.terminal.get_structured_state()
        # Compute the issue list once and reuse it for the 'healthy' flag
        # (the original called detect_issues twice).
        issues = self.detect_issues(checks)
        return {
            'checks': {k: v.output for k, v in checks.items()},
            'terminal_state': state,
            'issues': issues,
            'healthy': not issues
        }

    def execute_fix(self, session, action):
        """Execute an AI-recommended fix, recording it for audit/learning."""
        session.start_recording(f'fix_{time.time()}.json')
        try:
            session.execute(action['command'])
            # Answer any interactive prompts the fix is expected to raise.
            # NOTE(review): TextContains is not imported in this snippet —
            # presumably from termitty; confirm the import path.
            if action.get('expect_prompts'):
                for prompt in action['expect_prompts']:
                    if session.wait_until(TextContains(prompt['text']), timeout=5):
                        session.send_line(prompt['response'])
            # Verify success when a verification command is provided.
            if action.get('verify_command'):
                verify = session.execute(action['verify_command'])
                action['verified'] = action['success_text'] in verify.output
        finally:
            # Always close the recording, even if the fix raised.
            session.stop_recording()
        return action
# Initialize and run the agent
# NOTE(review): YourAIModel is a placeholder — substitute a real model wrapper.
agent = AutonomousDevOpsAgent(
ai_model=YourAIModel(),
servers=[
{'name': 'web-01', 'host': 'web1.example.com', 'user': 'ops', 'key': '~/.ssh/id_rsa'},
{'name': 'db-01', 'host': 'db1.example.com', 'user': 'ops', 'key': '~/.ssh/id_rsa'},
]
)
agent.connect_all()
agent.monitor_and_fix() # Runs continuously
More AI Examples
AI-Powered Debugging
class AIDebugger:
    """Use an AI model to investigate and fix an issue on a remote server.

    NOTE(review): self.ai is never assigned in this snippet — an __init__
    supplying the model is presumably defined elsewhere; confirm.
    """

    def debug_issue(self, server, issue_description):
        """Investigate *issue_description* on *server*; return the findings list."""
        session = TermittySession()
        session.connect(server, username='debug', key_file='~/.ssh/id_rsa')
        # Record the whole session so it can be used as training data later.
        session.start_recording('debug_session.json')
        # The AI turns the issue description into a plan of commands to run.
        plan = self.ai.create_debug_plan(issue_description)
        findings = []
        for step in plan['steps']:
            # Execute one investigation command.
            result = session.execute(step['command'])
            # The AI analyzes the output together with the terminal state.
            analysis = self.ai.analyze_output({
                'command': step['command'],
                'output': result.output,
                'terminal_state': session.state.terminal.get_structured_state()
            })
            findings.append(analysis)
            # If the root cause is found, attempt a fix and stop on success.
            if analysis['root_cause_found']:
                fix = self.ai.generate_fix(analysis)
                session.execute(fix['command'])
                verify = session.execute(fix['verify_command'])
                if fix['success_indicator'] in verify.output:
                    break
        session.stop_recording()
        return findings
Training AI on Expert Actions
# Record an expert solving a problem so the session can train an AI later.
with TermittySession() as session:
    session.connect('server.com', username='expert', key_file='~/.ssh/id_rsa')
    session.start_recording('expert_nginx_fix.json')
    # Expert performs debugging and fix
    session.execute('systemctl status nginx')
    session.execute('nginx -t')
    session.execute('vim /etc/nginx/nginx.conf')
    # ... expert fixes issue ...
    session.execute('systemctl restart nginx')
    recording = session.stop_recording()
# Train AI on expert recordings.
# NOTE(review): this snippet uses glob and json — make sure both are imported
# where this code actually lives.
training_data = []
for recording_file in glob.glob('recordings/*.json'):
    with open(recording_file) as f:
        recording = json.load(f)
    # Extract one state-action pair per recorded action.
    for action in recording['actions']:
        training_data.append({
            'state': action['terminal_state_before'],
            'action': action['command'],
            'result': action['terminal_state_after'],
            'success': action.get('success', True)
        })
# Use for fine-tuning or few-shot learning
ai_model.train_on_examples(training_data)
Best Practices for AI Integration
1. Always Use Structured State
Instead of parsing raw text output, use get_structured_state()
to give AI consistent, structured data it can reason about.
2. Implement Safety Checks
Always verify AI-generated commands before execution. Use dry-run modes and verify commands match expected patterns.
3. Record Everything
Use session recording to create training data and audit trails. This helps improve AI performance and provides debugging information.
4. Handle Errors Gracefully
Implement retry logic and fallback strategies. AI should know when to escalate to humans or try alternative approaches.
Next Steps
Ready to Build AI-Powered DevOps?
Join the future of infrastructure automation with AI agents powered by Termitty.