first commit
This commit is contained in:
260
agent-livekit/test_enhanced_logging.py
Normal file
260
agent-livekit/test_enhanced_logging.py
Normal file
@@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test Enhanced Logging and Browser Action Debugging
|
||||
|
||||
This script tests the enhanced selector logging and debugging features
|
||||
to ensure they work correctly and help troubleshoot browser automation issues.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
import sys
|
||||
from mcp_chrome_client import MCPChromeClient
|
||||
from debug_utils import SelectorDebugger, BrowserStateMonitor
|
||||
|
||||
# Configure logging to see all the enhanced logging output
# DEBUG level is intentional: the point of this script is to surface the
# enhanced selector-discovery logs, so nothing is filtered out.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        # Mirror everything to the console and to a persistent log file
        # so a failed run can be inspected after the fact.
        logging.StreamHandler(sys.stdout),
        logging.FileHandler('enhanced_logging_test.log')
    ]
)

# Module-level logger shared by both test scenarios below.
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def test_enhanced_logging():
    """Test the enhanced logging functionality"""

    def banner(title):
        # Section header used by every test phase below.
        print(title)
        print("-" * 50)

    print("🚀 Testing Enhanced Selector Logging and Browser Action Debugging")
    print("=" * 70)

    # Configuration for MCP Chrome client
    chrome_cfg = {
        'mcp_server_type': 'http',
        'mcp_server_url': 'http://localhost:3000/mcp',
        'mcp_server_command': '',
        'mcp_server_args': []
    }

    chrome = MCPChromeClient(chrome_cfg)
    sel_debugger = SelectorDebugger(chrome, logger)
    state_monitor = BrowserStateMonitor(chrome, logger)

    try:
        # Test 1: Connection and Browser Validation
        banner("\n📡 Test 1: Connection and Browser Validation")

        await chrome.connect()
        print("✅ Connected to MCP server")

        validation_result = await chrome.validate_browser_connection()
        print(f"📊 Browser validation: {json.dumps(validation_result, indent=2)}")

        # Test 2: Enhanced Voice Command Logging
        banner("\n🎤 Test 2: Enhanced Voice Command Logging")

        for voice_cmd in ("click login button", "click sign in", "click submit",
                          "click search button", "click login"):
            print(f"\n🔍 Testing command: '{voice_cmd}'")
            print("📝 Watch the logs for enhanced selector discovery details...")

            try:
                outcome = await chrome.execute_voice_command(voice_cmd)
                print(f"✅ Command result: {outcome}")
            except Exception as exc:
                print(f"❌ Command failed: {exc}")

        # Test 3: Debug Voice Command Step-by-Step
        banner("\n🔧 Test 3: Debug Voice Command Step-by-Step")

        debug_command = "click login button"
        print(f"🔍 Debugging command: '{debug_command}'")

        debug_result = await sel_debugger.debug_voice_command(debug_command)
        print(f"📊 Debug results:\n{json.dumps(debug_result, indent=2, default=str)}")

        # Test 4: Browser State Monitoring
        banner("\n📊 Test 4: Browser State Monitoring")

        snapshot = await state_monitor.capture_state()
        problems = state_monitor.detect_issues(snapshot)

        print(f"📋 Browser state: {json.dumps(snapshot, indent=2, default=str)}")
        print(f"⚠️ Detected issues: {problems}")

        # Test 5: Selector Testing
        banner("\n🎯 Test 5: Selector Testing")

        login_selectors = [
            "button[type='submit']",
            "input[type='submit']",
            ".login-button",
            "#login-button",
            "#loginButton",
            "button:contains('Login')",
            "button:contains('Sign In')",
            "[aria-label*='login']",
            ".btn-login",
            "button.login",
        ]

        selector_test_results = await sel_debugger.test_common_selectors(login_selectors)
        print(f"🔍 Selector test results:\n{json.dumps(selector_test_results, indent=2, default=str)}")

        # Test 6: Enhanced Smart Click with Detailed Logging
        banner("\n🖱️ Test 6: Enhanced Smart Click with Detailed Logging")

        for click_target in ("login", "sign in", "submit", "search", "button"):
            print(f"\n🎯 Testing smart click on: '{click_target}'")
            print("📝 Watch for detailed selector discovery and execution logs...")

            try:
                outcome = await chrome._smart_click_mcp(click_target)
                print(f"✅ Smart click result: {outcome}")
            except Exception as exc:
                print(f"❌ Smart click failed: {exc}")

        # Test 7: Debug Summary
        banner("\n📈 Test 7: Debug Summary")

        summary = sel_debugger.get_debug_summary()
        print(f"📊 Debug summary:\n{json.dumps(summary, indent=2, default=str)}")

        # Test 8: Export Debug Log
        banner("\n💾 Test 8: Export Debug Log")

        log_filename = sel_debugger.export_debug_log()
        print(f"📁 Debug log exported to: {log_filename}")

        print("\n✅ All tests completed successfully!")
        print("📝 Check the log files for detailed output:")
        print("   - enhanced_logging_test.log (main test log)")
        print(f"   - {log_filename} (debug session export)")

    except Exception as exc:
        print(f"💥 Test failed: {exc}")
        logger.exception("Test failed with exception")

    finally:
        # Always try to release the MCP connection, even after a failure.
        try:
            await chrome.disconnect()
            print("🔌 Disconnected from MCP server")
        except Exception as exc:
            print(f"⚠️ Cleanup warning: {exc}")
|
||||
|
||||
|
||||
async def test_specific_scenario():
    """Test the specific 'click login button' scenario that was reported.

    Walks the reported failure end-to-end: validates the browser
    connection, runs the step-by-step selector debugger, executes the
    command with enhanced logging, and prints an analysis with
    troubleshooting recommendations.
    """

    print("\n" + "=" * 70)
    print("🎯 SPECIFIC SCENARIO TEST: 'Click Login Button'")
    print("=" * 70)

    # Connection settings for the local MCP Chrome bridge.
    config = {
        'mcp_server_type': 'http',
        'mcp_server_url': 'http://localhost:3000/mcp',
        'mcp_server_command': '',
        'mcp_server_args': []
    }

    client = MCPChromeClient(config)
    debugger = SelectorDebugger(client, logger)

    try:
        await client.connect()

        # Step 1: Validate browser connection
        print("\n📡 Step 1: Validating browser connection...")
        validation = await client.validate_browser_connection()

        if not validation.get("browser_responsive"):
            # No point debugging selectors when the browser itself is dead.
            print("❌ Browser is not responsive - this could be the issue!")
            return

        print("✅ Browser is responsive")

        # Step 2: Debug the specific command
        print("\n🔍 Step 2: Debugging 'click login button' command...")
        debug_result = await debugger.debug_voice_command("click login button")

        print("📊 Debug Analysis:")
        # BUG FIX: `.get('steps', [{}])[0]` raised IndexError when the
        # 'steps' key existed but held an empty list — the dict default
        # only covers a *missing* key. Normalize once here instead.
        steps = debug_result.get('steps') or [{}]
        print(f"  Command parsed: {steps[0].get('success', False)}")

        selector_step = next((step for step in steps if step.get('step') == 'selector_discovery'), None)
        if selector_step:
            print(f"  Selectors found: {selector_step.get('selectors_found', False)}")
            print(f"  Matching elements: {len(selector_step.get('matching_elements', []))}")
            if selector_step.get('matching_elements'):
                best_selector = selector_step['matching_elements'][0]['selector']
                print(f"  Best selector: {best_selector}")

        execution_step = next((step for step in steps if step.get('step') == 'action_execution'), None)
        if execution_step:
            print(f"  Execution successful: {execution_step.get('success', False)}")
            if execution_step.get('errors'):
                print(f"  Execution errors: {execution_step['errors']}")

        # Step 3: Test the actual command with enhanced logging
        print("\n🚀 Step 3: Executing 'click login button' with enhanced logging...")
        result = await client.execute_voice_command("click login button")
        print(f"📝 Final result: {result}")

        # Step 4: Analyze what happened
        print("\n📈 Step 4: Analysis and Recommendations")
        # str() guards against a non-string result (e.g. None) from the
        # client, which would otherwise crash on .lower().
        result_text = str(result).lower()
        if "success" in result_text or "clicked" in result_text:
            print("✅ SUCCESS: The command executed successfully!")
            print("🎉 The enhanced logging helped identify and resolve the issue.")
        else:
            print("❌ ISSUE PERSISTS: The command still failed.")
            print("🔍 Recommendations:")
            print("  1. Check if the page has login buttons")
            print("  2. Verify MCP server is properly connected to browser")
            print("  3. Check browser console for JavaScript errors")
            print("  4. Try more specific selectors")

    except Exception as e:
        print(f"💥 Specific scenario test failed: {e}")
        logger.exception("Specific scenario test failed")

    finally:
        # Best-effort cleanup: disconnect may itself fail if connect() did.
        try:
            await client.disconnect()
        except Exception as e:
            print(f"⚠️ Cleanup warning: {e}")
|
||||
|
||||
|
||||
async def main():
    """Main test function"""
    # Run the broad logging test first, then the reported-bug scenario,
    # sequentially on the same event loop.
    for scenario in (test_enhanced_logging, test_specific_scenario):
        await scenario()
|
||||
|
||||
|
||||
# Script entry point: run both async test suites under a single event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
Reference in New Issue
Block a user