Organizes 11 projects for Cerbo GX/Venus OS into a single repository: - axiom-nmea: Raymarine LightHouse protocol decoder - dbus-generator-ramp: Generator current ramp controller - dbus-lightning: Blitzortung lightning monitor - dbus-meteoblue-forecast: Meteoblue weather forecast - dbus-no-foreign-land: noforeignland.com tracking - dbus-tides: Tide prediction from depth + harmonics - dbus-vrm-history: VRM cloud history proxy - dbus-windy-station: Windy.com weather upload - mfd-custom-app: MFD app deployment package - venus-html5-app: Custom Victron HTML5 app fork - watermaker: Watermaker PLC control UI Adds root README, .gitignore, project template, and per-project .gitignore files. Sensitive config files excluded via .gitignore with .example templates provided. Made-with: Cursor
497 lines
16 KiB
Python
497 lines
16 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Field Debugger - Shows all protobuf fields in columns for mapping real-world values.
|
|
|
|
Displays each top-level field as a column, with subfields as rows.
|
|
Updates every few seconds to show value progression over time.
|
|
|
|
Usage:
|
|
python3 field_debugger.py -i 192.168.1.100 # Live capture
|
|
python3 field_debugger.py --pcap capture.pcap # From file
|
|
python3 field_debugger.py --pcap capture.pcap -n 5 # Show 5 snapshots
|
|
"""
|
|
|
|
import struct
|
|
import socket
|
|
import time
|
|
import argparse
|
|
import threading
|
|
import sys
|
|
from datetime import datetime
|
|
from collections import defaultdict
|
|
from typing import Dict, List, Any, Optional
|
|
|
|
# Wire types
# Protobuf wire-type codes (low 3 bits of a field tag).  Types 3/4
# (deprecated groups) are not handled by the parser below.
WIRE_VARINT = 0
WIRE_FIXED64 = 1
WIRE_LENGTH = 2
WIRE_FIXED32 = 5

# Number of bytes of transport header that precede the protobuf payload
# in each multicast packet (skipped by extract_fields).
HEADER_SIZE = 20

# Multicast (group, port) pairs joined during live capture.
MULTICAST_GROUPS = [
    ("226.192.206.98", 2561),
    ("226.192.206.99", 2562),
    ("226.192.206.100", 2563),
    ("226.192.206.101", 2564),
    ("226.192.206.102", 2565),
    ("226.192.219.0", 3221),
    ("239.2.1.1", 2154),  # May contain tank/engine data
]
|
|
|
|
|
|
class ProtobufParser:
    """Schema-less protobuf wire-format parser.

    Walks raw message bytes and decodes tag/value pairs without a .proto
    definition.  Length-delimited values are speculatively re-parsed as
    nested messages.
    """

    def __init__(self, data: bytes):
        self.data = data  # raw message bytes
        self.pos = 0      # current read offset into self.data

    def read_varint(self) -> int:
        """Decode a base-128 varint at the current position and advance."""
        result = 0
        shift = 0
        while self.pos < len(self.data):
            byte = self.data[self.pos]
            self.pos += 1
            result |= (byte & 0x7F) << shift
            if not (byte & 0x80):  # high bit clear = last byte
                break
            shift += 7
        return result

    def parse(self) -> Dict[int, Any]:
        """Parse message, return dict of field_num -> (wire_type, value, children).

        ``children`` is a dict of the same shape when a length-delimited
        value looks like an embedded message, else None.
        NOTE: repeated fields overwrite each other (last occurrence wins),
        which is acceptable for this snapshot-style debugger.
        """
        fields = {}
        while self.pos < len(self.data):
            try:
                tag = self.read_varint()
                field_num = tag >> 3
                wire_type = tag & 0x07

                # Implausible field numbers usually mean we drifted into
                # non-protobuf bytes; stop rather than emit garbage.
                if field_num == 0 or field_num > 1000:
                    break

                if wire_type == WIRE_VARINT:
                    value = self.read_varint()
                    children = None
                elif wire_type == WIRE_FIXED64:
                    value = self.data[self.pos:self.pos + 8]
                    self.pos += 8
                    children = None
                elif wire_type == WIRE_LENGTH:
                    length = self.read_varint()
                    value = self.data[self.pos:self.pos + length]
                    self.pos += length
                    # Try to parse as nested message; if the sub-parser
                    # consumed less than half the bytes, treat as opaque.
                    try:
                        nested = ProtobufParser(value)
                        children = nested.parse()
                        if nested.pos < len(value) * 0.5:
                            children = None
                    except Exception:
                        children = None
                elif wire_type == WIRE_FIXED32:
                    value = self.data[self.pos:self.pos + 4]
                    self.pos += 4
                    children = None
                else:
                    # Unsupported wire type (groups / invalid): bail out.
                    break

                fields[field_num] = (wire_type, value, children)
            except Exception:
                # Malformed trailing bytes: return what we decoded so far.
                break
        return fields
|
|
|
|
|
|
# Known field labels from reverse engineering.
# Keys are field paths: (top_field,) for top-level fields and
# (top_field, sub_field) for subfields.  Names ending in "?" are
# unconfirmed guesses from observed value ranges.
FIELD_LABELS = {
    # Top-level fields
    (1,): "DeviceInfo",
    (2,): "GPS",
    (3,): "HeadingBlock",
    (7,): "DepthBlock",
    (8,): "RateOfTurn",
    (10,): "Unknown10",
    (12,): "Unknown12",
    (13,): "WindNav",
    (14,): "SensorData",
    (21,): "Angles",

    # Field 1 subfields (Device Info)
    (1, 1): "DeviceName",
    (1, 2): "SerialInfo",

    # Field 2 subfields (GPS)
    (2, 1): "LATITUDE",
    (2, 2): "LONGITUDE",
    (2, 3): "Unknown",
    (2, 4): "Altitude?",
    (2, 5): "Timestamp?",
    (2, 6): "Distance?",

    # Field 3 subfields (Heading)
    (3, 1): "HeadingRaw",
    (3, 2): "HEADING",

    # Field 7 subfields (Depth) - only in larger packets (1472B+)
    (7, 1): "DEPTH_M",  # Depth in METERS

    # Field 8 subfields
    (8, 1): "ROT?",
    (8, 2): "Unknown",

    # Field 13 subfields (Wind/Navigation) - MAIN SENSOR BLOCK
    (13, 1): "Heading1",
    (13, 2): "Heading2",
    (13, 3): "SmallAngle",
    (13, 4): "TWD",  # True Wind Direction
    (13, 5): "TWS",  # True Wind Speed
    (13, 6): "AWS",  # Apparent Wind Speed?
    (13, 7): "AWD?",  # Apparent Wind Direction?
    (13, 8): "Heading1_dup",
    (13, 9): "Heading2_dup",
    (13, 10): "SmallAngle_dup",
    (13, 11): "TWS_dup",
    (13, 12): "AWS_dup",
    (13, 13): "AWD_dup?",

    # Field 21 subfields
    (21, 1): "Unknown",
    (21, 2): "Angle1",
    (21, 3): "Unknown",
    (21, 4): "Unknown",
}
|
|
|
|
|
|
def get_label(field_path: tuple) -> str:
    """Return the human-readable name for *field_path* ("" when unmapped)."""
    try:
        return FIELD_LABELS[field_path]
    except KeyError:
        return ""
|
|
|
|
|
|
def format_value(wire_type: int, value: Any) -> str:
    """Format a decoded protobuf value for display.

    The prefix identifies the interpretation: v= varint, d= double,
    f= float (annotated as degrees or knots when plausible), s= string,
    x= raw hex fallback, [NB] = opaque length-delimited bytes.
    """
    if wire_type == WIRE_VARINT:
        # Very large varints are usually ids/bitfields; show hex too.
        if value > 2**31:
            return f"v:{value} (0x{value:x})"
        return f"v:{value}"

    elif wire_type == WIRE_FIXED64:
        try:
            d = struct.unpack('<d', value)[0]
            if d != d:  # NaN compares unequal to itself
                return "d:NaN"
            if abs(d) < 0.0001 and d != 0:
                return f"d:{d:.2e}"
            if abs(d) > 10000:
                return f"d:{d:.1f}"
            if -180 <= d <= 180:
                # Lat/lon range: show full 6-decimal precision.
                return f"d:{d:.6f}"
            return f"d:{d:.2f}"
        except (struct.error, TypeError):
            # Not 8 bytes / not a buffer: dump hex instead.
            return f"x:{value.hex()[:16]}"

    elif wire_type == WIRE_FIXED32:
        try:
            f = struct.unpack('<f', value)[0]
            if f != f:  # NaN
                return "f:NaN"
            # Check if could be radians (angle): 0..2*pi with slack.
            if 0 <= f <= 6.5:
                deg = f * 57.2958
                return f"f:{f:.3f} ({deg:.1f}°)"
            # Could be speed in m/s -> show knots equivalent.
            if 0 < f < 50:
                kts = f * 1.94384
                return f"f:{f:.2f} ({kts:.1f}kt)"
            return f"f:{f:.3f}"
        except (struct.error, TypeError):
            return f"x:{value.hex()}"

    elif wire_type == WIRE_LENGTH:
        # Show fully-printable ASCII payloads as strings, else the size.
        try:
            s = value.decode('ascii')
            if all(32 <= ord(c) < 127 for c in s):
                if len(s) > 15:
                    return f's:"{s[:12]}..."'
                return f's:"{s}"'
        except UnicodeDecodeError:
            pass
        return f"[{len(value)}B]"

    return "?"
|
|
|
|
|
|
def extract_fields(packet: bytes) -> Optional[Dict[int, Any]]:
    """Strip the transport header and parse the protobuf payload."""
    # Require the fixed header plus a minimum of protobuf bytes.
    if len(packet) < HEADER_SIZE + 10:
        return None
    return ProtobufParser(packet[HEADER_SIZE:]).parse()
|
|
|
|
|
|
def print_snapshot(fields: Dict[int, Any], timestamp: str, packet_size: int):
    """Print a snapshot of all fields in columnar format.

    Each top-level field becomes a column; subfields (plus one extra
    nesting level) become rows within it.  Columns are printed in bands
    of up to four.
    """
    # Build column data: field_num -> list of (subfield_path, value_str)
    columns = {}

    def process_field(field_num: int, wire_type: int, value: Any, children: Optional[Dict], prefix: str = ""):
        # One column per top-level field number.
        col_key = field_num
        if col_key not in columns:
            columns[col_key] = []

        if children:
            # Has subfields: marker row, then one row per subfield.
            columns[col_key].append((f"{prefix}", "[msg]"))
            for sub_num, (sub_wt, sub_val, sub_children) in sorted(children.items()):
                label = get_label((field_num, sub_num))
                label_str = f" {label}" if label else ""
                val_str = format_value(sub_wt, sub_val)
                columns[col_key].append((f" .{sub_num}{label_str}", val_str))

                # Go one level deeper for nested messages
                if sub_children:
                    for subsub_num, (subsub_wt, subsub_val, _) in sorted(sub_children.items()):
                        val_str2 = format_value(subsub_wt, subsub_val)
                        columns[col_key].append((f" .{sub_num}.{subsub_num}", val_str2))
        else:
            # Scalar top-level field: single row.
            val_str = format_value(wire_type, value)
            columns[col_key].append((prefix or "val", val_str))

    # Process all top-level fields
    for field_num, (wire_type, value, children) in sorted(fields.items()):
        process_field(field_num, wire_type, value, children)

    # Snapshot banner
    print("\n" + "=" * 100)
    print(f" {timestamp} | Packet: {packet_size} bytes | Fields: {len(fields)}")
    print("=" * 100)

    # Determine column layout
    col_nums = sorted(columns.keys())
    if not col_nums:
        print(" No fields decoded")
        return

    # Calculate column widths
    col_width = 28
    cols_per_row = min(4, len(col_nums))

    # Print columns in groups (bands)
    for start_idx in range(0, len(col_nums), cols_per_row):
        group_cols = col_nums[start_idx:start_idx + cols_per_row]

        # Header row with labels
        header = ""
        for col_num in group_cols:
            label = get_label((col_num,))
            if label:
                hdr_text = f"F{col_num} {label}"
            else:
                hdr_text = f"Field {col_num}"
            header += f"| {hdr_text:<{col_width - 1}}"
        print(header + "|")
        # Each cell renders as "| " plus (col_width - 1) padded chars, so a
        # band is len(group_cols) * (col_width + 1) + 1 characters wide.
        # (Previously used col_width + 2, overshooting by one per column.)
        print("-" * (len(group_cols) * (col_width + 1) + 1))

        # Find max rows needed in this band
        max_rows = max(len(columns[c]) for c in group_cols)

        # Print rows, truncating cells that exceed the column width
        for row_idx in range(max_rows):
            row = ""
            for col_num in group_cols:
                col_data = columns[col_num]
                if row_idx < len(col_data):
                    path, val = col_data[row_idx]
                    cell = f"{path}: {val}"
                    if len(cell) > col_width - 1:
                        cell = cell[:col_width - 4] + "..."
                    row += f"| {cell:<{col_width - 1}}"
                else:
                    row += f"| {'':<{col_width - 1}}"
            print(row + "|")

    print()
|
|
|
|
|
|
def read_pcap(filename: str) -> List[tuple]:
    """Read UDP payloads from a classic pcap file.

    Returns a list of (timestamp_seconds, payload_bytes) tuples for every
    IPv4-over-Ethernet packet found.  Assumes an 8-byte UDP header after
    the IP header; non-IPv4 frames are skipped.
    (Fixed: annotation previously claimed List[bytes].)
    """
    packets: List[tuple] = []
    with open(filename, 'rb') as f:
        header = f.read(24)
        magic = struct.unpack('<I', header[0:4])[0]
        # 0xd4c3b2a1 read little-endian means the writer was big-endian.
        # NOTE(review): nanosecond pcap magic (0xa1b23c4d) is not handled
        # here — timestamps from such files would still be treated as µs.
        swapped = magic == 0xd4c3b2a1
        endian = '>' if swapped else '<'

        while True:
            pkt_header = f.read(16)
            if len(pkt_header) < 16:
                break  # end of file
            ts_sec, ts_usec, incl_len, orig_len = struct.unpack(f'{endian}IIII', pkt_header)
            pkt_data = f.read(incl_len)
            if len(pkt_data) < incl_len:
                break  # truncated capture

            # Ethertype 0x0800 = IPv4; skip Ethernet(14) + IP(ihl*4) + UDP(8).
            if len(pkt_data) > 42 and pkt_data[12:14] == b'\x08\x00':
                ip_header_len = (pkt_data[14] & 0x0F) * 4
                payload_start = 14 + ip_header_len + 8
                if payload_start < len(pkt_data):
                    packets.append((ts_sec + ts_usec / 1e6, pkt_data[payload_start:]))
    return packets
|
|
|
|
|
|
class LiveListener:
    """Listen on multicast groups, keeping only the latest packet per group."""

    def __init__(self, interface_ip: str):
        self.interface_ip = interface_ip
        self.running = False
        self.packets_by_group = {}  # (group, port) -> latest raw packet
        self.lock = threading.Lock()
        self.socks = []  # open multicast sockets; closed by stop()

    def _create_socket(self, group: str, port: int):
        """Create a UDP socket bound to *port* and joined to *group*."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if hasattr(socket, 'SO_REUSEPORT'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        sock.bind(('', port))
        mreq = struct.pack("4s4s", socket.inet_aton(group), socket.inet_aton(self.interface_ip))
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        # Timeout so the listen loop can periodically re-check self.running.
        sock.settimeout(1.0)
        return sock

    def _listen(self, sock, group: str, port: int):
        """Receive loop for one socket; stores the latest qualifying packet."""
        key = (group, port)
        while self.running:
            try:
                data, _ = sock.recvfrom(65535)
                # Keep packets with protobuf payload (header + minimal data)
                if len(data) >= 40:
                    with self.lock:
                        self.packets_by_group[key] = data
            except socket.timeout:
                continue
            except OSError:
                # Socket closed (stop()) or transient network error; loop
                # re-checks self.running and exits cleanly when cleared.
                pass

    def start(self):
        """Join every configured group and spawn a daemon listener thread each."""
        self.running = True
        for group, port in MULTICAST_GROUPS:
            try:
                sock = self._create_socket(group, port)
                self.socks.append(sock)
                t = threading.Thread(target=self._listen, args=(sock, group, port), daemon=True)
                t.start()
                print(f"Listening on {group}:{port}")
            except Exception as e:
                print(f"Error: {e}")

    def get_all_packets(self) -> Dict[tuple, bytes]:
        """Return dict of (group, port) -> packet for all groups with data."""
        with self.lock:
            return dict(self.packets_by_group)

    def stop(self):
        """Signal listener threads to exit and release the sockets."""
        self.running = False
        # Closing unblocks any thread parked in recvfrom() immediately
        # (previously sockets were leaked and threads waited for timeout).
        for sock in self.socks:
            try:
                sock.close()
            except OSError:
                pass
|
|
|
|
|
|
def main():
    """CLI entry point.

    Parses arguments, prints a legend, then runs one of two modes:
    pcap replay (--pcap) or live multicast capture (-i/--interface).
    """
    parser = argparse.ArgumentParser(description="Field Debugger - Map protobuf fields to real values")
    parser.add_argument('-i', '--interface', help='Interface IP for live capture')
    parser.add_argument('--pcap', help='Read from pcap file')
    parser.add_argument('-n', '--num-snapshots', type=int, default=10, help='Number of snapshots to show')
    parser.add_argument('-t', '--interval', type=float, default=3.0, help='Seconds between snapshots')
    parser.add_argument('-s', '--size', type=int, help='Only show packets of this size')
    args = parser.parse_args()

    # Exactly one data source is required.
    if not args.pcap and not args.interface:
        parser.error("Either --interface or --pcap required")

    print("Field Debugger - Protobuf Field Mapper")
    print("=" * 50)
    print("Legend:")
    print(" v:N = varint (integer)")
    print(" d:N = double (64-bit float)")
    print(" f:N (X°) = float as radians -> degrees")
    print(" f:N (Xkt) = float as m/s -> knots")
    print(" s:\"...\" = string")
    print(" [NB] = N bytes (nested message)")
    print("=" * 50)

    if args.pcap:
        # Read from pcap
        print(f"\nReading {args.pcap}...")
        packets = read_pcap(args.pcap)
        print(f"Loaded {len(packets)} packets")

        # Filter by size if requested
        if args.size:
            packets = [(ts, p) for ts, p in packets if len(p) == args.size]
            print(f"Filtered to {len(packets)} packets of size {args.size}")

        # Group by size
        by_size = defaultdict(list)
        for ts, pkt in packets:
            by_size[len(pkt)].append((ts, pkt))

        print(f"\nPacket sizes: {sorted(by_size.keys())}")

        # Show snapshots from packets with sensor data
        # (small packets lack the interesting sensor blocks)
        target_sizes = [s for s in sorted(by_size.keys()) if s >= 300]
        if not target_sizes:
            print("No packets >= 300 bytes found")
            return

        # Pick largest sensor packets unless a size was forced via -s
        target_size = target_sizes[-1] if not args.size else args.size
        target_packets = by_size.get(target_size, [])

        if not target_packets:
            print(f"No packets of size {target_size}")
            return

        # Show snapshots at evenly-spaced intervals through the capture
        step = max(1, len(target_packets) // args.num_snapshots)
        for i in range(0, len(target_packets), step):
            if i // step >= args.num_snapshots:
                break
            ts, pkt = target_packets[i]
            fields = extract_fields(pkt)
            if fields:
                timestamp = datetime.fromtimestamp(ts).strftime("%H:%M:%S.%f")[:-3]
                print_snapshot(fields, timestamp, len(pkt))

    else:
        # Live capture: poll the listener every --interval seconds
        listener = LiveListener(args.interface)
        listener.start()

        print(f"\nShowing {args.num_snapshots} snapshots, {args.interval}s apart")
        print("Press Ctrl+C to stop\n")

        try:
            for i in range(args.num_snapshots):
                time.sleep(args.interval)
                all_packets = listener.get_all_packets()
                if all_packets:
                    timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
                    for (group, port), pkt in sorted(all_packets.items()):
                        # Honor the optional -s size filter in live mode too
                        if args.size and len(pkt) != args.size:
                            continue
                        fields = extract_fields(pkt)
                        if fields:
                            header = f"{group}:{port}"
                            print_snapshot(fields, f"{timestamp} [{header}]", len(pkt))
                else:
                    print(f"[{i+1}] No packets received yet...")
        except KeyboardInterrupt:
            print("\nStopped")
        finally:
            listener.stop()
|
|
|
|
|
|
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|