Add BasicType system with type-safe storage and configurable locking #20

Workflow file for this run

name: Benchmarks
on:
  pull_request:
permissions:
  contents: read
  pull-requests: write
jobs:
  benchmark:
    runs-on: macos-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Swift
        uses: swift-actions/setup-swift@v2
        with:
          swift-version: "6.0"
      - name: Run benchmarks
        id: benchmark
        run: |
          echo "Running benchmarks..."
          swift test --filter testPerformance 2>&1 | tee benchmark_output.txt
      - name: Parse and format results
        run: |
          python3 << 'EOF'
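          # Parse the XCTest output captured above and render a markdown report.
          # Assumes XCTest's measure() log format, in which each performance test
          # reports an "average:" time in seconds.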
          import re
          from datetime import datetime
          import subprocess

          # Get system info
          try:
              cpu_brand = subprocess.check_output(['sysctl', '-n', 'machdep.cpu.brand_string']).decode().strip()
              mem_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']).decode().strip())
              mem_gb = mem_bytes / (1024**3)
              system_info = f"{cpu_brand}, {mem_gb:.0f} GB RAM"
          except Exception:
              system_info = "macOS (GitHub Actions)"

          # Read benchmark output
          with open('benchmark_output.txt', 'r') as f:
              content = f.read()

          # Find all performance test results
          pattern = r"testPerformance(\w+).*?average: ([\d.]+)"
          matches = re.findall(pattern, content)
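          # 'matches' holds (test-name suffix, average seconds) pairs; the pattern relies on
          # the "average:" value that XCTest prints for each measured test.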
          # Start markdown output
          output = ["# 🚀 MemoryMap Performance Benchmarks\n"]
          output.append("*256-entry capacity with double hashing, `@inline(__always)` optimizations*\n")
          output.append(f"**Test Hardware:** {system_info}\n")

          # Core operations section
          output.append("## Core Operations\n")
          output.append("| Operation | Time | Per-Op | Main Thread |")
          output.append("|-----------|------|--------|-------------|")
          core_ops = {
              "Insert": ("Insert", 100),
              "LookupHit": ("Lookup (hit)", 100),
              "LookupMiss": ("Lookup (miss)", 100),
              "Update": ("Update", 100),
              "Remove": ("Remove (insert+delete)", 200),
              "Contains": ("Contains", 100),
              "MixedOperations": ("Mixed operations", 200),
          }
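          # Each entry maps a test-name suffix to (display label, operation count); the count
          # converts the measured average for the whole test body into a per-operation time.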
          for test_name, avg_time in matches:
              if test_name in core_ops:
                  avg_time_f = float(avg_time)
                  op_name, ops = core_ops[test_name]
                  total_ms = avg_time_f * 1000
                  per_op_us = (avg_time_f * 1_000_000) / ops
                  if per_op_us < 1:
                      per_op = "<1 μs"
                  elif per_op_us < 1000:
                      per_op = f"{per_op_us:.1f} μs"
                  else:
                      per_op = f"{per_op_us/1000:.2f} ms"
                  if total_ms < 10:
                      status = "✅ Excellent"
                  elif total_ms < 50:
                      status = "✅ Good"
                  elif total_ms < 100:
                      status = "⚠️ OK"
                  else:
                      status = "❌ Review"
                  output.append(f"| {op_name} | {total_ms:.1f}ms | {per_op} | {status} |")

          # Load factor performance
          output.append("\n## Load Factor Performance (10,000 lookups)\n")
          output.append("| Load % | Time | Degradation | Status |")
          output.append("|--------|------|-------------|--------|")
          load_factors = {
              "LoadFactor25Percent": ("25%", None),
              "LoadFactor50Percent": ("50%", None),
              "LoadFactor75Percent": ("75%", None),
              "LoadFactor90Percent": ("90%", None),
              "LoadFactor99Percent": ("99%", None),
          }
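          # Degradation is reported relative to the first load-factor result encountered,
          # which is expected to be the 25% test given the order the tests run in.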
          baseline = None
          for test_name, avg_time in matches:
              if test_name in load_factors:
                  avg_time_f = float(avg_time)
                  load_name = load_factors[test_name][0]
                  if baseline is None:
                      baseline = avg_time_f
                      degradation = "baseline"
                  else:
                      ratio = avg_time_f / baseline
                      degradation = f"{ratio:.1f}x"
                  total_ms = avg_time_f * 1000
                  if avg_time_f < 0.050:
                      status = "✅ Excellent"
                  elif avg_time_f < 0.100:
                      status = "✅ Good"
                  elif avg_time_f < 0.150:
                      status = "⚠️ OK"
                  else:
                      status = "❌ Slow"
                  output.append(f"| {load_name} | {total_ms:.0f}ms | {degradation} | {status} |")

          # Key length impact
          output.append("\n## Key Length Impact (100 ops)\n")
          output.append("| Key Length | Time | Per-Op |")
          output.append("|------------|------|--------|")
          key_tests = {
              "ShortKeys": "Short (2-3 chars)",
              "MediumKeys": "Medium (~25 chars)",
              "LongKeys": "Long (64 chars)",
          }
          for test_name, avg_time in matches:
              if test_name in key_tests:
                  avg_time_f = float(avg_time)
                  key_name = key_tests[test_name]
                  total_ms = avg_time_f * 1000
                  per_op_us = (avg_time_f * 1_000_000) / 100
                  output.append(f"| {key_name} | {total_ms:.1f}ms | {per_op_us:.1f} μs |")

          # Bulk operations
          output.append("\n## Bulk Operations\n")
output.append("| Operation | Time | Description |")
output.append("|-----------|------|-------------|")
          bulk_ops = {
              "Count": ("Count (100 entries)", None),
              "Keys": ("Keys iteration (100 entries)", None),
              "ToDictionary": ("Convert to Dictionary (100 entries)", None),
              "RemoveAll": ("Remove all entries", None),
              "LargeBatchWrite": ("Large batch write", None),
          }
          for test_name, avg_time in matches:
              if test_name in bulk_ops:
                  avg_time_f = float(avg_time)
                  op_name = bulk_ops[test_name][0]
                  total_ms = avg_time_f * 1000
                  if total_ms < 10:
                      status = "✅ Excellent"
                  elif total_ms < 50:
                      status = "✅ Good"
                  elif total_ms < 100:
                      status = "⚠️ OK"
                  else:
                      status = "❌ Review"
                  output.append(f"| {op_name} | {total_ms:.1f}ms | {status} |")

          # Stress tests
          output.append("\n## Stress & Edge Cases\n")
          output.append("| Test | Time | Status |")
          output.append("|------|------|--------|")
          stress_tests = {
              "WorstCaseProbeChain": "Worst-case probe chain",
              "ManyTombstones": "Many tombstones",
              "SequentialVsRandom": "Sequential vs random access",
              "RandomAccess": "Random access pattern",
          }
          for test_name, avg_time in matches:
              if test_name in stress_tests:
                  avg_time_f = float(avg_time)
                  test_desc = stress_tests[test_name]
                  total_ms = avg_time_f * 1000
                  if total_ms < 50:
                      status = "✅ Good"
                  elif total_ms < 100:
                      status = "⚠️ OK"
                  else:
                      status = "❌ Slow"
                  output.append(f"| {test_desc} | {total_ms:.0f}ms | {status} |")

          # Persistence operations
          output.append("\n## Persistence\n")
          output.append("| Operation | Time | Status |")
          output.append("|-----------|------|--------|")
          persistence_ops = {
              "WriteCloseReopen": "Write, close, reopen",
          }
          for test_name, avg_time in matches:
              if test_name in persistence_ops:
                  avg_time_f = float(avg_time)
                  op_name = persistence_ops[test_name]
                  total_ms = avg_time_f * 1000
                  if total_ms < 50:
                      status = "✅ Excellent"
                  elif total_ms < 100:
                      status = "✅ Good"
                  else:
                      status = "⚠️ OK"
                  output.append(f"| {op_name} | {total_ms:.0f}ms | {status} |")

          # Main thread budget and capacity info
          output.append("\n## Performance Characteristics")
          output.append("### Main Thread Budget")
          output.append("- ✅ **Excellent**: <10ms - Perfect for UI interactions")
          output.append("- ✅ **Good**: 10-50ms - Acceptable for most operations")
          output.append("- ⚠️ **OK**: 50-100ms - Use with caution on main thread")
          output.append("- ❌ **Review**: >100ms - Consider background thread")
          output.append("\n*Target: 16.67ms/frame @ 60fps, 8.33ms/frame @ 120fps*")
          output.append("\n### Capacity & Optimization")
          output.append("- **Fixed capacity**: 256 entries")
          output.append("- **Recommended usage**: ≤200 keys for optimal performance")
          output.append("- **Memory footprint**: ~306KB per store")
          output.append("- **Key optimizations**: Double hashing, `@inline(__always)`, cache-aligned access")

          # Summary
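          # Counts every passing test case in the filtered run, not only the rows shown above.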
          total_tests = len(re.findall(r"Test Case.*passed", content))
          output.append(f"\n---\n**Total tests:** {total_tests} passed | _Generated {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}_")

          # Write to file
          with open('benchmark_results.md', 'w') as f:
              f.write('\n'.join(output))
          print('\n'.join(output))
          EOF
      - name: Comment PR with results
        if: github.event_name == 'pull_request'
        uses: thollander/actions-comment-pull-request@v2
        with:
          filePath: benchmark_results.md
          comment_tag: benchmark-results
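      # comment_tag above lets the action find and update its earlier comment on later runs
      # instead of posting a new comment each time.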
      - name: Comment commit with results
        if: github.event_name == 'push'
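        # Note: the workflow currently only triggers on pull_request, so this push-gated
        # step does not run unless a push trigger is added.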
        uses: peter-evans/commit-comment@v3
        with:
          body-path: benchmark_results.md