# Source-listing metadata (extraction artifact, preserved as a comment):
#   Files: HPCS6500-py/calibration.py
#   2026-02-05 09:13:07 +07:00 — 896 lines, 34 KiB, Python
#!/usr/bin/env python3
"""
LED Light Calibration Tool
Calibrates the bilinear interpolation table for CCT/Lumen mixing ratios.
Works with an external measurement script that writes to measurement.json.
Usage:
1. Start the external measurement script (reads spectrometer/lux meter)
2. Run this calibration tool
3. The tool sets test points and waits for measurements
4. After all points are measured, it generates a calibration table
"""
import serial
import time
import json
import sys
import argparse
import subprocess
from pathlib import Path
# Calibration grid (matches firmware)
CCT_POINTS = [2600, 2900, 3200, 3500, 3800, 4100, 4400, 4700, 5000]
LUMEN_POINTS = [200, 375, 550, 725, 900]
NUM_CCT = len(CCT_POINTS)
NUM_LUMEN = len(LUMEN_POINTS)
# UART commands
CMD_SET_COLOR = 0x81
CMD_SET_INTENSITY = 0x82
CMD_PROFILE_OFF = 0x84
CMD_CAL_START = 0x8A
CMD_CAL_WRITE = 0x8B
CMD_CAL_COMMIT = 0x8C
CMD_CAL_CLEAR = 0x8D
CMD_CAL_READ = 0x8E
CMD_CAL_WRITE_Y = 0x8F
RSP_ACK = 0xC0
# Default y-data tables (mA points: 0, 11, 33, 65, 150 -> lumen output)
Y27_DEFAULT = [0, 39, 179, 369, 844]
Y50_DEFAULT = [0, 51, 217, 441, 969]
X_DATA = [0, 11, 33, 65, 150] # mA drive points
# Default paths
CALIBRATION_OUTPUT = Path(__file__).parent / "calibration_table.json"
DEFAULT_SPECTROMETER = Path(__file__).parent / "spectrometer.py"
class CalibrationTool:
    """Drives the LED light over UART and orchestrates calibration runs."""

    def __init__(self, port, baudrate=115200, spectrometer_script=None):
        # Serial link parameters; the port itself is opened lazily in connect().
        self.port = port
        self.baudrate = baudrate
        self.ser = None
        # Fall back to the bundled spectrometer script when none is supplied.
        self.spectrometer_script = spectrometer_script or DEFAULT_SPECTROMETER
        # (cct, lumen) -> {'measured_cct': x, 'measured_lumen': y}
        self.measurements = {}
def connect(self):
    """Open the serial link to the light."""
    self.ser = serial.Serial(self.port, self.baudrate, timeout=1)
    # Give the port a moment to settle, then drop any stale bytes.
    time.sleep(0.5)
    self.ser.reset_input_buffer()
    print(f"Connected to {self.port}")
def disconnect(self):
"""Disconnect from the light."""
if self.ser:
self.ser.close()
self.ser = None
def send_command(self, cmd, payload, timeout=2.0):
    """Send a 5-byte command frame and wait for the matching response.

    The device interleaves 5-byte diagnostic frames with command responses
    on the same UART, so frames are read and discarded until an ACK/NAK
    for this command arrives (or the timeout expires).

    Args:
        cmd: Command byte (e.g. CMD_SET_COLOR).
        payload: Exactly 4 payload bytes.
        timeout: Seconds to wait for a response.

    Returns:
        True on matching ACK, False on NAK or timeout.

    Raises:
        ValueError: If payload is not 4 bytes long.
    """
    if len(payload) != 4:
        raise ValueError("Payload must be 4 bytes")
    # Flush any pending input
    self.ser.reset_input_buffer()
    data = bytes([cmd]) + bytes(payload)
    self.ser.write(data)
    # Read 5-byte messages until we get a response (0xC0 or 0xC1).
    # BUGFIX: serial.read(5) may return fewer than 5 bytes; the original
    # discarded such partial frames, losing sync with the frame stream.
    # Accumulate bytes until a full frame is available instead.
    buf = b""
    start = time.time()
    while time.time() - start < timeout:
        buf += self.ser.read(5 - len(buf))
        if len(buf) < 5:
            continue
        response, buf = buf[:5], b""
        # Check if this is a command response (0xC0=ACK, 0xC1=NAK)
        if response[0] == RSP_ACK:
            # Verify the echoed command matches what we sent
            if response[1] == cmd:
                return True
            print(f"ACK mismatch: sent 0x{cmd:02x}, got {response.hex()}")
            # Keep reading, this might be from a previous command
            continue
        if response[0] == 0xC1:  # RSP_NAK
            print(f"NAK received: {response.hex()}")
            return False
        # Otherwise it's a diagnostic message, skip it
    print("Timeout waiting for response")
    return False
def set_intensity(self, intensity):
    """Set the light's target intensity (sent as little-endian uint16)."""
    payload = [intensity & 0xFF, (intensity >> 8) & 0xFF, 0, 0]
    return self.send_command(CMD_SET_INTENSITY, payload)
def set_color(self, cct):
    """Set color temperature in Kelvin (2600-5000K, little-endian uint16)."""
    payload = [cct & 0xFF, (cct >> 8) & 0xFF, 0, 0]
    return self.send_command(CMD_SET_COLOR, payload)
def disable_profile(self):
    """Turn off the automatic profile so manual commands take effect."""
    # This command carries no payload bytes.
    zero_payload = [0, 0, 0, 0]
    return self.send_command(CMD_PROFILE_OFF, zero_payload)
def cal_start(self):
    """Begin the calibration write sequence (the device erases flash).

    Uses a longer response timeout because the erase blocks the firmware.
    """
    ok = self.send_command(CMD_CAL_START, [0, 0, 0, 0], timeout=5.0)
    print(f" cal_start result: {ok}")
    return ok
def cal_write(self, index, value):
    """Write one mixing-ratio calibration byte (index 0-44, value 0-255)."""
    ok = self.send_command(CMD_CAL_WRITE, [index, value, 0, 0])
    if ok:
        return ok
    print(f" cal_write failed at index {index}, value {value}")
    return ok
def cal_write_y(self, table, index, value):
    """Write one y-data point. table: 0=y27, 1=y50; value is a uint16."""
    payload = [table, index, value & 0xFF, (value >> 8) & 0xFF]
    return self.send_command(CMD_CAL_WRITE_Y, payload)
def cal_commit(self):
    """Persist the staged calibration data to flash."""
    # Flash programming is slow, so allow a generous response timeout.
    return self.send_command(CMD_CAL_COMMIT, [0, 0, 0, 0], timeout=5.0)
def cal_clear(self):
    """Erase stored calibration so the firmware reverts to its defaults."""
    # This command carries no payload bytes.
    return self.send_command(CMD_CAL_CLEAR, [0, 0, 0, 0])
def write_calibration_table(self, table, y27_cal=None, y50_cal=None, verbose=True):
    """Write a full calibration set to the device.

    Sequence: cal_start (flash erase), 45 mixing-ratio bytes, two 5-point
    y-data curves, cal_commit. Aborts on the first NAK/timeout.

    Args:
        table: 9x5 mixing-ratio table (values 0-255).
        y27_cal: Optional 5-element lumen curve for the 2700K LED.
        y50_cal: Optional 5-element lumen curve for the 5000K LED.
        verbose: Print progress/diagnostic messages.

    Returns:
        True if every write and the final commit were acknowledged.
    """
    if verbose:
        print("Writing calibration to device...")
    # Ensure clean UART state before starting
    self.ser.reset_input_buffer()
    time.sleep(0.1)
    if not self.cal_start():
        if verbose:
            print(" Failed to start calibration write")
        return False
    time.sleep(1.0)  # Wait for flash erase to complete
    # Write mixing ratios (row-major, indices 0..44)
    index = 0
    for row in table:
        for value in row:
            if not self.cal_write(index, value):
                if verbose:
                    print(f" Failed to write mixing ratio index {index}")
                return False
            index += 1
            time.sleep(0.05)

    def _write_y_table(table_id, name, values):
        # Write one 5-point y-data curve; table_id 0=y27, 1=y50.
        for i, value in enumerate(values):
            if not self.cal_write_y(table_id, i, int(value)):
                if verbose:
                    print(f" Failed to write {name}[{i}]")
                return False
            time.sleep(0.05)
        return True

    # BUGFIX: compare against None rather than truthiness, so an explicitly
    # supplied (but falsy) sequence is never silently replaced by defaults.
    y27 = y27_cal if y27_cal is not None else Y27_DEFAULT
    if not _write_y_table(0, "y27", y27):
        return False
    y50 = y50_cal if y50_cal is not None else Y50_DEFAULT
    if not _write_y_table(1, "y50", y50):
        return False
    time.sleep(0.5)  # Wait before commit
    if not self.cal_commit():
        if verbose:
            print(" Failed to commit calibration")
        return False
    if verbose:
        print(" Calibration written successfully!")
    return True
def cal_read(self, index, timeout=2.0):
    """Read a single calibration value from the device.

    Args:
        index: Calibration table index to read (0-44).
        timeout: Seconds to wait for the ACK frame (new, defaulted —
            backward compatible).

    Returns:
        Tuple (index, value, valid) from the ACK frame, or None on
        NAK/timeout.
    """
    # BUGFIX/consistency: the original treated the first 5 bytes read as
    # the response, so a pending diagnostic frame (or stale input) made
    # the read fail and desynced the stream. Apply the same framing
    # discipline as send_command: flush first, then skip non-response
    # frames until the ACK arrives or the timeout expires.
    self.ser.reset_input_buffer()
    self.ser.write(bytes([CMD_CAL_READ, index, 0, 0, 0]))
    buf = b""
    start = time.time()
    while time.time() - start < timeout:
        buf += self.ser.read(5 - len(buf))
        if len(buf) < 5:
            continue
        response, buf = buf[:5], b""
        if response[0] == RSP_ACK:
            # ACK payload: echoed index, stored value, "calibrated" flag.
            return (response[1], response[2], response[3])
        if response[0] == 0xC1:  # RSP_NAK
            return None
        # Any other frame is a diagnostic message; keep waiting.
    return None
def read_calibration_table(self, verbose=True):
    """Read the full mixing-ratio table back from the device.

    Returns:
        (table, valid) where table is a 9x5 list of ints and valid is the
        device's "calibrated" flag, or None if any single read failed.
    """
    if verbose:
        print("Reading calibration table from device...")
    table = []
    valid = None
    for row_idx in range(NUM_CCT):
        row = []
        for col_idx in range(NUM_LUMEN):
            index = row_idx * NUM_LUMEN + col_idx
            entry = self.cal_read(index)
            if entry is None:
                if verbose:
                    print(f" Failed to read index {index}")
                return None
            row.append(entry[1])
            # The valid flag is reported on every entry; keep the first.
            if valid is None:
                valid = entry[2]
        table.append(row)
    if verbose:
        status = "calibrated" if valid else "defaults"
        print(f" Read complete ({status})")
    return table, valid
def set_test_point(self, cct, lumen, verbose=False):
    """Drive the light to one (CCT, lumen) grid point.

    Returns:
        True when both the color and intensity commands were acknowledged.
    """
    if verbose:
        print(f" Setting CCT={cct}K, Lumen={lumen}")
    color_ok = self.set_color(cct)
    if not color_ok:
        if verbose:
            print(" Failed to set color")
        return False
    # Small gap between the two commands so the firmware can settle.
    time.sleep(0.2)
    intensity_ok = self.set_intensity(lumen)
    if not intensity_ok:
        if verbose:
            print(" Failed to set intensity")
        return False
    return True
def take_measurement(self, timeout=30, verbose=False):
    """Run the external spectrometer script and parse its JSON output.

    Expected stdout from the script: {"lumen": 450.5, "cct": 3250}

    Returns:
        The parsed dict, or None on any failure (missing script, non-zero
        exit, timeout, bad JSON, or missing keys).
    """
    if verbose:
        print(f" Taking measurement...")
    cmdline = ['uv', 'run', 'python', str(self.spectrometer_script), '--measure']
    try:
        proc = subprocess.run(
            cmdline,
            capture_output=True,
            text=True,
            timeout=timeout
        )
    except subprocess.TimeoutExpired:
        if verbose:
            print(f" Timeout waiting for spectrometer")
        return None
    except FileNotFoundError:
        if verbose:
            print(f" Spectrometer script not found: {self.spectrometer_script}")
        return None
    if proc.returncode != 0:
        if verbose:
            print(f" Spectrometer error: {proc.stderr.strip()}")
        return None
    try:
        data = json.loads(proc.stdout)
    except json.JSONDecodeError as e:
        if verbose:
            print(f" Invalid JSON from spectrometer: {e}")
        return None
    if 'lumen' in data and 'cct' in data:
        return data
    if verbose:
        print(f" Invalid response: missing lumen or cct")
    return None
def measure_all_points(self):
    """Measure all calibration points once. Returns True if all succeeded.
    Points are ordered to minimize drive current changes between consecutive
    measurements: sorted by lumen (ascending), with CCT in snake pattern
    (alternating direction) to avoid large jumps.
    """
    total_points = len(CCT_POINTS) * len(LUMEN_POINTS)
    current_point = 0
    success_count = 0
    # Generate measurement order: snake pattern to minimize current jumps
    # At each lumen level, alternate CCT direction
    measurement_order = []
    for i, lumen in enumerate(LUMEN_POINTS):
        ccts = CCT_POINTS if i % 2 == 0 else list(reversed(CCT_POINTS))
        for cct in ccts:
            measurement_order.append((cct, lumen))
    for cct, lumen in measurement_order:
        current_point += 1
        key = (cct, lumen)
        print(f" [{current_point}/{total_points}] CCT={cct}K, Lumen={lumen}", end="")
        # Set test point
        if not self.set_test_point(cct, lumen):
            print(" - FAILED to set")
            continue
        # Wait for light to stabilize
        time.sleep(4.0)
        # Take measurement with plausibility check
        max_retries = 3
        for attempt in range(max_retries):
            measurement = self.take_measurement()
            if not measurement:
                # No reading: retry unless this was the last attempt.
                if attempt < max_retries - 1:
                    time.sleep(2.0)
                    continue
                print(" - NO READING")
                break
            # Plausibility check: deviation must be < 60%
            cct_dev = abs(measurement['cct'] - cct) / cct
            lumen_dev = abs(measurement['lumen'] - lumen) / lumen if lumen > 0 else 0
            if cct_dev > 0.6 or lumen_dev > 0.6:
                # Implausible reading: wait longer (light may still be
                # settling) and retry unless this was the last attempt.
                if attempt < max_retries - 1:
                    time.sleep(4.0)
                    continue
                print(f" - IMPLAUSIBLE ({cct_dev*100:.0f}%/{lumen_dev*100:.0f}%)")
                break
            # Measurement is plausible, store it
            self.measurements[key] = {
                'target_cct': cct,
                'target_lumen': lumen,
                'measured_cct': measurement['cct'],
                'measured_lumen': measurement['lumen']
            }
            cct_err = (measurement['cct'] - cct) / cct * 100
            lumen_err = (measurement['lumen'] - lumen) / lumen * 100
            print(f" -> CCT:{cct_err:+5.1f}% Lum:{lumen_err:+5.1f}%")
            success_count += 1
            break
    return success_count == total_points
def print_deviation_table(self):
    """Print a table showing deviation of all measured points.

    Returns:
        (all_within_tolerance, max_cct_dev, max_lumen_dev); tolerance is
        +/-5% on both axes, and a missing grid point fails tolerance.
    """
    print("\n" + "=" * 70)
    print("DEVIATION TABLE (% error from target)")
    print("=" * 70)
    # Header
    header = " |"
    for lumen in LUMEN_POINTS:
        header += f" {lumen:4d}lm |"
    print(header)
    print("-" * len(header))
    max_cct_dev = 0
    max_lumen_dev = 0
    all_within_tolerance = True
    cct_devs = []
    lumen_devs = []
    for cct in CCT_POINTS:
        row = f"{cct:5d}K |"
        for lumen in LUMEN_POINTS:
            key = (cct, lumen)
            if key in self.measurements:
                m = self.measurements[key]
                cct_dev = (m['measured_cct'] - cct) / cct * 100
                lumen_dev = (m['measured_lumen'] - lumen) / lumen * 100
                cct_devs.append(cct_dev)
                lumen_devs.append(lumen_dev)
                max_cct_dev = max(max_cct_dev, abs(cct_dev))
                max_lumen_dev = max(max_lumen_dev, abs(lumen_dev))
                if abs(cct_dev) > 5 or abs(lumen_dev) > 5:
                    all_within_tolerance = False
                row += f"{cct_dev:+4.1f}/{lumen_dev:+4.1f}|"
            else:
                row += " --- |"
                # An unmeasured point counts as out of tolerance.
                all_within_tolerance = False
        print(row)
    # Calculate standard deviation
    def calc_std(values):
        # Population standard deviation (divides by N, not N-1).
        if len(values) < 2:
            return 0.0
        mean = sum(values) / len(values)
        variance = sum((x - mean) ** 2 for x in values) / len(values)
        return variance ** 0.5
    cct_std = calc_std(cct_devs)
    lumen_std = calc_std(lumen_devs)
    print("-" * len(header))
    print(f"Format: CCT%/Lumen% | Max CCT: {max_cct_dev:.1f}% | Max Lumen: {max_lumen_dev:.1f}%")
    print(f"Standard deviation | CCT: {cct_std:.2f}% | Lumen: {lumen_std:.2f}%")
    return all_within_tolerance, max_cct_dev, max_lumen_dev
def calibrate_y_data(self):
    """Calibrate y-data tables by measuring lumen output at fixed drive points.
    Returns (y27_cal, y50_cal) or (None, None) on failure.
    """
    print("\n" + "=" * 70)
    print("Y-DATA CALIBRATION")
    print("=" * 70)
    print("Measuring lumen output at fixed drive currents")
    print(f"Drive points (mA): {X_DATA}")
    print("=" * 70)
    # First, clear any existing calibration to use defaults
    # (with defaults active, the intensity targets map to known mA points).
    print("\nClearing existing calibration to use default tables...")
    time.sleep(0.2)  # Brief delay to ensure UART is idle
    self.ser.reset_input_buffer()  # Clear any pending diagnostics
    if not self.cal_clear():
        print("Failed to clear calibration")
        return None, None
    time.sleep(1.0)
    # Calibrate 2700K LED (use 2600K CCT = pure warm LED, mixing ratio 0)
    print("\n--- Calibrating 2700K LED (pure warm) ---")
    y27_cal = self._measure_led_curve(2600, Y27_DEFAULT)
    if y27_cal is None:
        return None, None
    # Turn off LEDs and wait for thermal stabilization before switching
    print("\nTurning off LEDs, waiting 10s for thermal stabilization...")
    self.set_intensity(0)
    time.sleep(10.0)
    # Calibrate 5000K LED (use 5000K CCT = pure cool LED, mixing ratio 255)
    print("\n--- Calibrating 5000K LED (pure cool) ---")
    y50_cal = self._measure_led_curve(5000, Y50_DEFAULT)
    if y50_cal is None:
        return None, None
    print("\n" + "=" * 70)
    print("Y-DATA CALIBRATION RESULTS")
    print("=" * 70)
    print(f" mA points: {X_DATA}")
    print(f" y27 (2700K): {y27_cal} (default: {Y27_DEFAULT})")
    print(f" y50 (5000K): {y50_cal} (default: {Y50_DEFAULT})")
    return y27_cal, y50_cal
def _measure_led_curve(self, cct, default_lumens):
    """Measure lumen output at each drive point for a single LED type.

    Args:
        cct: Color temperature to set (2600 for pure 2700K LED, 5000 for
            pure 5000K LED).
        default_lumens: Default y-data values to use as intensity targets.

    Returns:
        List of 5 measured lumen values, or None on failure.
    """
    measured = []
    # Set color (this determines which LED is primarily driven)
    if not self.set_color(cct):
        print(f" Failed to set color to {cct}K")
        return None
    time.sleep(0.5)
    for i, target_lumen in enumerate(default_lumens):
        ma = X_DATA[i]
        print(f" Point {i+1}/5: {ma:3d}mA (intensity={target_lumen:3d})", end="")
        if ma == 0:
            # Zero point is always 0 lumen
            measured.append(0)
            print(" -> 0 lm (zero point)")
            continue
        # Set intensity to the default lumen value
        # With cleared calibration (defaults), this drives at the target mA
        if not self.set_intensity(target_lumen):
            print(" - FAILED to set intensity")
            return None
        # Wait for LED to stabilize (longer for last point at max current)
        stabilize_time = 5.0 if i == len(default_lumens) - 1 else 2.0
        time.sleep(stabilize_time)
        # Take measurement with retry
        reading = None
        for attempt in range(3):
            reading = self.take_measurement()
            if reading and reading['lumen'] > 0:
                break
            # No point sleeping after the final failed attempt.
            if attempt < 2:
                time.sleep(1.0)
        # BUGFIX: a reading that exists but reports lumen <= 0 used to be
        # accepted after the retries even though the retry condition above
        # rejected it; treat it as a failed measurement as well.
        if reading is None or reading['lumen'] <= 0:
            print(" - NO READING")
            return None
        actual_lumen = int(round(reading['lumen']))
        deviation = (actual_lumen - target_lumen) / target_lumen * 100 if target_lumen > 0 else 0
        measured.append(actual_lumen)
        print(f" -> {actual_lumen:4d} lm ({deviation:+5.1f}%)")
    return measured
def run_calibration(self, skip_y=False):
    """Run full calibration: y-data first, then iterative mixing ratios.
    Args:
        skip_y: If True, skip y-data calibration (only do mixing ratios)
    """
    print("=" * 70)
    print("LED Light Full Calibration")
    print("=" * 70)
    print("Phase 1: Y-data calibration (LED lumen curves)")
    print("Phase 2: Mixing ratio calibration (CCT/lumen targeting)")
    print("=" * 70)
    self.connect()
    y27_cal = None
    y50_cal = None
    try:
        # Disable profile mode
        print("\nDisabling profile mode...")
        self.disable_profile()
        time.sleep(0.5)
        # Phase 1: Y-data calibration
        if not skip_y:
            y27_cal, y50_cal = self.calibrate_y_data()
            if y27_cal is None:
                print("\nY-data calibration failed, aborting.")
                return
            # Write y-data immediately with default mixing ratios
            # (mixing ratios are then refined iteratively in phase 2).
            print("\nWriting y-data to device...")
            default_mixing = [
                [  0,   0,   0,   0,   0],
                [ 32,  38,  36,  37,  36],
                [ 84,  72,  70,  71,  71],
                [142, 100,  99,  99,  99],
                [175, 131, 126, 127, 126],
                [155, 154, 154, 157, 153],
                [206, 170, 184, 184, 185],
                [242, 223, 208, 216, 217],
                [255, 255, 255, 255, 255]
            ]
            if not self.write_calibration_table(default_mixing, y27_cal, y50_cal, verbose=False):
                print("Failed to write y-data, aborting.")
                return
            print("Y-data written, proceeding to mixing ratio calibration...")
            time.sleep(1.0)
        # Phase 2: Mixing ratio calibration
        print("\n" + "=" * 70)
        print("MIXING RATIO CALIBRATION")
        print("=" * 70)
        print("Target: < 5% deviation on all points")
        print("Maximum passes: 10")
        max_passes = 10
        for pass_num in range(1, max_passes + 1):
            print(f"\n{'='*70}")
            print(f"PASS {pass_num}/{max_passes}")
            print("=" * 70)
            # Clear previous measurements for fresh pass
            self.measurements = {}
            # Measure all points
            print("\nMeasuring all points...")
            self.measure_all_points()
            # Print deviation table
            converged, max_cct, max_lumen = self.print_deviation_table()
            # Save progress
            self.save_calibration()
            if converged:
                print(f"\n*** CONVERGED after {pass_num} pass(es)! ***")
                print("All points within 5% tolerance.")
                break
            if pass_num == max_passes:
                print(f"\n*** MAX PASSES REACHED ***")
                print(f"Best achieved: CCT {max_cct:.1f}%, Lumen {max_lumen:.1f}%")
                break
            # Generate and apply corrections
            print("\nCalculating and applying corrections...")
            calibrated_table = self.generate_table(verbose=False)
            if calibrated_table:
                if self.write_calibration_table(calibrated_table, y27_cal, y50_cal, verbose=False):
                    print("Corrections applied, starting next pass...")
                    time.sleep(1.0)
                else:
                    print("Failed to write calibration, stopping.")
                    break
    finally:
        self.disconnect()
    # NOTE(review): banner placement after the finally reconstructed from a
    # whitespace-mangled source — confirm it was not inside the finally block.
    print("\n" + "=" * 70)
    print("Calibration complete!")
    print("=" * 70)
def save_calibration(self):
    """Persist the in-memory measurements to CALIBRATION_OUTPUT as JSON."""
    # JSON keys must be strings, so (cct, lumen) tuples become "cct,lumen".
    serializable = {f"{cct},{lumen}": m for (cct, lumen), m in self.measurements.items()}
    payload = {
        'measurements': serializable,
        'cct_points': CCT_POINTS,
        'lumen_points': LUMEN_POINTS
    }
    with open(CALIBRATION_OUTPUT, 'w') as f:
        json.dump(payload, f, indent=2)
def generate_table(self, verbose=False):
    """
    Generate the calibrated mixing ratio table using direct proportional correction.
    Simple approach: mixing ratio controls CCT.
    - If measured CCT < target CCT: increase ratio (more 5000K LED)
    - If measured CCT > target CCT: decrease ratio (less 5000K LED)
    - Correction is proportional to error, scaled by CCT-to-ratio gradient

    Returns:
        The 9x5 calibrated mixing-ratio table (list of lists of ints).
        Also writes measurements/corrections/tables to CALIBRATION_OUTPUT.
    """
    if verbose:
        print("\nGenerating calibration table...")
    # Read current mixing ratios from device (baseline)
    baseline_table = None
    if self.ser and self.ser.is_open:
        result = self.read_calibration_table(verbose=verbose)
        if result:
            baseline_table, _ = result
            if verbose:
                print("Using current device values as baseline")
    if baseline_table is None:
        if verbose:
            print("Using firmware defaults as baseline")
        baseline_table = [
            [  0,   0,   0,   0,   0],  # 2600K
            [ 32,  38,  36,  37,  36],  # 2900K
            [ 84,  72,  70,  71,  71],  # 3200K
            [142, 100,  99,  99,  99],  # 3500K
            [175, 131, 126, 127, 126],  # 3800K
            [155, 154, 154, 157, 153],  # 4100K
            [206, 170, 184, 184, 185],  # 4400K
            [242, 223, 208, 216, 217],  # 4700K
            [255, 255, 255, 255, 255]   # 5000K
        ]
    # CCT range: 2600K to 5000K = 2400K maps to ratio 0-255
    # Gradient: ~9.4 K per ratio unit (but use 10 for simpler math)
    K_PER_RATIO = 10.0
    corrections = {}
    for (cct, lumen), m in self.measurements.items():
        i = CCT_POINTS.index(cct)
        j = LUMEN_POINTS.index(lumen)
        baseline_r = baseline_table[i][j]
        target_cct = m['target_cct']
        target_lumen = m['target_lumen']
        measured_cct = m['measured_cct']
        measured_lumen = m['measured_lumen']
        cct_error = target_cct - measured_cct  # positive = need higher CCT = more 5000K = higher ratio
        lumen_error = target_lumen - measured_lumen
        # Simple proportional correction: dr = cct_error / K_PER_RATIO
        # If measured CCT is 100K too low, increase ratio by 10 units
        dr = cct_error / K_PER_RATIO
        # Apply correction with gain factor (can be tuned)
        # Use 1.0 for full correction, <1 for damping, >1 for aggressive
        gain = 1.0
        dr = dr * gain
        # Limit adjustment per iteration to prevent oscillation
        dr = max(-30, min(30, dr))
        corrections[(cct, lumen)] = {
            'cct_error': cct_error,
            'lumen_error': lumen_error,
            'lumen_error_pct': 100 * lumen_error / target_lumen if target_lumen > 0 else 0,
            'mixing_adjust': dr,
            'baseline_ratio': baseline_r
        }
    # Apply corrections to generate calibrated table
    calibrated_table = []
    for i, cct in enumerate(CCT_POINTS):
        row = []
        for j, lumen in enumerate(LUMEN_POINTS):
            baseline_r = baseline_table[i][j]
            key = (cct, lumen)
            if key in corrections:
                c = corrections[key]
                adjusted = baseline_r + c['mixing_adjust']
                # Clamp to the uint8 range the firmware stores.
                adjusted = max(0, min(255, round(adjusted)))
            else:
                # Unmeasured points keep their baseline value.
                adjusted = baseline_r
            row.append(int(adjusted))
        calibrated_table.append(row)
    # Print correction details
    if verbose:
        print("\nCorrection summary:")
        print(f"{'CCT':>5} {'Lumen':>5} | {'CCT err':>8} {'Lum err':>8} | {'Base':>4} {'Adj':>6} {'New':>4}")
        print("-" * 65)
        for i, cct in enumerate(CCT_POINTS):
            for j, lumen in enumerate(LUMEN_POINTS):
                key = (cct, lumen)
                if key in corrections:
                    c = corrections[key]
                    new_r = calibrated_table[i][j]
                    print(f"{cct:>5} {lumen:>5} | {c['cct_error']:>+7.0f}K {c['lumen_error']:>+7.0f}lm | {c['baseline_ratio']:>4} {c['mixing_adjust']:>+6.1f} {new_r:>4}")
        # Print final table (as C source, ready to paste into the firmware)
        print("\nCalibrated mixing_ratios table:")
        print("const uint8_t mixing_ratios[NUM_CCT][NUM_LUMEN] = {")
        for i, (cct, row) in enumerate(zip(CCT_POINTS, calibrated_table)):
            values = ", ".join(f"{v:3d}" for v in row)
            comment = f"// {cct}K"
            if i == 0:
                comment += " (pure 2700K)"
            elif i == len(CCT_POINTS) - 1:
                comment += " (pure 5000K)"
            comma = "," if i < len(CCT_POINTS) - 1 else ""
            print(f" {{{values}}}{comma} {comment}")
        print("};")
    # Save to calibration file
    data = {
        'measurements': {f"{k[0]},{k[1]}": v for k, v in self.measurements.items()},
        'corrections': {f"{k[0]},{k[1]}": v for k, v in corrections.items()},
        'cct_points': CCT_POINTS,
        'lumen_points': LUMEN_POINTS,
        'baseline_table': baseline_table,
        'calibrated_table': calibrated_table
    }
    with open(CALIBRATION_OUTPUT, 'w') as f:
        json.dump(data, f, indent=2)
    if verbose:
        print(f"\nCalibration saved to {CALIBRATION_OUTPUT}")
    return calibrated_table
def main():
    """Command-line entry point: parse args and dispatch on the action flags."""
    parser = argparse.ArgumentParser(description='LED Light Calibration Tool')
    parser.add_argument('port', nargs='?', default='COM3',
                        help='Serial port (default: COM3)')
    parser.add_argument('--baudrate', '-b', type=int, default=115200,
                        help='Baud rate (default: 115200)')
    parser.add_argument('--spectrometer', '-s', type=Path,
                        default=DEFAULT_SPECTROMETER,
                        help='Path to spectrometer script (default: spectrometer.py)')
    parser.add_argument('--generate-only', '-g', action='store_true',
                        help='Only generate table from existing measurements')
    parser.add_argument('--write', '-w', action='store_true',
                        help='Write calibration table to device')
    parser.add_argument('--clear', '-c', action='store_true',
                        help='Clear calibration on device (revert to defaults)')
    parser.add_argument('--read', action='store_true',
                        help='Read calibration table from device')
    parser.add_argument('--skip-y', action='store_true',
                        help='Skip y-data calibration (only do mixing ratios)')
    parser.add_argument('--y-only', action='store_true',
                        help='Only run y-data calibration (no mixing ratios)')
    args = parser.parse_args()
    tool = CalibrationTool(args.port, args.baudrate, args.spectrometer)
    if args.read:
        # Read calibration from device
        tool.connect()
        try:
            result = tool.read_calibration_table()
            if result:
                table, valid = result
                status = "CALIBRATED" if valid else "DEFAULTS"
                print(f"\nCalibration status: {status}")
                print("\nMixing ratios table:")
                for i, (cct, row) in enumerate(zip(CCT_POINTS, table)):
                    values = ", ".join(f"{v:3d}" for v in row)
                    print(f" {cct}K: [{values}]")
        finally:
            tool.disconnect()
    elif args.clear:
        # Clear calibration on device
        tool.connect()
        try:
            if tool.cal_clear():
                print("Calibration cleared successfully")
            else:
                print("Failed to clear calibration")
        finally:
            tool.disconnect()
    elif args.y_only:
        # Only run y-data calibration
        tool.connect()
        try:
            tool.disable_profile()
            time.sleep(0.5)
            y27_cal, y50_cal = tool.calibrate_y_data()
            if y27_cal and y50_cal:
                # Write y-data with default mixing ratios
                print("\nWriting y-data calibration (with default mixing ratios)...")
                default_mixing = [
                    [  0,   0,   0,   0,   0],  # 2600K
                    [ 32,  38,  36,  37,  36],  # 2900K
                    [ 84,  72,  70,  71,  71],  # 3200K
                    [142, 100,  99,  99,  99],  # 3500K
                    [175, 131, 126, 127, 126],  # 3800K
                    [155, 154, 154, 157, 153],  # 4100K
                    [206, 170, 184, 184, 185],  # 4400K
                    [242, 223, 208, 216, 217],  # 4700K
                    [255, 255, 255, 255, 255]   # 5000K
                ]
                if tool.write_calibration_table(default_mixing, y27_cal, y50_cal):
                    print("Y-data calibration saved successfully!")
                    # Save to file as well
                    data = {
                        'y27_cal': y27_cal,
                        'y50_cal': y50_cal,
                        'x_data': X_DATA
                    }
                    with open(CALIBRATION_OUTPUT, 'w') as f:
                        json.dump(data, f, indent=2)
                else:
                    print("Failed to write y-data calibration")
        finally:
            tool.disconnect()
    elif args.generate_only or args.write:
        # Load existing measurements and generate/write table
        if CALIBRATION_OUTPUT.exists():
            with open(CALIBRATION_OUTPUT, 'r') as f:
                saved = json.load(f)
            if 'measurements' in saved:
                # Keys were saved as "cct,lumen" strings; restore int tuples.
                tool.measurements = {
                    tuple(map(int, k.split(','))): v
                    for k, v in saved['measurements'].items()
                }
            # Connect to read current values from device
            tool.connect()
            try:
                table = tool.generate_table(verbose=True)
                if args.write and table:
                    # Load y-data if available
                    y27 = saved.get('y27_cal')
                    y50 = saved.get('y50_cal')
                    tool.write_calibration_table(table, y27, y50)
            finally:
                tool.disconnect()
        else:
            print(f"No calibration file found: {CALIBRATION_OUTPUT}")
    else:
        # Default action: run the full two-phase calibration.
        tool.run_calibration(skip_y=args.skip_y)
if __name__ == "__main__":
    # Run the CLI only when executed as a script, not when imported.
    main()