From a8c0c5d921d47ff92f70acd2df058686feae11ef Mon Sep 17 00:00:00 2001
From: Thomas Hallock
Date: Thu, 8 Jan 2026 10:58:28 -0600
Subject: [PATCH] fix(vision): remove quantization from model export - corrupts weights!
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The --quantize_uint8 flag in tensorflowjs_converter corrupts model weights,
causing completely wrong predictions in the browser even though Python
testing works fine. This was documented in .claude/CLAUDE.md but the
training script still used quantization.

Model size increases (556KB → 2.2MB) but predictions are now correct.

Removed quantization from all 3 export paths:
- SavedModel → GraphModel conversion
- Keras → LayersModel fallback
- Direct Python API fallback

After this fix, re-run training via /vision-training/train to get a
working model.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 .../web/scripts/train-column-classifier/train_model.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/apps/web/scripts/train-column-classifier/train_model.py b/apps/web/scripts/train-column-classifier/train_model.py
index 0ab4c151..8940431e 100644
--- a/apps/web/scripts/train-column-classifier/train_model.py
+++ b/apps/web/scripts/train-column-classifier/train_model.py
@@ -473,11 +473,13 @@ def export_to_tfjs(model, keras_path: str, output_dir: str, use_json: bool = Fal
 
     # Run tensorflowjs_converter on the SavedModel
     # Using tf_saved_model input format which handles nested models properly
+    # IMPORTANT: Do NOT use --quantize_uint8 - it corrupts model weights!
+    # See apps/web/.claude/CLAUDE.md "Quantization Corruption" section.
+    # Model size increases (556KB → 2.2MB) but predictions are correct.
     cmd = [
         sys.executable, "-m", "tensorflowjs.converters.converter",
         "--input_format=tf_saved_model",
         "--output_format=tfjs_graph_model",
-        "--quantize_uint8", "*",
         str(saved_model_path),
         str(output_path),
     ]
@@ -492,12 +494,11 @@ def export_to_tfjs(model, keras_path: str, output_dir: str, use_json: bool = Fal
             "phase": "exporting"
         }, use_json)
 
-        # Fall back to Keras format conversion
+        # Fall back to Keras format conversion (no quantization!)
         cmd = [
             sys.executable, "-m", "tensorflowjs.converters.converter",
             "--input_format=keras",
             "--output_format=tfjs_layers_model",
-            "--quantize_uint8", "*",
             keras_path,
             str(output_path),
         ]
@@ -511,13 +512,12 @@ def export_to_tfjs(model, keras_path: str, output_dir: str, use_json: bool = Fal
             "phase": "exporting"
         }, use_json)
 
-        # Fall back to direct Python API save
+        # Fall back to direct Python API save (no quantization!)
         emit_progress("status", {"message": "Falling back to direct Python API save...",
                       "phase": "exporting"}, use_json)
         import tensorflowjs as tfjs
         tfjs.converters.save_keras_model(
             model,
             str(output_path),
-            quantization_dtype_map={"uint8": "*"},
         )
     # Patch for Keras 3.x compatibility (if we used layers_model format)
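
Verification sketch (illustrative only, not part of the applied diff): after
re-running training, the exported model.json can be checked for leftover
quantization metadata. The check_export helper and the example output
directory are assumptions made for this sketch; the weightsManifest layout is
the standard TensorFlow.js artifact format, in which quantized weights carry a
per-weight "quantization" entry.

    import json
    from pathlib import Path

    def check_export(output_dir: str) -> None:
        """Assert that no exported weight still carries quantization metadata."""
        manifest = json.loads((Path(output_dir) / "model.json").read_text())
        for group in manifest.get("weightsManifest", []):
            for weight in group.get("weights", []):
                # tensorflowjs adds a "quantization" block per weight when
                # --quantize_uint8 (or a quantization_dtype_map) is used.
                assert "quantization" not in weight, (
                    f"{weight['name']} is still quantized"
                )
        print("OK: no quantization metadata in exported weights")

    # Hypothetical usage, assuming the converter wrote to ./tfjs_model:
    # check_export("tfjs_model")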