Compare commits

14 Commits

abacus-rea...abacus-rea

| Author | SHA1 | Date |
|---|---|---|
|  | 41aa7ff33f |  |
|  | 1be6151bae |  |
|  | 70b363ce88 |  |
|  | d90d263b2a |  |
|  | 43524d8238 |  |
|  | a5025f01bc |  |
|  | a8fb77e8e3 |  |
|  | e80ef04f45 |  |
|  | b3b769c0e2 |  |
|  | ff59612e7b |  |
|  | d8c764595d |  |
|  | 005140a1e7 |  |
|  | 5d0ac65bdd |  |
|  | 7a9185eadb |  |
.claude/settings.local.json

@@ -422,7 +422,47 @@
 "Bash(apps/web/src/lib/vision/perspectiveTransform.ts )",
 "Bash(apps/web/src/socket-server.ts)",
 "Bash(apps/web/src/components/vision/CalibrationOverlay.tsx )",
-"Bash(apps/web/src/components/practice/ActiveSession.tsx )"
+"Bash(apps/web/src/components/practice/ActiveSession.tsx )",
+"Bash(open -a Preview:*)",
+"Bash(pip3 install:*)",
+"Bash(pip3 uninstall:*)",
+"Bash(/opt/homebrew/bin/python3:*)",
+"Bash(/usr/bin/python3:*)",
+"Bash(/opt/homebrew/bin/pip3 install:*)",
+"Bash(source:*)",
+"Bash(pip install:*)",
+"Bash(/opt/homebrew/opt/python@3.11/bin/python3.11:*)",
+"Bash(tensorflowjs_converter:*)",
+"Bash(public/models/abacus-column-classifier/column-classifier.keras )",
+"Bash(public/models/abacus-column-classifier/)",
+"Bash(public/models/abacus-column-classifier/column-classifier.h5 )",
+"Bash(apps/web/scripts/train-column-classifier/train_model.py )",
+"Bash(apps/web/src/app/remote-camera/[sessionId]/page.tsx )",
+"Bash(apps/web/src/hooks/useColumnClassifier.ts )",
+"Bash(apps/web/src/lib/vision/columnClassifier.ts )",
+"Bash(\"apps/web/src/app/remote-camera/[sessionId]/page.tsx\" )",
+"Bash(apps/web/drizzle/0054_new_mathemanic.sql )",
+"Bash(apps/web/drizzle/meta/0054_snapshot.json )",
+"Bash(apps/web/src/components/AbacusDisplayDropdown.tsx )",
+"Bash(apps/web/src/db/schema/abacus-settings.ts )",
+"Bash(packages/abacus-react/src/AbacusContext.tsx)",
+"Bash(apps/web/src/lib/vision/frameProcessor.ts )",
+"Bash(apps/web/src/lib/vision/beadDetector.ts )",
+"Bash(apps/web/public/models/abacus-column-classifier/model.json )",
+"Bash(.claude/settings.local.json)",
+"Bash(apps/web/src/components/MyAbacus.tsx )",
+"Bash(apps/web/src/contexts/MyAbacusContext.tsx )",
+"Bash(apps/web/src/components/vision/DockedVisionFeed.tsx )",
+"Bash(apps/web/src/components/vision/VisionIndicator.tsx )",
+"Bash(apps/web/src/components/vision/VisionSetupModal.tsx)",
+"Bash(npx storybook:*)",
+"Bash(apps/web/src/hooks/usePhoneCamera.ts )",
+"Bash(apps/web/src/lib/remote-camera/session-manager.ts )",
+"Bash(apps/web/src/test/setup.ts )",
+"Bash(apps/web/src/hooks/__tests__/useRemoteCameraDesktop.test.ts )",
+"Bash(apps/web/src/hooks/__tests__/useRemoteCameraPhone.test.ts )",
+"Bash(apps/web/src/lib/remote-camera/__tests__/)",
+"Bash(packages/abacus-react/CHANGELOG.md )"
 ],
 "deny": [],
 "ask": []
apps/web/drizzle/meta/0054_snapshot.json

@@ -116,13 +116,9 @@
 "abacus_settings_user_id_users_id_fk": {
 "name": "abacus_settings_user_id_users_id_fk",
 "tableFrom": "abacus_settings",
-"columnsFrom": [
-"user_id"
-],
+"columnsFrom": ["user_id"],
 "tableTo": "users",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -240,9 +236,7 @@
 "indexes": {
 "arcade_rooms_code_unique": {
 "name": "arcade_rooms_code_unique",
-"columns": [
-"code"
-],
+"columns": ["code"],
 "isUnique": true
 }
 },
@@ -339,26 +333,18 @@
 "arcade_sessions_room_id_arcade_rooms_id_fk": {
 "name": "arcade_sessions_room_id_arcade_rooms_id_fk",
 "tableFrom": "arcade_sessions",
-"columnsFrom": [
-"room_id"
-],
+"columnsFrom": ["room_id"],
 "tableTo": "arcade_rooms",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 },
 "arcade_sessions_user_id_users_id_fk": {
 "name": "arcade_sessions_user_id_users_id_fk",
 "tableFrom": "arcade_sessions",
-"columnsFrom": [
-"user_id"
-],
+"columnsFrom": ["user_id"],
 "tableTo": "users",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -424,9 +410,7 @@
 "indexes": {
 "players_user_id_idx": {
 "name": "players_user_id_idx",
-"columns": [
-"user_id"
-],
+"columns": ["user_id"],
 "isUnique": false
 }
 },
@@ -434,13 +418,9 @@
 "players_user_id_users_id_fk": {
 "name": "players_user_id_users_id_fk",
 "tableFrom": "players",
-"columnsFrom": [
-"user_id"
-],
+"columnsFrom": ["user_id"],
 "tableTo": "users",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -514,9 +494,7 @@
 "indexes": {
 "idx_room_members_user_id_unique": {
 "name": "idx_room_members_user_id_unique",
-"columns": [
-"user_id"
-],
+"columns": ["user_id"],
 "isUnique": true
 }
 },
@@ -524,13 +502,9 @@
 "room_members_room_id_arcade_rooms_id_fk": {
 "name": "room_members_room_id_arcade_rooms_id_fk",
 "tableFrom": "room_members",
-"columnsFrom": [
-"room_id"
-],
+"columnsFrom": ["room_id"],
 "tableTo": "arcade_rooms",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -605,13 +579,9 @@
 "room_member_history_room_id_arcade_rooms_id_fk": {
 "name": "room_member_history_room_id_arcade_rooms_id_fk",
 "tableFrom": "room_member_history",
-"columnsFrom": [
-"room_id"
-],
+"columnsFrom": ["room_id"],
 "tableTo": "arcade_rooms",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -713,10 +683,7 @@
 "indexes": {
 "idx_room_invitations_user_room": {
 "name": "idx_room_invitations_user_room",
-"columns": [
-"user_id",
-"room_id"
-],
+"columns": ["user_id", "room_id"],
 "isUnique": true
 }
 },
@@ -724,13 +691,9 @@
 "room_invitations_room_id_arcade_rooms_id_fk": {
 "name": "room_invitations_room_id_arcade_rooms_id_fk",
 "tableFrom": "room_invitations",
-"columnsFrom": [
-"room_id"
-],
+"columnsFrom": ["room_id"],
 "tableTo": "arcade_rooms",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -833,13 +796,9 @@
 "room_reports_room_id_arcade_rooms_id_fk": {
 "name": "room_reports_room_id_arcade_rooms_id_fk",
 "tableFrom": "room_reports",
-"columnsFrom": [
-"room_id"
-],
+"columnsFrom": ["room_id"],
 "tableTo": "arcade_rooms",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -918,10 +877,7 @@
 "indexes": {
 "idx_room_bans_user_room": {
 "name": "idx_room_bans_user_room",
-"columns": [
-"user_id",
-"room_id"
-],
+"columns": ["user_id", "room_id"],
 "isUnique": true
 }
 },
@@ -929,13 +885,9 @@
 "room_bans_room_id_arcade_rooms_id_fk": {
 "name": "room_bans_room_id_arcade_rooms_id_fk",
 "tableFrom": "room_bans",
-"columnsFrom": [
-"room_id"
-],
+"columnsFrom": ["room_id"],
 "tableTo": "arcade_rooms",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -998,13 +950,9 @@
 "user_stats_user_id_users_id_fk": {
 "name": "user_stats_user_id_users_id_fk",
 "tableFrom": "user_stats",
-"columnsFrom": [
-"user_id"
-],
+"columnsFrom": ["user_id"],
 "tableTo": "users",
-"columnsTo": [
-"id"
-],
+"columnsTo": ["id"],
 "onUpdate": "no action",
 "onDelete": "cascade"
 }
@@ -1062,16 +1010,12 @@
 "indexes": {
 "users_guest_id_unique": {
 "name": "users_guest_id_unique",
-"columns": [
-"guest_id"
-],
+"columns": ["guest_id"],
 "isUnique": true
 },
 "users_email_unique": {
 "name": "users_email_unique",
-"columns": [
-"email"
-],
+"columns": ["email"],
 "isUnique": true
 }
 },
@@ -1091,4 +1035,4 @@
 "internal": {
 "indexes": {}
 }
 }
 }

@@ -388,4 +388,4 @@
 "breakpoints": true
 }
 ]
 }
 }
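The snapshot changes above are formatting-only: drizzle-kit regenerated the snapshot and collapsed the multi-line `columnsFrom` / `columnsTo` / `columns` arrays onto single lines; no constraint or index actually changed. For orientation, here is a hypothetical sketch of the kind of schema definition (in the spirit of apps/web/src/db/schema/abacus-settings.ts) that produces the `abacus_settings_user_id_users_id_fk` entry. The dialect (sqlite-core) and the column types are assumptions; the snapshot only records the FK and index metadata.

```ts
import { sqliteTable, text } from 'drizzle-orm/sqlite-core'

// Assumed users table; the guest_id/email unique indexes match
// users_guest_id_unique and users_email_unique in the snapshot.
export const users = sqliteTable('users', {
  id: text('id').primaryKey(),
  guestId: text('guest_id').unique(),
  email: text('email').unique(),
})

export const abacusSettings = sqliteTable('abacus_settings', {
  id: text('id').primaryKey(),
  userId: text('user_id')
    .notNull()
    // Matches the snapshot's FK: onDelete "cascade", onUpdate "no action" (the default)
    .references(() => users.id, { onDelete: 'cascade' }),
})
```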
Binary file not shown.

778 apps/web/public/models/abacus-column-classifier/model.json (Normal file)
@@ -0,0 +1,778 @@
{
  "format": "layers-model",
  "generatedBy": "keras v3.13.0",
  "convertedBy": "TensorFlow.js Converter v4.22.0",
  "modelTopology": {
    "keras_version": "3.13.0",
    "backend": "tensorflow",
    "model_config": {
      "class_name": "Sequential",
      "config": {
        "name": "sequential",
        "trainable": true,
        "dtype": "float32",
        "layers": [
          { "class_name": "InputLayer",
            "config": { "dtype": "float32", "sparse": false, "ragged": false, "name": "input_layer", "optional": false, "batchInputShape": [null, 128, 64, 1] } },
          { "class_name": "Conv2D",
            "config": { "name": "conv2d", "trainable": true, "dtype": "float32", "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization", "trainable": true, "dtype": "float32", "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "MaxPooling2D",
            "config": { "name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last" } },
          { "class_name": "Dropout",
            "config": { "name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "seed": null, "noise_shape": null } },
          { "class_name": "Conv2D",
            "config": { "name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization_1", "trainable": true, "dtype": "float32", "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "MaxPooling2D",
            "config": { "name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last" } },
          { "class_name": "Dropout",
            "config": { "name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "seed": null, "noise_shape": null } },
          { "class_name": "Conv2D",
            "config": { "name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization_2", "trainable": true, "dtype": "float32", "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "MaxPooling2D",
            "config": { "name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last" } },
          { "class_name": "Dropout",
            "config": { "name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.25, "seed": null, "noise_shape": null } },
          { "class_name": "Flatten",
            "config": { "name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last" } },
          { "class_name": "Dense",
            "config": { "name": "dense", "trainable": true, "dtype": "float32", "units": 128, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "kernel_constraint": null, "bias_constraint": null, "quantization_config": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization_3", "trainable": true, "dtype": "float32", "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "Dropout",
            "config": { "name": "dropout_3", "trainable": true, "dtype": "float32", "rate": 0.5, "seed": null, "noise_shape": null } },
          { "class_name": "Dense",
            "config": { "name": "dense_1", "trainable": true, "dtype": "float32", "units": 10, "activation": "softmax", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "kernel_constraint": null, "bias_constraint": null, "quantization_config": null } }
        ],
        "build_input_shape": [null, 128, 64, 1]
      }
    },
    "training_config": {
      "loss": "sparse_categorical_crossentropy",
      "loss_weights": null,
      "metrics": ["accuracy"],
      "weighted_metrics": null,
      "run_eagerly": false,
      "steps_per_execution": 1,
      "jit_compile": false,
      "optimizer_config": {
        "class_name": "Adam",
        "config": { "name": "adam", "learning_rate": 0.0010000000474974513, "weight_decay": null, "clipnorm": null, "global_clipnorm": null, "clipvalue": null, "use_ema": false, "ema_momentum": 0.99, "ema_overwrite_frequency": null, "loss_scale_factor": null, "gradient_accumulation_steps": null, "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-7, "amsgrad": false }
      }
    }
  },
  "weightsManifest": [
    {
      "paths": ["group1-shard1of1.bin"],
      "weights": [
        { "name": "batch_normalization/gamma", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.970035195350647, "scale": 0.00039288062675326476, "original_dtype": "float32" } },
        { "name": "batch_normalization/beta", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.04866361422281639, "scale": 0.00040217862994063134, "original_dtype": "float32" } },
        { "name": "batch_normalization/moving_mean", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.000010939256753772497, "scale": 0.001048501559268391, "original_dtype": "float32" } },
        { "name": "batch_normalization/moving_variance", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.000532817910425365, "scale": 0.00016297123568388176, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/gamma", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.9726127982139587, "scale": 0.00019898110744999905, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/beta", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.06264814909766703, "scale": 0.00037290564939087515, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/moving_mean", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.12544548511505127, "scale": 0.001907470179539101, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/moving_variance", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.042508192360401154, "scale": 0.002489794206385519, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/gamma", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.975760817527771, "scale": 0.0003113854165170707, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/beta", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.023137448749998037, "scale": 0.00013072004943501716, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/moving_mean", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.015866611152887344, "scale": 0.005222073358063605, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/moving_variance", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.01432291604578495, "scale": 0.00944612571860061, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/gamma", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.9765098690986633, "scale": 0.0008689317048764697, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/beta", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.05253423078387391, "scale": 0.00032833894239921196, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/moving_mean", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 2.3402893845059225e-8, "scale": 0.124165194550534, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/moving_variance", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.000532600621227175, "scale": 0.8092722632006888, "original_dtype": "float32" } },
        { "name": "conv2d/kernel", "shape": [3, 3, 1, 32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.1684967933916578, "scale": 0.0012961291799358293, "original_dtype": "float32" } },
        { "name": "conv2d/bias", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.014791351323034248, "scale": 0.00019462304372413485, "original_dtype": "float32" } },
        { "name": "conv2d_1/kernel", "shape": [3, 3, 32, 64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.14185832411635155, "scale": 0.0010912178778180888, "original_dtype": "float32" } },
        { "name": "conv2d_1/bias", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.052345379924072934, "scale": 0.00033341006321065564, "original_dtype": "float32" } },
        { "name": "conv2d_2/kernel", "shape": [3, 3, 64, 128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.09215074052997664, "scale": 0.0007199276603904425, "original_dtype": "float32" } },
        { "name": "conv2d_2/bias", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.052666782806901374, "scale": 0.00035346834098591524, "original_dtype": "float32" } },
        { "name": "dense/kernel", "shape": [16384, 128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.1078803108311167, "scale": 0.0006960020053620432, "original_dtype": "float32" } },
        { "name": "dense/bias", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.010696043731535184, "scale": 0.00013539295862702763, "original_dtype": "float32" } },
        { "name": "dense_1/kernel", "shape": [128, 10], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.26071277062098186, "scale": 0.002190863618663713, "original_dtype": "float32" } },
        { "name": "dense_1/bias", "shape": [10], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.020677046455881174, "scale": 0.00016028718182853623, "original_dtype": "float32" } }
      ]
    }
  ]
}
@@ -0,0 +1,858 @@
{
  "format": "layers-model",
  "generatedBy": "keras v3.13.0",
  "convertedBy": "TensorFlow.js Converter v4.22.0",
  "modelTopology": {
    "keras_version": "3.13.0",
    "backend": "tensorflow",
    "model_config": {
      "class_name": "Sequential",
      "config": {
        "name": "sequential",
        "trainable": true,
        "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
        "layers": [
          { "class_name": "InputLayer",
            "config": { "batch_shape": [null, 128, 64, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input_layer", "optional": false } },
          { "class_name": "Conv2D",
            "config": { "name": "conv2d", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "MaxPooling2D",
            "config": { "name": "max_pooling2d", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last" } },
          { "class_name": "Dropout",
            "config": { "name": "dropout", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "rate": 0.25, "seed": null, "noise_shape": null } },
          { "class_name": "Conv2D",
            "config": { "name": "conv2d_1", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization_1", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "MaxPooling2D",
            "config": { "name": "max_pooling2d_1", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last" } },
          { "class_name": "Dropout",
            "config": { "name": "dropout_1", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "rate": 0.25, "seed": null, "noise_shape": null } },
          { "class_name": "Conv2D",
            "config": { "name": "conv2d_2", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization_2", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "MaxPooling2D",
            "config": { "name": "max_pooling2d_2", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last" } },
          { "class_name": "Dropout",
            "config": { "name": "dropout_2", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "rate": 0.25, "seed": null, "noise_shape": null } },
          { "class_name": "Flatten",
            "config": { "name": "flatten", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "data_format": "channels_last" } },
          { "class_name": "Dense",
            "config": { "name": "dense", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "units": 128, "activation": "relu", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "kernel_constraint": null, "bias_constraint": null, "quantization_config": null } },
          { "class_name": "BatchNormalization",
            "config": { "name": "batch_normalization_3", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "axis": -1, "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true,
              "beta_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "gamma_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "moving_mean_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "moving_variance_initializer": { "module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null },
              "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null, "synchronized": false } },
          { "class_name": "Dropout",
            "config": { "name": "dropout_3", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "rate": 0.5, "seed": null, "noise_shape": null } },
          { "class_name": "Dense",
            "config": { "name": "dense_1", "trainable": true,
              "dtype": { "module": "keras", "class_name": "DTypePolicy", "config": { "name": "float32" }, "registered_name": null },
              "units": 10, "activation": "softmax", "use_bias": true,
              "kernel_initializer": { "module": "keras.initializers", "class_name": "GlorotUniform", "config": { "seed": null }, "registered_name": null },
              "bias_initializer": { "module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null },
              "kernel_regularizer": null, "bias_regularizer": null, "kernel_constraint": null, "bias_constraint": null, "quantization_config": null } }
        ],
        "build_input_shape": [null, 128, 64, 1]
      }
    },
    "training_config": {
      "loss": "sparse_categorical_crossentropy",
      "loss_weights": null,
      "metrics": ["accuracy"],
      "weighted_metrics": null,
      "run_eagerly": false,
      "steps_per_execution": 1,
      "jit_compile": false,
      "optimizer_config": {
        "class_name": "Adam",
        "config": { "name": "adam", "learning_rate": 0.0010000000474974513, "weight_decay": null, "clipnorm": null, "global_clipnorm": null, "clipvalue": null, "use_ema": false, "ema_momentum": 0.99, "ema_overwrite_frequency": null, "loss_scale_factor": null, "gradient_accumulation_steps": null, "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-7, "amsgrad": false }
      }
    }
  },
  "weightsManifest": [
    {
      "paths": ["group1-shard1of1.bin"],
      "weights": [
        { "name": "batch_normalization/gamma", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.970035195350647, "scale": 0.00039288062675326476, "original_dtype": "float32" } },
        { "name": "batch_normalization/beta", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.04866361422281639, "scale": 0.00040217862994063134, "original_dtype": "float32" } },
        { "name": "batch_normalization/moving_mean", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 1.0939256753772497e-5, "scale": 0.001048501559268391, "original_dtype": "float32" } },
        { "name": "batch_normalization/moving_variance", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.000532817910425365, "scale": 0.00016297123568388176, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/gamma", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.9726127982139587, "scale": 0.00019898110744999905, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/beta", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.06264814909766703, "scale": 0.00037290564939087515, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/moving_mean", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.12544548511505127, "scale": 0.001907470179539101, "original_dtype": "float32" } },
        { "name": "batch_normalization_1/moving_variance", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.042508192360401154, "scale": 0.002489794206385519, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/gamma", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.975760817527771, "scale": 0.0003113854165170707, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/beta", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.023137448749998037, "scale": 0.00013072004943501716, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/moving_mean", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.015866611152887344, "scale": 0.005222073358063605, "original_dtype": "float32" } },
        { "name": "batch_normalization_2/moving_variance", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.01432291604578495, "scale": 0.00944612571860061, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/gamma", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.9765098690986633, "scale": 0.0008689317048764697, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/beta", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.05253423078387391, "scale": 0.00032833894239921196, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/moving_mean", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 2.3402893845059225e-8, "scale": 0.124165194550534, "original_dtype": "float32" } },
        { "name": "batch_normalization_3/moving_variance", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": 0.000532600621227175, "scale": 0.8092722632006888, "original_dtype": "float32" } },
        { "name": "conv2d/kernel", "shape": [3, 3, 1, 32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.1684967933916578, "scale": 0.0012961291799358293, "original_dtype": "float32" } },
        { "name": "conv2d/bias", "shape": [32], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.014791351323034248, "scale": 0.00019462304372413485, "original_dtype": "float32" } },
        { "name": "conv2d_1/kernel", "shape": [3, 3, 32, 64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.14185832411635155, "scale": 0.0010912178778180888, "original_dtype": "float32" } },
        { "name": "conv2d_1/bias", "shape": [64], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.052345379924072934, "scale": 0.00033341006321065564, "original_dtype": "float32" } },
        { "name": "conv2d_2/kernel", "shape": [3, 3, 64, 128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.09215074052997664, "scale": 0.0007199276603904425, "original_dtype": "float32" } },
        { "name": "conv2d_2/bias", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.052666782806901374, "scale": 0.00035346834098591524, "original_dtype": "float32" } },
        { "name": "dense/kernel", "shape": [16384, 128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.1078803108311167, "scale": 0.0006960020053620432, "original_dtype": "float32" } },
        { "name": "dense/bias", "shape": [128], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.010696043731535184, "scale": 0.00013539295862702763, "original_dtype": "float32" } },
        { "name": "dense_1/kernel", "shape": [128, 10], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.26071277062098186, "scale": 0.002190863618663713, "original_dtype": "float32" } },
        { "name": "dense_1/bias", "shape": [10], "dtype": "float32", "quantization": { "dtype": "uint8", "min": -0.020677046455881174, "scale": 0.00016028718182853623, "original_dtype": "float32" } }
      ]
    }
  ]
}
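The model.json above describes a small Sequential CNN: three Conv2D/BatchNormalization/MaxPooling2D/Dropout stages (32, 64, 128 filters), then Flatten, Dense(128, relu), BatchNormalization, Dropout(0.5), and a 10-way softmax, over a 128x64 single-channel input, with uint8-quantized weights in group1-shard1of1.bin. Below is a minimal sketch of loading and running it in the browser with TensorFlow.js; the preprocessing steps (grayscale capture, 0-1 scaling) and the serving path are assumptions based on the file layout above, not code taken from this diff.

```ts
import * as tf from '@tensorflow/tfjs'

// Classify a single abacus-column crop into a digit 0-9.
async function classifyColumn(pixels: ImageData): Promise<{ digit: number; confidence: number }> {
  // Assumed public path; in the real app the model would likely be loaded once and cached.
  const model = await tf.loadLayersModel('/models/abacus-column-classifier/model.json')

  const probsTensor = tf.tidy(() => {
    const input = tf.browser
      .fromPixels(pixels, 1)      // 1 channel, matching batchInputShape [null, 128, 64, 1]
      .resizeBilinear([128, 64])  // height 128, width 64
      .toFloat()
      .div(255)                   // assumed 0-1 normalization
      .expandDims(0)              // add batch dimension
    return model.predict(input) as tf.Tensor
  })

  const probs = await probsTensor.data()
  probsTensor.dispose()

  const digit = probs.indexOf(Math.max(...probs)) // argmax over the 10 softmax units
  return { digit, confidence: probs[digit] }
}
```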
@@ -1,8 +1,9 @@
 'use client'

 import { useRouter } from 'next/navigation'
-import { useCallback, useMemo, useState } from 'react'
+import { useCallback, useEffect, useMemo, useState } from 'react'
 import { useToast } from '@/components/common/ToastContext'
+import { useMyAbacus } from '@/contexts/MyAbacusContext'
 import { PageWithNav } from '@/components/PageWithNav'
 import {
   ActiveSession,
@@ -43,6 +44,7 @@ interface PracticeClientProps {
 export function PracticeClient({ studentId, player, initialSession }: PracticeClientProps) {
   const router = useRouter()
   const { showError } = useToast()
+  const { setVisionFrameCallback } = useMyAbacus()

   // Track pause state for HUD display (ActiveSession owns the modal and actual pause logic)
   const [isPaused, setIsPaused] = useState(false)
@@ -168,7 +170,7 @@ export function PracticeClient({ studentId, player, initialSession }: PracticeCl
   // broadcastState is updated by ActiveSession via the onBroadcastStateChange callback
   // onAbacusControl receives control events from observing teacher
   // onTeacherPause/onTeacherResume receive pause/resume commands from teacher
-  const { sendPartTransition, sendPartTransitionComplete } = useSessionBroadcast(
+  const { sendPartTransition, sendPartTransitionComplete, sendVisionFrame } = useSessionBroadcast(
     currentPlan.id,
     studentId,
     broadcastState,
@@ -179,6 +181,17 @@ export function PracticeClient({ studentId, player, initialSession }: PracticeCl
     }
   )

+  // Wire vision frame callback to broadcast vision frames to observers
+  useEffect(() => {
+    setVisionFrameCallback((frame) => {
+      sendVisionFrame(frame.imageData, frame.detectedValue, frame.confidence)
+    })
+
+    return () => {
+      setVisionFrameCallback(null)
+    }
+  }, [setVisionFrameCallback, sendVisionFrame])
+
   // Build session HUD data for PracticeSubNav
   const sessionHud: SessionHudData | undefined = currentPart
     ? {
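The effect above forwards each vision frame from the MyAbacus context into the session broadcast. A hypothetical sketch of that payload's shape follows; only the three field names (imageData, detectedValue, confidence) appear in the diff, so the types are guesses.

```ts
// Assumed contract between setVisionFrameCallback and sendVisionFrame.
interface VisionFrame {
  imageData: string      // e.g. a JPEG data URL captured from the camera feed
  detectedValue: number  // abacus value read by the column classifier
  confidence: number     // classifier confidence, presumably in [0, 1]
}

type VisionFrameCallback = (frame: VisionFrame) => void
```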
@@ -94,9 +94,13 @@ export default function RemoteCameraPage() {
   // Validate session on mount
   useEffect(() => {
     async function validateSession() {
+      console.log('[RemoteCameraPage] Validating session:', sessionId)
       try {
         const response = await fetch(`/api/remote-camera?sessionId=${sessionId}`)
+        console.log('[RemoteCameraPage] Session validation response:', response.status)
         if (response.ok) {
           const data = await response.json()
+          console.log('[RemoteCameraPage] Session valid:', data)
           setSessionStatus('connected')
         } else if (response.status === 404) {
           setSessionStatus('expired')
@@ -107,6 +111,7 @@
           setSessionError(data.error || 'Failed to validate session')
         }
       } catch (err) {
+        console.error('[RemoteCameraPage] Session validation error:', err)
         setSessionStatus('error')
         setSessionError('Network error')
       }
@@ -301,7 +301,9 @@ export function AbacusDisplayDropdown({
   step="1"
   value={config.physicalAbacusColumns}
   onChange={(e) =>
-    updateConfig({ physicalAbacusColumns: parseInt(e.target.value, 10) })
+    updateConfig({
+      physicalAbacusColumns: parseInt(e.target.value, 10),
+    })
   }
   className={css({
     flex: 1,
@@ -9,6 +9,9 @@ import { createRoot } from 'react-dom/client'
|
||||
import { HomeHeroContext } from '@/contexts/HomeHeroContext'
|
||||
import { type DockAnimationState, useMyAbacus } from '@/contexts/MyAbacusContext'
|
||||
import { useTheme } from '@/contexts/ThemeContext'
|
||||
import { DockedVisionFeed } from '@/components/vision/DockedVisionFeed'
|
||||
import { VisionIndicator } from '@/components/vision/VisionIndicator'
|
||||
import { VisionSetupModal } from '@/components/vision/VisionSetupModal'
|
||||
import { css } from '../../styled-system/css'
|
||||
|
||||
/**
|
||||
@@ -85,6 +88,8 @@ export function MyAbacus() {
|
||||
clearDockRequest,
|
||||
abacusValue: contextAbacusValue,
|
||||
setDockedValue,
|
||||
visionConfig,
|
||||
isVisionSetupComplete,
|
||||
} = useMyAbacus()
|
||||
const appConfig = useAbacusConfig()
|
||||
const pathname = usePathname()
|
||||
@@ -493,6 +498,9 @@ export function MyAbacus() {
|
||||
position: 'relative',
|
||||
})}
|
||||
>
|
||||
{/* Vision indicator - positioned at top-right, before undock button */}
|
||||
<VisionIndicator size="small" position="top-left" />
|
||||
|
||||
{/* Undock button - positioned at top-right of dock container */}
|
||||
<button
|
||||
data-action="undock-abacus"
|
||||
@@ -536,44 +544,67 @@ export function MyAbacus() {
            data-element="abacus-display"
            className={css({
              filter: 'drop-shadow(0 4px 12px rgba(251, 191, 36, 0.2))',
              width: '100%',
              height: '100%',
            })}
          >
            <AbacusReact
              key="docked"
              value={dock.value ?? abacusValue}
              defaultValue={dock.defaultValue}
              columns={dock.columns ?? 5}
              scaleFactor={effectiveScaleFactor}
              beadShape={appConfig.beadShape}
              showNumbers={dock.showNumbers ?? true}
              interactive={dock.interactive ?? true}
              animated={dock.animated ?? true}
              customStyles={structuralStyles}
              onValueChange={(newValue: number | bigint) => {
                const numValue = Number(newValue)
                // Update the appropriate state based on dock mode
                // (unless dock provides its own value prop for full control)
                if (dock.value === undefined) {
                  // When docked by user, update context value; otherwise update local/hero
                  if (isDockedByUser) {
                    setDockedValue(numValue)
                  } else {
                    setAbacusValue(numValue)
                  }
                }
                // Also call dock's callback if provided
                if (dock.onValueChange) {
                  dock.onValueChange(numValue)
                }
              }}
              enhanced3d="realistic"
              material3d={{
                heavenBeads: 'glossy',
                earthBeads: 'satin',
                lighting: 'dramatic',
                woodGrain: true,
              }}
            />
            {/* Show vision feed when enabled, otherwise show digital abacus */}
            {visionConfig.enabled && isVisionSetupComplete ? (
              <DockedVisionFeed
                columnCount={dock.columns ?? 5}
                onValueDetected={(value) => {
                  // Update the appropriate state based on dock mode
                  if (dock.value === undefined) {
                    if (isDockedByUser) {
                      setDockedValue(value)
                    } else {
                      setAbacusValue(value)
                    }
                  }
                  // Also call dock's callback if provided
                  if (dock.onValueChange) {
                    dock.onValueChange(value)
                  }
                }}
              />
            ) : (
              <AbacusReact
                key="docked"
                value={dock.value ?? abacusValue}
                defaultValue={dock.defaultValue}
                columns={dock.columns ?? 5}
                scaleFactor={effectiveScaleFactor}
                beadShape={appConfig.beadShape}
                showNumbers={dock.showNumbers ?? true}
                interactive={dock.interactive ?? true}
                animated={dock.animated ?? true}
                customStyles={structuralStyles}
                onValueChange={(newValue: number | bigint) => {
                  const numValue = Number(newValue)
                  // Update the appropriate state based on dock mode
                  // (unless dock provides its own value prop for full control)
                  if (dock.value === undefined) {
                    // When docked by user, update context value; otherwise update local/hero
                    if (isDockedByUser) {
                      setDockedValue(numValue)
                    } else {
                      setAbacusValue(numValue)
                    }
                  }
                  // Also call dock's callback if provided
                  if (dock.onValueChange) {
                    dock.onValueChange(numValue)
                  }
                }}
                enhanced3d="realistic"
                material3d={{
                  heavenBeads: 'glossy',
                  earthBeads: 'satin',
                  lighting: 'dramatic',
                  woodGrain: true,
                }}
              />
            )}
          </div>
        </div>,
        dock.element
@@ -820,6 +851,9 @@ export function MyAbacus() {
          `,
        }}
      />

      {/* Vision setup modal - controlled by context state */}
      <VisionSetupModal />
    </>
  )
}
@@ -21,6 +21,7 @@ import { PracticeFeedback } from '../practice/PracticeFeedback'
import { PurposeBadge } from '../practice/PurposeBadge'
import { SessionProgressIndicator } from '../practice/SessionProgressIndicator'
import { VerticalProblem } from '../practice/VerticalProblem'
import { ObserverVisionFeed } from '../vision/ObserverVisionFeed'

interface SessionObserverModalProps {
  /** Whether the modal is open */
@@ -162,6 +163,7 @@ export function SessionObserverView({
  state,
  results,
  transitionState,
  visionFrame,
  isConnected,
  isObserving,
  error,
@@ -756,15 +758,9 @@ export function SessionObserverView({
        />
      </div>

      {/* AbacusDock - positioned exactly like ActiveSession */}
      {/* Vision feed or AbacusDock - positioned exactly like ActiveSession */}
      {state.phase === 'problem' && (problemHeight ?? 0) > 0 && (
        <AbacusDock
          id="teacher-observer-dock"
          columns={abacusColumns}
          interactive={true}
          showNumbers={false}
          animated={true}
          onValueChange={handleTeacherAbacusChange}
        <div
          className={css({
            position: 'absolute',
            left: '100%',
@@ -773,7 +769,22 @@ export function SessionObserverView({
            marginLeft: '1.5rem',
          })}
          style={{ height: problemHeight ?? undefined }}
        />
        >
          {/* Show vision feed if available, otherwise show teacher's abacus dock */}
          {visionFrame ? (
            <ObserverVisionFeed frame={visionFrame} />
          ) : (
            <AbacusDock
              id="teacher-observer-dock"
              columns={abacusColumns}
              interactive={true}
              showNumbers={false}
              animated={true}
              onValueChange={handleTeacherAbacusChange}
              style={{ height: '100%' }}
            />
          )}
        </div>
      )}
    </div>
@@ -7,8 +7,6 @@ import { useMyAbacus } from '@/contexts/MyAbacusContext'
import { useTheme } from '@/contexts/ThemeContext'
import {
  getCurrentProblemInfo,
  isInRetryEpoch,
  needsRetryTransition,
  type ProblemSlot,
  type SessionHealth,
  type SessionPart,
@@ -50,8 +48,6 @@ import { PracticeHelpOverlay } from './PracticeHelpOverlay'
import { ProblemDebugPanel } from './ProblemDebugPanel'
import { VerticalProblem } from './VerticalProblem'
import type { ReceivedAbacusControl } from '@/hooks/useSessionBroadcast'
import { AbacusVisionBridge } from '../vision'
import { Z_INDEX } from '@/constants/zIndex'

/**
 * Timing data for the current problem attempt
@@ -995,9 +991,6 @@ export function ActiveSession({
  // Track previous epoch to detect epoch changes
  const prevEpochRef = useRef<number>(0)

  // Vision mode state - for physical abacus camera detection
  const [isVisionEnabled, setIsVisionEnabled] = useState(false)

  // Browse mode state - isBrowseMode is controlled via props
  // browseIndex can be controlled (browseIndexProp + onBrowseIndexChange) or internal
  const [internalBrowseIndex, setInternalBrowseIndex] = useState(0)
@@ -1323,17 +1316,6 @@ export function ActiveSession({
    [setAnswer]
  )

  // Handle value detected from vision (physical abacus camera)
  const handleVisionValueDetected = useCallback(
    (value: number) => {
      // Update the docked abacus to show the detected value
      setDockedValue(value)
      // Also set the answer input
      setAnswer(String(value))
    },
    [setDockedValue, setAnswer]
  )

  // Handle submit
  const handleSubmit = useCallback(async () => {
    // Allow submitting from inputting, awaitingDisambiguation, or helpMode
@@ -1996,56 +1978,22 @@ export function ActiveSession({
        {/* Abacus dock - positioned absolutely so it doesn't affect problem centering */}
        {/* Width 100% matches problem width, height matches problem height */}
        {currentPart.type === 'abacus' && !showHelpOverlay && (problemHeight ?? 0) > 0 && (
          <>
            <AbacusDock
              id="practice-abacus"
              columns={calculateAbacusColumns(attempt.problem.terms)}
              interactive={true}
              showNumbers={false}
              animated={true}
              onValueChange={handleAbacusDockValueChange}
              className={css({
                position: 'absolute',
                left: '100%',
                top: 0,
                width: '100%',
                marginLeft: '1.5rem',
              })}
              style={{ height: problemHeight }}
            />
            {/* Vision mode toggle button */}
            <button
              type="button"
              data-action="toggle-vision"
              data-enabled={isVisionEnabled}
              onClick={() => setIsVisionEnabled((prev) => !prev)}
              className={css({
                position: 'absolute',
                left: '100%',
                bottom: 0,
                marginLeft: '1.5rem',
                px: 2,
                py: 1,
                display: 'flex',
                alignItems: 'center',
                gap: 1,
                fontSize: 'xs',
                bg: isVisionEnabled ? 'green.600' : isDark ? 'gray.700' : 'gray.200',
                color: isVisionEnabled ? 'white' : isDark ? 'gray.300' : 'gray.700',
                border: 'none',
                borderRadius: 'md',
                cursor: 'pointer',
                transition: 'all 0.2s',
                _hover: {
                  bg: isVisionEnabled ? 'green.500' : isDark ? 'gray.600' : 'gray.300',
                },
              })}
              title="Use camera to detect physical abacus"
            >
              <span>📷</span>
              <span>Vision</span>
            </button>
          </>
          <AbacusDock
            id="practice-abacus"
            columns={calculateAbacusColumns(attempt.problem.terms)}
            interactive={true}
            showNumbers={false}
            animated={true}
            onValueChange={handleAbacusDockValueChange}
            className={css({
              position: 'absolute',
              left: '100%',
              top: 0,
              width: '100%',
              marginLeft: '1.5rem',
            })}
            style={{ height: problemHeight }}
          />
        )}
      </animated.div>
    </animated.div>
@@ -2130,27 +2078,6 @@ export function ActiveSession({
        />
      )}

      {/* Abacus Vision Bridge - floating camera panel for physical abacus detection */}
      {isVisionEnabled && currentPart.type === 'abacus' && attempt && (
        <div
          data-component="vision-panel"
          className={css({
            position: 'fixed',
            top: '200px', // Below main nav (80px) + sub nav (~56px) + mini sub-nav (~60px)
            right: '1rem',
            zIndex: Z_INDEX.DROPDOWN, // Above content but below modals
            boxShadow: 'xl',
            borderRadius: 'xl',
          })}
        >
          <AbacusVisionBridge
            columnCount={abacusDisplayConfig.physicalAbacusColumns}
            onValueDetected={handleVisionValueDetected}
            onClose={() => setIsVisionEnabled(false)}
          />
        </div>
      )}

      {/* Session Paused Modal - rendered here as single source of truth */}
      <SessionPausedModal
        isOpen={isPaused}
(File diff suppressed because it is too large.)

apps/web/src/components/vision/DockedVisionFeed.tsx (new file, 697 lines)
@@ -0,0 +1,697 @@
'use client'

import { useCallback, useEffect, useRef, useState } from 'react'
import { useMyAbacus } from '@/contexts/MyAbacusContext'
import { useRemoteCameraDesktop } from '@/hooks/useRemoteCameraDesktop'
import {
  cleanupArucoDetector,
  detectMarkers,
  initArucoDetector,
  isArucoAvailable,
  loadAruco,
} from '@/lib/vision/arucoDetection'
import { useFrameStability } from '@/hooks/useFrameStability'
import { VisionCameraFeed } from './VisionCameraFeed'
import { css } from '../../../styled-system/css'
import type { CalibrationGrid } from '@/types/vision'

/**
 * Feature flag: Enable automatic abacus value detection from video feed.
 *
 * When enabled:
 * - Runs CV-based bead detection on video frames
 * - Shows detected value overlay
 * - Calls setDockedValue and onValueDetected with detected values
 *
 * When disabled:
 * - Only shows the video feed (no detection)
 * - Hides the detection overlay
 * - Does not interfere with student's manual input
 *
 * Set to true when ready to work on improving detection accuracy.
 */
const ENABLE_AUTO_DETECTION = false

// Only import detection modules when auto-detection is enabled
// This ensures the detection code is tree-shaken when disabled
let analyzeColumns: typeof import('@/lib/vision/beadDetector').analyzeColumns
let analysesToDigits: typeof import('@/lib/vision/beadDetector').analysesToDigits
let digitsToNumber: typeof import('@/lib/vision/beadDetector').digitsToNumber
let processVideoFrame: typeof import('@/lib/vision/frameProcessor').processVideoFrame
let processImageFrame: typeof import('@/lib/vision/frameProcessor').processImageFrame

if (ENABLE_AUTO_DETECTION) {
  // eslint-disable-next-line @typescript-eslint/no-require-imports
  const beadDetector = require('@/lib/vision/beadDetector')
  // eslint-disable-next-line @typescript-eslint/no-require-imports
  const frameProcessor = require('@/lib/vision/frameProcessor')
  analyzeColumns = beadDetector.analyzeColumns
  analysesToDigits = beadDetector.analysesToDigits
  digitsToNumber = beadDetector.digitsToNumber
  processVideoFrame = frameProcessor.processVideoFrame
  processImageFrame = frameProcessor.processImageFrame
}

interface DockedVisionFeedProps {
  /** Called when a stable value is detected */
  onValueDetected?: (value: number) => void
  /** Number of columns to detect */
  columnCount?: number
}

/**
 * Renders the processed camera feed in place of the docked abacus
 *
 * When vision is enabled in MyAbacusContext, this component:
 * - For local camera: Opens the saved camera, applies calibration, runs detection
 * - For remote camera: Receives frames from phone, runs detection
 * - Shows the video feed with detection overlay
 */
export function DockedVisionFeed({ onValueDetected, columnCount = 5 }: DockedVisionFeedProps) {
  const { visionConfig, setDockedValue, setVisionEnabled, setVisionCalibration, emitVisionFrame } =
    useMyAbacus()

  const videoRef = useRef<HTMLVideoElement>(null)
  const remoteImageRef = useRef<HTMLImageElement>(null)
  const rectifiedCanvasRef = useRef<HTMLCanvasElement | null>(null)
  const animationFrameRef = useRef<number | null>(null)
  const markerDetectionFrameRef = useRef<number | null>(null)
  const lastInferenceTimeRef = useRef<number>(0)
  const lastBroadcastTimeRef = useRef<number>(0)

  const [videoStream, setVideoStream] = useState<MediaStream | null>(null)
  const [error, setError] = useState<string | null>(null)
  const [isLoading, setIsLoading] = useState(true)
  const [detectedValue, setDetectedValue] = useState<number | null>(null)
  const [confidence, setConfidence] = useState(0)
  const [isArucoReady, setIsArucoReady] = useState(false)
  const [markersFound, setMarkersFound] = useState(0)

  // Stability tracking for detected values (hook must be called unconditionally)
  const stability = useFrameStability()

  // Determine camera source from explicit activeCameraSource field
  const isLocalCamera = visionConfig.activeCameraSource === 'local'
  const isRemoteCamera = visionConfig.activeCameraSource === 'phone'

  // Load and initialize ArUco on mount (for local camera auto-calibration)
  useEffect(() => {
    if (!isLocalCamera) return

    let cancelled = false

    const initAruco = async () => {
      try {
        await loadAruco()
        if (cancelled) return

        const available = isArucoAvailable()
        if (available) {
          initArucoDetector()
          setIsArucoReady(true)
        }
      } catch (err) {
        console.error('[DockedVisionFeed] Failed to load ArUco:', err)
      }
    }

    initAruco()
    return () => {
      cancelled = true
    }
  }, [isLocalCamera])

  // Cleanup ArUco detector on unmount
  useEffect(() => {
    return () => {
      cleanupArucoDetector()
    }
  }, [])

  // Auto-calibration loop using ArUco markers (for local camera)
  useEffect(() => {
    if (!visionConfig.enabled || !isLocalCamera || !videoStream || !isArucoReady) {
      if (markerDetectionFrameRef.current) {
        cancelAnimationFrame(markerDetectionFrameRef.current)
        markerDetectionFrameRef.current = null
      }
      return
    }

    const video = videoRef.current
    if (!video) return

    let running = true

    const detectLoop = () => {
      if (!running || !video || video.readyState < 2) {
        if (running) {
          markerDetectionFrameRef.current = requestAnimationFrame(detectLoop)
        }
        return
      }

      const result = detectMarkers(video)
      setMarkersFound(result.markersFound)

      // Auto-update calibration when all 4 markers found
      if (result.allMarkersFound && result.quadCorners) {
        const grid: CalibrationGrid = {
          roi: {
            x: Math.min(result.quadCorners.topLeft.x, result.quadCorners.bottomLeft.x),
            y: Math.min(result.quadCorners.topLeft.y, result.quadCorners.topRight.y),
            width:
              Math.max(result.quadCorners.topRight.x, result.quadCorners.bottomRight.x) -
              Math.min(result.quadCorners.topLeft.x, result.quadCorners.bottomLeft.x),
            height:
              Math.max(result.quadCorners.bottomLeft.y, result.quadCorners.bottomRight.y) -
              Math.min(result.quadCorners.topLeft.y, result.quadCorners.topRight.y),
          },
          corners: result.quadCorners,
          columnCount,
          columnDividers: Array.from({ length: columnCount - 1 }, (_, i) => (i + 1) / columnCount),
          rotation: 0,
        }
        // Update calibration in context
        setVisionCalibration(grid)
      }

      markerDetectionFrameRef.current = requestAnimationFrame(detectLoop)
    }

    detectLoop()

    return () => {
      running = false
      if (markerDetectionFrameRef.current) {
        cancelAnimationFrame(markerDetectionFrameRef.current)
        markerDetectionFrameRef.current = null
      }
    }
  }, [
    visionConfig.enabled,
    isLocalCamera,
    videoStream,
    isArucoReady,
    columnCount,
    setVisionCalibration,
  ])

  // Remote camera hook
  const {
    isPhoneConnected: remoteIsPhoneConnected,
    latestFrame: remoteLatestFrame,
    subscribe: remoteSubscribe,
    unsubscribe: remoteUnsubscribe,
  } = useRemoteCameraDesktop()

  const INFERENCE_INTERVAL_MS = 100 // 10fps

  // Start local camera when component mounts (only for local camera)
  useEffect(() => {
    if (!visionConfig.enabled || !isLocalCamera || !visionConfig.cameraDeviceId) {
      return
    }

    let cancelled = false
    setIsLoading(true)
    setError(null)

    const startCamera = async () => {
      try {
        const stream = await navigator.mediaDevices.getUserMedia({
          video: {
            deviceId: { exact: visionConfig.cameraDeviceId! },
            width: { ideal: 1280 },
            height: { ideal: 720 },
          },
        })

        if (cancelled) {
          stream.getTracks().forEach((track) => track.stop())
          return
        }

        setVideoStream(stream)
        setIsLoading(false)
      } catch (err) {
        if (cancelled) return
        console.error('[DockedVisionFeed] Failed to start camera:', err)
        setError('Failed to access camera')
        setIsLoading(false)
      }
    }

    startCamera()

    return () => {
      cancelled = true
    }
  }, [visionConfig.enabled, isLocalCamera, visionConfig.cameraDeviceId])

  // Stop camera when stream changes or component unmounts
  useEffect(() => {
    return () => {
      if (videoStream) {
        videoStream.getTracks().forEach((track) => track.stop())
      }
    }
  }, [videoStream])

  // Attach stream to video element
  useEffect(() => {
    if (videoRef.current && videoStream) {
      videoRef.current.srcObject = videoStream
    }
  }, [videoStream])

  // Subscribe to remote camera session
  useEffect(() => {
    if (!visionConfig.enabled || !isRemoteCamera || !visionConfig.remoteCameraSessionId) {
      return
    }

    setIsLoading(true)
    remoteSubscribe(visionConfig.remoteCameraSessionId)

    return () => {
      remoteUnsubscribe()
    }
  }, [
    visionConfig.enabled,
    isRemoteCamera,
    visionConfig.remoteCameraSessionId,
    remoteSubscribe,
    remoteUnsubscribe,
  ])

  // Update loading state when remote camera connects
  useEffect(() => {
    if (isRemoteCamera && remoteIsPhoneConnected) {
      setIsLoading(false)
    }
  }, [isRemoteCamera, remoteIsPhoneConnected])

  // Process local camera frames for detection (only when enabled)
  const processLocalFrame = useCallback(() => {
    // Skip detection when feature is disabled
    if (!ENABLE_AUTO_DETECTION) return

    const now = performance.now()
    if (now - lastInferenceTimeRef.current < INFERENCE_INTERVAL_MS) {
      return
    }
    lastInferenceTimeRef.current = now

    const video = videoRef.current
    if (!video || video.readyState < 2) return
    if (!visionConfig.calibration) return

    // Process video frame into column strips
    const columnImages = processVideoFrame(video, visionConfig.calibration)
    if (columnImages.length === 0) return

    // Use CV-based bead detection
    const analyses = analyzeColumns(columnImages)
    const { digits, minConfidence } = analysesToDigits(analyses)

    // Convert to number
    const value = digitsToNumber(digits)

    // Push to stability buffer
    stability.pushFrame(value, minConfidence)
  }, [visionConfig.calibration, stability])

  // Process remote camera frames for detection (only when enabled)
  useEffect(() => {
    // Skip detection when feature is disabled
    if (!ENABLE_AUTO_DETECTION) return

    if (!isRemoteCamera || !remoteIsPhoneConnected || !remoteLatestFrame) {
      return
    }

    const now = performance.now()
    if (now - lastInferenceTimeRef.current < INFERENCE_INTERVAL_MS) {
      return
    }
    lastInferenceTimeRef.current = now

    const image = remoteImageRef.current
    if (!image || !image.complete || image.naturalWidth === 0) {
      return
    }

    // Phone sends pre-cropped frames in auto mode, so no calibration needed
    const columnImages = processImageFrame(image, null, columnCount)
    if (columnImages.length === 0) return

    // Use CV-based bead detection
    const analyses = analyzeColumns(columnImages)
    const { digits, minConfidence } = analysesToDigits(analyses)

    // Convert to number
    const value = digitsToNumber(digits)

    // Push to stability buffer
    stability.pushFrame(value, minConfidence)
  }, [isRemoteCamera, remoteIsPhoneConnected, remoteLatestFrame, columnCount, stability])

  // Local camera detection loop (only when enabled)
  useEffect(() => {
    // Skip detection loop when feature is disabled
    if (!ENABLE_AUTO_DETECTION) return

    if (!visionConfig.enabled || !isLocalCamera || !videoStream || !visionConfig.calibration) {
      return
    }

    let running = true

    const loop = () => {
      if (!running) return

      processLocalFrame()
      animationFrameRef.current = requestAnimationFrame(loop)
    }

    loop()

    return () => {
      running = false
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current)
        animationFrameRef.current = null
      }
    }
  }, [
    visionConfig.enabled,
    isLocalCamera,
    videoStream,
    visionConfig.calibration,
    processLocalFrame,
  ])

  // Handle stable value changes (only when auto-detection is enabled)
  useEffect(() => {
    // Skip value updates when feature is disabled
    if (!ENABLE_AUTO_DETECTION) return

    if (stability.stableValue !== null && stability.stableValue !== detectedValue) {
      setDetectedValue(stability.stableValue)
      setConfidence(stability.currentConfidence)
      setDockedValue(stability.stableValue)
      onValueDetected?.(stability.stableValue)
    }
  }, [
    stability.stableValue,
    stability.currentConfidence,
    detectedValue,
    setDockedValue,
    onValueDetected,
  ])

  // Broadcast vision frames to observers (5fps to save bandwidth)
  const BROADCAST_INTERVAL_MS = 200
  useEffect(() => {
    if (!visionConfig.enabled) return

    let running = true

    const broadcastLoop = () => {
      if (!running) return

      const now = performance.now()
      if (now - lastBroadcastTimeRef.current >= BROADCAST_INTERVAL_MS) {
        lastBroadcastTimeRef.current = now

        // Capture from rectified canvas (local camera) or remote image
        let imageData: string | null = null

        if (isLocalCamera && rectifiedCanvasRef.current) {
          const canvas = rectifiedCanvasRef.current
          if (canvas.width > 0 && canvas.height > 0) {
            // Convert canvas to JPEG (quality 0.7 for bandwidth)
            imageData = canvas.toDataURL('image/jpeg', 0.7).replace('data:image/jpeg;base64,', '')
          }
        } else if (isRemoteCamera && remoteLatestFrame) {
          // Remote camera already sends base64 JPEG
          imageData = remoteLatestFrame.imageData
        }

        if (imageData) {
          emitVisionFrame({
            imageData,
            detectedValue,
            confidence,
          })
        }
      }

      requestAnimationFrame(broadcastLoop)
    }

    broadcastLoop()

    return () => {
      running = false
    }
  }, [
    visionConfig.enabled,
    isLocalCamera,
    isRemoteCamera,
    remoteLatestFrame,
    detectedValue,
    confidence,
    emitVisionFrame,
  ])

  const handleDisableVision = (e: React.MouseEvent) => {
    e.stopPropagation()
    setVisionEnabled(false)
    if (videoStream) {
      videoStream.getTracks().forEach((track) => track.stop())
    }
  }

  if (error) {
    return (
      <div
        data-component="docked-vision-feed"
        data-status="error"
        className={css({
          display: 'flex',
          flexDirection: 'column',
          alignItems: 'center',
          justifyContent: 'center',
          gap: 2,
          p: 4,
          bg: 'red.900/30',
          borderRadius: 'lg',
          color: 'red.400',
          textAlign: 'center',
        })}
      >
        <span className={css({ fontSize: 'xl' })}>⚠️</span>
        <span className={css({ fontSize: 'sm' })}>{error}</span>
        <button
          type="button"
          onClick={handleDisableVision}
          className={css({
            mt: 2,
            px: 3,
            py: 1,
            bg: 'gray.700',
            color: 'white',
            borderRadius: 'md',
            fontSize: 'xs',
            border: 'none',
            cursor: 'pointer',
          })}
        >
          Disable Vision
        </button>
      </div>
    )
  }

  if (isLoading) {
    return (
      <div
        data-component="docked-vision-feed"
        data-status="loading"
        className={css({
          display: 'flex',
          flexDirection: 'column',
          alignItems: 'center',
          justifyContent: 'center',
          gap: 2,
          p: 4,
          bg: 'gray.800/50',
          borderRadius: 'lg',
          color: 'gray.400',
        })}
      >
        <span className={css({ fontSize: 'xl' })}>📷</span>
        <span className={css({ fontSize: 'sm' })}>
          {isRemoteCamera ? 'Connecting to phone...' : 'Starting camera...'}
        </span>
      </div>
    )
  }

  return (
    <div
      data-component="docked-vision-feed"
      data-status="active"
      data-source={isRemoteCamera ? 'remote' : 'local'}
      className={css({
        position: 'relative',
        display: 'flex',
        flexDirection: 'column',
        alignItems: 'center',
        overflow: 'hidden',
        borderRadius: 'lg',
        bg: 'black',
        width: '100%',
        height: '100%',
      })}
    >
      {/* Rectified video feed - local camera */}
      {isLocalCamera && (
        <VisionCameraFeed
          videoStream={videoStream}
          calibration={visionConfig.calibration}
          showRectifiedView={true}
          videoRef={(el) => {
            videoRef.current = el
          }}
          rectifiedCanvasRef={(el) => {
            rectifiedCanvasRef.current = el
          }}
        />
      )}

      {/* Remote camera feed */}
      {isRemoteCamera && remoteLatestFrame && (
        <img
          ref={remoteImageRef}
          src={`data:image/jpeg;base64,${remoteLatestFrame.imageData}`}
          alt="Phone camera view"
          className={css({
            width: '100%',
            height: 'auto',
            objectFit: 'contain',
          })}
        />
      )}

      {/* Waiting for remote frames */}
      {isRemoteCamera && !remoteLatestFrame && remoteIsPhoneConnected && (
        <div
          className={css({
            width: '100%',
            aspectRatio: '2/1',
            display: 'flex',
            alignItems: 'center',
            justifyContent: 'center',
            color: 'gray.400',
            fontSize: 'sm',
          })}
        >
          Waiting for frames...
        </div>
      )}

      {/* Detection overlay - only shown when auto-detection is enabled */}
      {ENABLE_AUTO_DETECTION && (
        <div
          data-element="detection-overlay"
          className={css({
            position: 'absolute',
            bottom: 0,
            left: 0,
            right: 0,
            display: 'flex',
            justifyContent: 'space-between',
            alignItems: 'center',
            p: 2,
            bg: 'rgba(0, 0, 0, 0.7)',
            backdropFilter: 'blur(4px)',
          })}
        >
          {/* Detected value */}
          <div className={css({ display: 'flex', alignItems: 'center', gap: 2 })}>
            <span
              className={css({
                fontSize: 'lg',
                fontWeight: 'bold',
                color: 'white',
                fontFamily: 'mono',
              })}
            >
              {detectedValue !== null ? detectedValue : '---'}
            </span>
            {detectedValue !== null && (
              <span className={css({ fontSize: 'xs', color: 'gray.400' })}>
                {Math.round(confidence * 100)}%
              </span>
            )}
          </div>

          {/* Stability indicator */}
          <div className={css({ display: 'flex', alignItems: 'center', gap: 1 })}>
            {stability.consecutiveFrames > 0 && (
              <div className={css({ display: 'flex', gap: 0.5 })}>
                {Array.from({ length: 3 }).map((_, i) => (
                  <div
                    key={i}
                    className={css({
                      w: '6px',
                      h: '6px',
                      borderRadius: 'full',
                      bg: i < stability.consecutiveFrames ? 'green.500' : 'gray.600',
                    })}
                  />
                ))}
              </div>
            )}
          </div>
        </div>
      )}

      {/* Disable button */}
      <button
        type="button"
        data-action="disable-vision"
        onClick={handleDisableVision}
        title="Disable vision mode"
        className={css({
          position: 'absolute',
          top: '4px',
          right: '4px',
          w: '24px',
          h: '24px',
          display: 'flex',
          alignItems: 'center',
          justifyContent: 'center',
          bg: 'rgba(0, 0, 0, 0.5)',
          backdropFilter: 'blur(4px)',
          border: '1px solid rgba(255, 255, 255, 0.3)',
          borderRadius: 'md',
          color: 'white',
          fontSize: 'xs',
          cursor: 'pointer',
          zIndex: 10,
          opacity: 0.7,
          _hover: {
            bg: 'rgba(239, 68, 68, 0.8)',
            opacity: 1,
          },
        })}
      >
        ✕
      </button>
    </div>
  )
}
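The `stability` object above comes from `useFrameStability`, whose implementation is not part of this diff; only its call sites are. For orientation, a minimal sketch of a hook with the same surface (`pushFrame`, `stableValue`, `currentConfidence`, `consecutiveFrames`) could look like the following. The thresholds and reset rules here are illustrative assumptions, not the real hook:

import { useCallback, useRef, useState } from 'react'

// Illustrative thresholds (assumptions, not taken from the real useFrameStability)
const REQUIRED_CONSECUTIVE_FRAMES = 3
const MIN_CONFIDENCE = 0.6

export function useFrameStabilitySketch() {
  const lastValueRef = useRef<number | null>(null)
  const streakRef = useRef(0)
  const [consecutiveFrames, setConsecutiveFrames] = useState(0)
  const [stableValue, setStableValue] = useState<number | null>(null)
  const [currentConfidence, setCurrentConfidence] = useState(0)

  const pushFrame = useCallback((value: number, confidence: number) => {
    // Low-confidence frames break the streak instead of extending it
    if (confidence < MIN_CONFIDENCE) {
      lastValueRef.current = null
      streakRef.current = 0
      setConsecutiveFrames(0)
      return
    }
    if (value === lastValueRef.current) {
      streakRef.current += 1
    } else {
      lastValueRef.current = value
      streakRef.current = 1
    }
    setConsecutiveFrames(streakRef.current)
    // Promote to stable once the same value has held for enough frames
    if (streakRef.current >= REQUIRED_CONSECUTIVE_FRAMES) {
      setStableValue(value)
      setCurrentConfidence(confidence)
    }
  }, [])

  return { pushFrame, stableValue, currentConfidence, consecutiveFrames }
}

Debouncing like this is what lets DockedVisionFeed treat single-frame misreads from the bead detector as noise rather than forwarding every flicker to setDockedValue.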
apps/web/src/components/vision/ObserverVisionFeed.tsx (new file, 128 lines)
@@ -0,0 +1,128 @@
'use client'

import type { ObservedVisionFrame } from '@/hooks/useSessionObserver'
import { css } from '../../../styled-system/css'

interface ObserverVisionFeedProps {
  /** The latest vision frame from the observed student */
  frame: ObservedVisionFrame
}

/**
 * Displays the vision feed received from an observed student's session.
 *
 * Used in the SessionObserver modal when the student has abacus vision enabled.
 * Shows the processed camera feed with detection status overlay.
 */
export function ObserverVisionFeed({ frame }: ObserverVisionFeedProps) {
  // Calculate age of frame for staleness indicator
  const frameAge = Date.now() - frame.receivedAt
  const isStale = frameAge > 1000 // More than 1 second old

  return (
    <div
      data-component="observer-vision-feed"
      data-stale={isStale}
      className={css({
        position: 'relative',
        display: 'flex',
        flexDirection: 'column',
        borderRadius: 'lg',
        overflow: 'hidden',
        bg: 'black',
      })}
    >
      {/* Video frame */}
      <img
        src={`data:image/jpeg;base64,${frame.imageData}`}
        alt="Student's abacus vision feed"
        className={css({
          width: '100%',
          height: 'auto',
          display: 'block',
          opacity: isStale ? 0.5 : 1,
          transition: 'opacity 0.3s',
        })}
      />

      {/* Detection overlay */}
      <div
        data-element="detection-overlay"
        className={css({
          position: 'absolute',
          bottom: 0,
          left: 0,
          right: 0,
          display: 'flex',
          justifyContent: 'space-between',
          alignItems: 'center',
          p: 2,
          bg: 'rgba(0, 0, 0, 0.7)',
          backdropFilter: 'blur(4px)',
        })}
      >
        {/* Detected value */}
        <div className={css({ display: 'flex', alignItems: 'center', gap: 2 })}>
          <span
            className={css({
              fontSize: 'lg',
              fontWeight: 'bold',
              color: 'white',
              fontFamily: 'mono',
            })}
          >
            {frame.detectedValue !== null ? frame.detectedValue : '---'}
          </span>
          {frame.detectedValue !== null && (
            <span className={css({ fontSize: 'xs', color: 'gray.400' })}>
              {Math.round(frame.confidence * 100)}%
            </span>
          )}
        </div>

        {/* Live indicator */}
        <div className={css({ display: 'flex', alignItems: 'center', gap: 1 })}>
          <div
            className={css({
              w: '8px',
              h: '8px',
              borderRadius: 'full',
              bg: isStale ? 'gray.500' : 'green.500',
              animation: isStale ? 'none' : 'pulse 2s infinite',
            })}
          />
          <span
            className={css({
              fontSize: 'xs',
              color: isStale ? 'gray.500' : 'green.400',
            })}
          >
            {isStale ? 'Stale' : 'Live'}
          </span>
        </div>
      </div>

      {/* Vision mode badge */}
      <div
        data-element="vision-badge"
        className={css({
          position: 'absolute',
          top: '4px',
          left: '4px',
          display: 'flex',
          alignItems: 'center',
          gap: 1,
          px: 2,
          py: 1,
          bg: 'rgba(0, 0, 0, 0.6)',
          borderRadius: 'md',
          fontSize: 'xs',
          color: 'cyan.400',
        })}
      >
        <span>📷</span>
        <span>Vision</span>
      </div>
    </div>
  )
}
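A note on usage: as the SessionObserverView hunk earlier in this compare shows, the observer renders this component only while a frame is available and falls back to the interactive dock otherwise. A minimal consuming sketch (hypothetical wiring, not part of the diff):

import type { ObservedVisionFrame } from '@/hooks/useSessionObserver'
import { ObserverVisionFeed } from '@/components/vision/ObserverVisionFeed'

// Renders the student's feed when present; callers supply their own fallback
function ObserverFeedSlot({ frame }: { frame: ObservedVisionFrame | null }) {
  return frame ? <ObserverVisionFeed frame={frame} /> : null
}

One design consequence worth noting: isStale is computed at render time, so the Live/Stale label only refreshes when a new frame prop arrives or the parent re-renders; the unit tests later in this diff pin that behavior down with fake timers.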
@@ -1,6 +1,6 @@
'use client'

import { useEffect, useState } from 'react'
import { useEffect, useRef, useState } from 'react'
import { AbacusQRCode } from '@/components/common/AbacusQRCode'
import { useRemoteCameraSession } from '@/hooks/useRemoteCameraSession'
import { css } from '../../../styled-system/css'
@@ -32,6 +32,10 @@ export function RemoteCameraQRCode({
  const { session, isCreating, error, createSession, setExistingSession, getPhoneUrl } =
    useRemoteCameraSession()

  // Ref to track if we've already initiated session creation
  // This prevents React 18 Strict Mode from creating duplicate sessions
  const creationInitiatedRef = useRef(false)

  // If we have an existing session ID, use it instead of creating a new one
  useEffect(() => {
    if (existingSessionId && !session) {
@@ -40,8 +44,10 @@ export function RemoteCameraQRCode({
  }, [existingSessionId, session, setExistingSession])

  // Create session on mount only if no existing session
  // Use ref to prevent duplicate creation in React 18 Strict Mode
  useEffect(() => {
    if (!session && !isCreating && !existingSessionId) {
    if (!session && !isCreating && !existingSessionId && !creationInitiatedRef.current) {
      creationInitiatedRef.current = true
      createSession().then((newSession) => {
        if (newSession && onSessionCreated) {
          onSessionCreated(newSession.sessionId)
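The creationInitiatedRef guard above is a general pattern for one-shot, non-idempotent effects under React 18 Strict Mode, which mounts, unmounts, and remounts components in development and would otherwise fire createSession() twice. A minimal sketch of the same idea in isolation (createResource is a stand-in callback, not an API from this codebase):

import { useEffect, useRef } from 'react'

// Runs a non-idempotent async action at most once per component instance,
// even when Strict Mode double-invokes the effect in development.
function useRunOnce(createResource: () => Promise<void>) {
  const initiatedRef = useRef(false)

  useEffect(() => {
    if (initiatedRef.current) return // second Strict Mode pass is a no-op
    initiatedRef.current = true
    void createResource()
  }, [createResource])
}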
@@ -58,8 +58,14 @@ export function RemoteCameraReceiver({
  const [calibration, setCalibration] = useState<CalibrationGrid | null>(null)
  const containerRef = useRef<HTMLDivElement>(null)
  const imageRef = useRef<HTMLImageElement>(null)
  const [containerDimensions, setContainerDimensions] = useState({ width: 0, height: 0 })
  const [imageDimensions, setImageDimensions] = useState({ width: 0, height: 0 })
  const [containerDimensions, setContainerDimensions] = useState({
    width: 0,
    height: 0,
  })
  const [imageDimensions, setImageDimensions] = useState({
    width: 0,
    height: 0,
  })

  // Subscribe when sessionId changes
  useEffect(() => {
@@ -100,7 +106,10 @@ export function RemoteCameraReceiver({
  // Track image dimensions when it loads
  const handleImageLoad = useCallback((e: React.SyntheticEvent<HTMLImageElement>) => {
    const img = e.currentTarget
    setImageDimensions({ width: img.naturalWidth, height: img.naturalHeight })
    setImageDimensions({
      width: img.naturalWidth,
      height: img.naturalHeight,
    })
  }, [])

  // Create image src from base64 data
@@ -19,6 +19,8 @@ export interface VisionCameraFeedProps {
  showRectifiedView?: boolean
  /** Video element ref callback for external access */
  videoRef?: (el: HTMLVideoElement | null) => void
  /** Rectified canvas ref callback for external access (only when showRectifiedView=true) */
  rectifiedCanvasRef?: (el: HTMLCanvasElement | null) => void
  /** Called when video metadata is loaded (provides dimensions) */
  onVideoReady?: (width: number, height: number) => void
  /** Children rendered over the video (e.g., CalibrationOverlay) */
@@ -55,6 +57,7 @@ export function VisionCameraFeed({
  showCalibrationGrid = false,
  showRectifiedView = false,
  videoRef: externalVideoRef,
  rectifiedCanvasRef: externalCanvasRef,
  onVideoReady,
  children,
}: VisionCameraFeedProps): ReactNode {
@@ -82,6 +85,13 @@ export function VisionCameraFeed({
    }
  }, [externalVideoRef])

  // Set canvas ref for external access (when rectified view is active)
  useEffect(() => {
    if (externalCanvasRef && showRectifiedView) {
      externalCanvasRef(rectifiedCanvasRef.current)
    }
  }, [externalCanvasRef, showRectifiedView])

  // Attach stream to video element
  useEffect(() => {
    const video = internalVideoRef.current
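The new rectifiedCanvasRef prop mirrors the existing videoRef: both are callback refs, which lets a parent such as DockedVisionFeed capture the child's elements in refs of its own. A minimal consuming sketch, assuming videoStream and calibration are accepted as the props above suggest (the calibration={null} value here is an assumption for brevity):

import { useRef } from 'react'
import { VisionCameraFeed } from '@/components/vision/VisionCameraFeed'

function FeedConsumer({ stream }: { stream: MediaStream | null }) {
  // Local mirrors of the child's elements, used elsewhere for frame grabs
  const videoRef = useRef<HTMLVideoElement | null>(null)
  const canvasRef = useRef<HTMLCanvasElement | null>(null)

  return (
    <VisionCameraFeed
      videoStream={stream}
      calibration={null} // no calibration in this sketch
      showRectifiedView={true}
      videoRef={(el) => {
        videoRef.current = el
      }}
      rectifiedCanvasRef={(el) => {
        canvasRef.current = el
      }}
    />
  )
}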
apps/web/src/components/vision/VisionIndicator.tsx (new file, 122 lines)
@@ -0,0 +1,122 @@
'use client'

import { useMyAbacus } from '@/contexts/MyAbacusContext'
import { css } from '../../../styled-system/css'

interface VisionIndicatorProps {
  /** Size variant */
  size?: 'small' | 'medium'
  /** Position for absolute placement */
  position?: 'top-left' | 'bottom-right'
}

/**
 * Camera icon indicator for abacus vision mode
 *
 * Shows:
 * - 🔴 Red dot = not configured (no camera or no calibration)
 * - 🟢 Green dot = configured and enabled
 * - ⚪ Gray = configured but disabled
 *
 * Click behavior:
 * - If not configured: opens setup modal
 * - If configured: toggles vision on/off
 */
export function VisionIndicator({
  size = 'medium',
  position = 'bottom-right',
}: VisionIndicatorProps) {
  const { visionConfig, isVisionSetupComplete, openVisionSetup } = useMyAbacus()

  const handleClick = (e: React.MouseEvent) => {
    e.stopPropagation()
    // Always open setup modal on click for now
    // This gives users easy access to vision settings
    openVisionSetup()
  }

  const handleContextMenu = (e: React.MouseEvent) => {
    e.preventDefault()
    e.stopPropagation()
    // Right-click always opens setup
    openVisionSetup()
  }

  // Determine status indicator color
  const statusColor = !isVisionSetupComplete
    ? 'red.500' // Not configured
    : visionConfig.enabled
      ? 'green.500' // Enabled
      : 'gray.400' // Configured but disabled

  const statusLabel = !isVisionSetupComplete
    ? 'Vision not configured'
    : visionConfig.enabled
      ? 'Vision enabled'
      : 'Vision disabled'

  const sizeStyles =
    size === 'small'
      ? { w: '20px', h: '20px', fontSize: '10px' }
      : { w: '28px', h: '28px', fontSize: '14px' }

  const positionStyles =
    position === 'top-left'
      ? { top: 0, left: 0, margin: '4px' }
      : { bottom: 0, right: 0, margin: '4px' }

  return (
    <button
      type="button"
      data-vision-status={
        !isVisionSetupComplete ? 'not-configured' : visionConfig.enabled ? 'enabled' : 'disabled'
      }
      onClick={handleClick}
      onContextMenu={handleContextMenu}
      title={`${statusLabel} (right-click for settings)`}
      style={{
        position: 'absolute',
        ...positionStyles,
      }}
      className={css({
        ...sizeStyles,
        display: 'flex',
        alignItems: 'center',
        justifyContent: 'center',
        bg: 'rgba(0, 0, 0, 0.5)',
        backdropFilter: 'blur(4px)',
        border: '1px solid rgba(255, 255, 255, 0.3)',
        borderRadius: 'md',
        color: 'white',
        cursor: 'pointer',
        transition: 'all 0.2s',
        zIndex: 10,
        opacity: 0.8,
        _hover: {
          bg: 'rgba(0, 0, 0, 0.7)',
          opacity: 1,
          transform: 'scale(1.1)',
        },
      })}
    >
      {/* Camera icon */}
      <span style={{ position: 'relative' }}>
        📷{/* Status dot */}
        <span
          data-element="vision-status-dot"
          className={css({
            position: 'absolute',
            top: '-2px',
            right: '-4px',
            w: '8px',
            h: '8px',
            borderRadius: 'full',
            bg: statusColor,
            border: '1px solid white',
            boxShadow: '0 1px 2px rgba(0,0,0,0.3)',
          })}
        />
      </span>
    </button>
  )
}
apps/web/src/components/vision/VisionSetupModal.tsx (new file, 99 lines)
@@ -0,0 +1,99 @@
'use client'

import { useMyAbacus } from '@/contexts/MyAbacusContext'
import { css } from '../../../styled-system/css'
import { AbacusVisionBridge } from './AbacusVisionBridge'

/**
 * Modal for configuring abacus vision settings
 *
 * Renders AbacusVisionBridge directly in a draggable modal.
 * The bridge component handles all camera/calibration configuration.
 */
export function VisionSetupModal() {
  const {
    isVisionSetupOpen,
    closeVisionSetup,
    visionConfig,
    isVisionSetupComplete,
    setVisionEnabled,
    setVisionCamera,
    setVisionCalibration,
    setVisionRemoteSession,
    setVisionCameraSource,
    dock,
  } = useMyAbacus()

  const handleClearSettings = () => {
    setVisionCamera(null)
    setVisionCalibration(null)
    setVisionRemoteSession(null)
    setVisionCameraSource(null)
    setVisionEnabled(false)
  }

  const handleToggleVision = () => {
    setVisionEnabled(!visionConfig.enabled)
  }

  if (!isVisionSetupOpen) return null

  return (
    <div
      data-component="vision-setup-modal"
      className={css({
        position: 'fixed',
        inset: 0,
        bg: 'rgba(0, 0, 0, 0.7)',
        backdropFilter: 'blur(4px)',
        display: 'flex',
        alignItems: 'center',
        justifyContent: 'center',
        zIndex: 10000,
      })}
      onClick={closeVisionSetup}
      onKeyDown={(e) => {
        if (e.key === 'Escape') {
          closeVisionSetup()
        }
      }}
    >
      {/* AbacusVisionBridge is a motion.div with drag - stopPropagation prevents backdrop close */}
      <div onClick={(e) => e.stopPropagation()}>
        <AbacusVisionBridge
          columnCount={dock?.columns ?? 5}
          onValueDetected={() => {
            // Value detected - configuration is working
          }}
          onClose={closeVisionSetup}
          onConfigurationChange={(config) => {
            // Save configuration to context as it changes
            if (config.cameraDeviceId !== undefined) {
              setVisionCamera(config.cameraDeviceId)
            }
            if (config.calibration !== undefined) {
              setVisionCalibration(config.calibration)
            }
            if (config.remoteCameraSessionId !== undefined) {
              setVisionRemoteSession(config.remoteCameraSessionId)
            }
            if (config.activeCameraSource !== undefined) {
              setVisionCameraSource(config.activeCameraSource)
            }
          }}
          // Use saved activeCameraSource if available, otherwise infer from configs
          initialCameraSource={
            visionConfig.activeCameraSource ??
            (visionConfig.remoteCameraSessionId && !visionConfig.cameraDeviceId ? 'phone' : 'local')
          }
          // Show enable/disable and clear buttons
          showVisionControls={true}
          isVisionEnabled={visionConfig.enabled}
          isVisionSetupComplete={isVisionSetupComplete}
          onToggleVision={handleToggleVision}
          onClearSettings={handleClearSettings}
        />
      </div>
    </div>
  )
}
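The onConfigurationChange handler above checks each field against undefined rather than truthiness: null is a meaningful value ("cleared"), while undefined means "unchanged". Written out as a type, the implied update shape would be something like the following (an inference from the checks above, not a declaration taken from this diff):

import type { CalibrationGrid } from '@/types/vision'

// Partial update reported by AbacusVisionBridge: absent fields are untouched,
// explicit nulls clear the corresponding setting.
interface VisionBridgeConfigUpdate {
  cameraDeviceId?: string | null
  calibration?: CalibrationGrid | null
  remoteCameraSessionId?: string | null
  activeCameraSource?: 'local' | 'phone' | null
}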
@@ -0,0 +1,191 @@
/**
 * Unit tests for ObserverVisionFeed component
 *
 * Note: Canvas.Image mock is provided in src/test/setup.ts to prevent
 * jsdom errors with data URI images. Actual image rendering is verified
 * through integration/e2e tests.
 */
import { render, screen } from '@testing-library/react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import type { ObservedVisionFrame } from '@/hooks/useSessionObserver'
import { ObserverVisionFeed } from '../ObserverVisionFeed'

describe('ObserverVisionFeed', () => {
  const createMockFrame = (overrides?: Partial<ObservedVisionFrame>): ObservedVisionFrame => ({
    imageData: 'base64ImageData==',
    detectedValue: 123,
    confidence: 0.95,
    receivedAt: Date.now(),
    ...overrides,
  })

  beforeEach(() => {
    vi.useFakeTimers()
  })

  afterEach(() => {
    vi.useRealTimers()
  })

  describe('rendering', () => {
    it('renders the vision feed container', () => {
      const frame = createMockFrame()
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByRole('img')).toBeInTheDocument()
    })

    it('displays the image with correct src', () => {
      const frame = createMockFrame({ imageData: 'testImageData123' })
      render(<ObserverVisionFeed frame={frame} />)

      const img = screen.getByRole('img') as HTMLImageElement
      // Check the src property (not attribute) because our test setup
      // intercepts data:image/ src attributes to prevent jsdom canvas errors
      expect(img.src).toBe('data:image/jpeg;base64,testImageData123')
    })

    it('has appropriate alt text for accessibility', () => {
      const frame = createMockFrame()
      render(<ObserverVisionFeed frame={frame} />)

      const img = screen.getByRole('img')
      expect(img).toHaveAttribute('alt', "Student's abacus vision feed")
    })
  })

  describe('detected value display', () => {
    it('displays the detected value', () => {
      const frame = createMockFrame({ detectedValue: 456, confidence: 0.87 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('456')).toBeInTheDocument()
    })

    it('displays confidence percentage', () => {
      const frame = createMockFrame({ detectedValue: 123, confidence: 0.87 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('87%')).toBeInTheDocument()
    })

    it('displays dashes when detectedValue is null', () => {
      const frame = createMockFrame({ detectedValue: null, confidence: 0 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('---')).toBeInTheDocument()
    })

    it('hides confidence when value is null', () => {
      const frame = createMockFrame({ detectedValue: null, confidence: 0.95 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.queryByText('95%')).not.toBeInTheDocument()
    })

    it('handles zero as a valid detected value', () => {
      const frame = createMockFrame({ detectedValue: 0, confidence: 0.99 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('0')).toBeInTheDocument()
      expect(screen.getByText('99%')).toBeInTheDocument()
    })
  })

  describe('live/stale indicator', () => {
    it('shows Live status for fresh frames (less than 1 second old)', () => {
      const now = Date.now()
      vi.setSystemTime(now)

      const frame = createMockFrame({ receivedAt: now - 500 }) // 500ms ago
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('Live')).toBeInTheDocument()
    })

    it('shows Stale status for old frames (more than 1 second old)', () => {
      const now = Date.now()
      vi.setSystemTime(now)

      const frame = createMockFrame({ receivedAt: now - 1500 }) // 1.5 seconds ago
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('Stale')).toBeInTheDocument()
    })

    it('sets stale data attribute when frame is old', () => {
      const now = Date.now()
      vi.setSystemTime(now)

      const frame = createMockFrame({ receivedAt: now - 2000 }) // 2 seconds ago
      const { container } = render(<ObserverVisionFeed frame={frame} />)

      const component = container.querySelector('[data-component="observer-vision-feed"]')
      expect(component).toHaveAttribute('data-stale', 'true')
    })

    it('sets stale data attribute to false for fresh frames', () => {
      const now = Date.now()
      vi.setSystemTime(now)

      const frame = createMockFrame({ receivedAt: now - 100 }) // 100ms ago
      const { container } = render(<ObserverVisionFeed frame={frame} />)

      const component = container.querySelector('[data-component="observer-vision-feed"]')
      expect(component).toHaveAttribute('data-stale', 'false')
    })

    it('reduces image opacity for stale frames', () => {
      const now = Date.now()
      vi.setSystemTime(now)

      const frame = createMockFrame({ receivedAt: now - 2000 })
      render(<ObserverVisionFeed frame={frame} />)

      const img = screen.getByRole('img')
      // The opacity should be reduced for stale frames
      expect(img.className).toBeDefined()
    })
  })

  describe('vision badge', () => {
    it('displays the vision badge', () => {
      const frame = createMockFrame()
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('📷')).toBeInTheDocument()
      expect(screen.getByText('Vision')).toBeInTheDocument()
    })
  })

  describe('edge cases', () => {
    it('handles very large detected values', () => {
      const frame = createMockFrame({ detectedValue: 99999, confidence: 1.0 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('99999')).toBeInTheDocument()
      expect(screen.getByText('100%')).toBeInTheDocument()
    })

    it('rounds confidence to nearest integer', () => {
      const frame = createMockFrame({ detectedValue: 123, confidence: 0.876 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('88%')).toBeInTheDocument()
    })

    it('handles confidence edge case of exactly 1', () => {
      const frame = createMockFrame({ detectedValue: 123, confidence: 1.0 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('100%')).toBeInTheDocument()
    })

    it('handles confidence edge case of exactly 0', () => {
      const frame = createMockFrame({ detectedValue: 123, confidence: 0 })
      render(<ObserverVisionFeed frame={frame} />)

      expect(screen.getByText('0%')).toBeInTheDocument()
    })
  })
})
@@ -0,0 +1,173 @@
/**
 * Unit tests for VisionIndicator component
 */
import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { VisionIndicator } from '../VisionIndicator'

// Mock the MyAbacusContext
const mockOpenVisionSetup = vi.fn()
const mockVisionConfig = {
  enabled: false,
  cameraDeviceId: null,
  calibration: null,
  remoteCameraSessionId: null,
}

vi.mock('@/contexts/MyAbacusContext', () => ({
  useMyAbacus: () => ({
    visionConfig: mockVisionConfig,
    isVisionSetupComplete:
      mockVisionConfig.cameraDeviceId !== null && mockVisionConfig.calibration !== null,
    openVisionSetup: mockOpenVisionSetup,
  }),
}))

describe('VisionIndicator', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Reset to default state
    mockVisionConfig.enabled = false
    mockVisionConfig.cameraDeviceId = null
    mockVisionConfig.calibration = null
    mockVisionConfig.remoteCameraSessionId = null
  })

  describe('rendering', () => {
    it('renders the camera icon', () => {
      render(<VisionIndicator />)
      expect(screen.getByText('📷')).toBeInTheDocument()
    })

    it('renders with medium size by default', () => {
      render(<VisionIndicator />)
      const button = screen.getByRole('button')
      // Medium size button should exist with the vision-status attribute
      expect(button).toHaveAttribute('data-vision-status')
    })

    it('renders with small size when specified', () => {
      render(<VisionIndicator size="small" />)
      expect(screen.getByRole('button')).toBeInTheDocument()
    })
  })

  describe('status indicator', () => {
    it('shows not-configured status when camera is not set', () => {
      mockVisionConfig.cameraDeviceId = null
      mockVisionConfig.calibration = null

      render(<VisionIndicator />)
      const button = screen.getByRole('button')
      expect(button).toHaveAttribute('data-vision-status', 'not-configured')
    })

    it('shows disabled status when configured but not enabled', () => {
      mockVisionConfig.cameraDeviceId = 'camera-123'
      mockVisionConfig.calibration = {
        roi: { x: 0, y: 0, width: 100, height: 100 },
        columnCount: 5,
        columnDividers: [],
        rotation: 0,
      }
      mockVisionConfig.enabled = false

      render(<VisionIndicator />)
      const button = screen.getByRole('button')
      expect(button).toHaveAttribute('data-vision-status', 'disabled')
    })

    it('shows enabled status when configured and enabled', () => {
      mockVisionConfig.cameraDeviceId = 'camera-123'
      mockVisionConfig.calibration = {
        roi: { x: 0, y: 0, width: 100, height: 100 },
        columnCount: 5,
        columnDividers: [],
        rotation: 0,
      }
      mockVisionConfig.enabled = true

      render(<VisionIndicator />)
      const button = screen.getByRole('button')
      expect(button).toHaveAttribute('data-vision-status', 'enabled')
    })
  })

  describe('click behavior', () => {
    it('opens setup modal on click', () => {
      render(<VisionIndicator />)
      const button = screen.getByRole('button')

      fireEvent.click(button)

      expect(mockOpenVisionSetup).toHaveBeenCalledTimes(1)
    })

    it('opens setup modal on right-click', () => {
      render(<VisionIndicator />)
      const button = screen.getByRole('button')

      fireEvent.contextMenu(button)

      expect(mockOpenVisionSetup).toHaveBeenCalledTimes(1)
    })

    it('stops event propagation on click', () => {
      const parentClickHandler = vi.fn()

      render(
        <div onClick={parentClickHandler}>
          <VisionIndicator />
        </div>
      )
      const button = screen.getByRole('button')

      fireEvent.click(button)

      expect(parentClickHandler).not.toHaveBeenCalled()
    })
  })

  describe('accessibility', () => {
    it('has appropriate title based on status', () => {
      mockVisionConfig.cameraDeviceId = null

      render(<VisionIndicator />)
      const button = screen.getByRole('button')

      expect(button).toHaveAttribute('title', expect.stringContaining('not configured'))
    })

    it('updates title when vision is enabled', () => {
      mockVisionConfig.cameraDeviceId = 'camera-123'
      mockVisionConfig.calibration = {
        roi: { x: 0, y: 0, width: 100, height: 100 },
        columnCount: 5,
        columnDividers: [],
        rotation: 0,
      }
      mockVisionConfig.enabled = true

      render(<VisionIndicator />)
      const button = screen.getByRole('button')

      expect(button).toHaveAttribute('title', expect.stringContaining('enabled'))
    })
  })

  describe('positioning', () => {
    it('uses bottom-right position by default', () => {
      render(<VisionIndicator />)
      const button = screen.getByRole('button')

      expect(button.style.position).toBe('absolute')
    })

    it('accepts top-left position', () => {
      render(<VisionIndicator position="top-left" />)
      const button = screen.getByRole('button')

      expect(button.style.position).toBe('absolute')
    })
  })
})
@@ -6,9 +6,76 @@ import {
  type MutableRefObject,
  useCallback,
  useContext,
  useEffect,
  useRef,
  useState,
} from 'react'
import type { CalibrationGrid } from '@/types/vision'

/**
 * Camera source type for vision
 */
export type CameraSourceType = 'local' | 'phone'

/**
 * Configuration for abacus vision (camera-based input)
 */
export interface VisionConfig {
  /** Whether vision mode is enabled */
  enabled: boolean
  /** Selected camera device ID */
  cameraDeviceId: string | null
  /** Saved calibration grid for cropping */
  calibration: CalibrationGrid | null
  /** Remote phone camera session ID (for phone-as-camera mode) */
  remoteCameraSessionId: string | null
  /** Currently active camera source - tracks which camera is in use */
  activeCameraSource: CameraSourceType | null
}

const DEFAULT_VISION_CONFIG: VisionConfig = {
  enabled: false,
  cameraDeviceId: null,
  calibration: null,
  remoteCameraSessionId: null,
  activeCameraSource: null,
}

const VISION_CONFIG_STORAGE_KEY = 'abacus-vision-config'

/**
 * Load vision config from localStorage
 */
function loadVisionConfig(): VisionConfig {
  if (typeof window === 'undefined') return DEFAULT_VISION_CONFIG
  try {
    const stored = localStorage.getItem(VISION_CONFIG_STORAGE_KEY)
    if (stored) {
      const parsed = JSON.parse(stored)
      return {
        ...DEFAULT_VISION_CONFIG,
        ...parsed,
        // Always start with vision disabled - user must re-enable
        enabled: false,
      }
    }
  } catch (e) {
    console.error('[MyAbacusContext] Failed to load vision config:', e)
  }
  return DEFAULT_VISION_CONFIG
}

/**
 * Save vision config to localStorage
 */
function saveVisionConfig(config: VisionConfig): void {
  if (typeof window === 'undefined') return
  try {
    localStorage.setItem(VISION_CONFIG_STORAGE_KEY, JSON.stringify(config))
  } catch (e) {
    console.error('[MyAbacusContext] Failed to save vision config:', e)
  }
}

/**
 * Configuration for a docked abacus
@@ -54,6 +121,23 @@ export interface DockAnimationState {
  toScale: number
}

/**
 * Vision frame data for broadcasting
 */
export interface VisionFrameData {
  /** Base64-encoded JPEG image data */
  imageData: string
  /** Detected abacus value (null if not yet detected) */
  detectedValue: number | null
  /** Detection confidence (0-1) */
  confidence: number
}

/**
 * Callback type for vision frame broadcasting
 */
export type VisionFrameCallback = (frame: VisionFrameData) => void

interface MyAbacusContextValue {
  isOpen: boolean
  open: () => void
@@ -107,6 +191,31 @@ interface MyAbacusContextValue {
  setDockedValue: (value: number) => void
  /** Current abacus value (for reading) */
  abacusValue: number
  // Vision-related state
  /** Current vision configuration */
  visionConfig: VisionConfig
  /** Whether vision setup is complete (has camera and calibration) */
  isVisionSetupComplete: boolean
  /** Set whether vision is enabled */
  setVisionEnabled: (enabled: boolean) => void
  /** Set the selected camera device ID */
  setVisionCamera: (deviceId: string | null) => void
  /** Set the calibration grid */
  setVisionCalibration: (calibration: CalibrationGrid | null) => void
  /** Set the remote camera session ID */
  setVisionRemoteSession: (sessionId: string | null) => void
  /** Set the active camera source */
  setVisionCameraSource: (source: CameraSourceType | null) => void
  /** Whether the vision setup modal is open */
  isVisionSetupOpen: boolean
  /** Open the vision setup modal */
  openVisionSetup: () => void
  /** Close the vision setup modal */
  closeVisionSetup: () => void
  /** Set a callback for receiving vision frames (for broadcasting to observers) */
  setVisionFrameCallback: (callback: VisionFrameCallback | null) => void
  /** Emit a vision frame (called by DockedVisionFeed) */
  emitVisionFrame: (frame: VisionFrameData) => void
}

const MyAbacusContext = createContext<MyAbacusContextValue | undefined>(undefined)
@@ -124,6 +233,16 @@ export function MyAbacusProvider({ children }: { children: React.ReactNode }) {
  const [pendingDockRequest, setPendingDockRequest] = useState(false)
  const [abacusValue, setAbacusValue] = useState(0)

  // Vision state
  const [visionConfig, setVisionConfig] = useState<VisionConfig>(DEFAULT_VISION_CONFIG)
  const [isVisionSetupOpen, setIsVisionSetupOpen] = useState(false)

  // Load vision config from localStorage on mount
  useEffect(() => {
    const loaded = loadVisionConfig()
    setVisionConfig(loaded)
  }, [])

  const open = useCallback(() => setIsOpen(true), [])
  const close = useCallback(() => setIsOpen(false), [])
  const toggle = useCallback(() => setIsOpen((prev) => !prev), [])
@@ -200,6 +319,73 @@ export function MyAbacusProvider({ children }: { children: React.ReactNode }) {
    setAbacusValue(value)
  }, [])

  // Vision callbacks
  // Setup is complete if an active camera source is set and configured:
  // - Local camera: has camera device (calibration is optional - auto-crop works without it)
  // - Remote camera: has remote session ID (phone handles calibration)
  const isVisionSetupComplete =
    visionConfig.activeCameraSource !== null &&
    ((visionConfig.activeCameraSource === 'local' && visionConfig.cameraDeviceId !== null) ||
      (visionConfig.activeCameraSource === 'phone' && visionConfig.remoteCameraSessionId !== null))

  const setVisionEnabled = useCallback((enabled: boolean) => {
    setVisionConfig((prev) => {
      const updated = { ...prev, enabled }
      saveVisionConfig(updated)
      return updated
    })
  }, [])

  const setVisionCamera = useCallback((deviceId: string | null) => {
    setVisionConfig((prev) => {
      const updated = { ...prev, cameraDeviceId: deviceId }
      saveVisionConfig(updated)
      return updated
    })
  }, [])

  const setVisionCalibration = useCallback((calibration: CalibrationGrid | null) => {
    setVisionConfig((prev) => {
      const updated = { ...prev, calibration }
      saveVisionConfig(updated)
      return updated
    })
  }, [])

  const setVisionRemoteSession = useCallback((sessionId: string | null) => {
    setVisionConfig((prev) => {
      const updated = { ...prev, remoteCameraSessionId: sessionId }
      saveVisionConfig(updated)
      return updated
    })
  }, [])

  const setVisionCameraSource = useCallback((source: CameraSourceType | null) => {
    setVisionConfig((prev) => {
      const updated = { ...prev, activeCameraSource: source }
      saveVisionConfig(updated)
      return updated
    })
  }, [])

  const openVisionSetup = useCallback(() => {
    setIsVisionSetupOpen(true)
  }, [])
  const closeVisionSetup = useCallback(() => {
    setIsVisionSetupOpen(false)
  }, [])

  // Vision frame broadcasting
  const visionFrameCallbackRef = useRef<VisionFrameCallback | null>(null)

  const setVisionFrameCallback = useCallback((callback: VisionFrameCallback | null) => {
    visionFrameCallbackRef.current = callback
  }, [])

  const emitVisionFrame = useCallback((frame: VisionFrameData) => {
    visionFrameCallbackRef.current?.(frame)
  }, [])

  return (
    <MyAbacusContext.Provider
      value={{
@@ -233,6 +419,19 @@ export function MyAbacusProvider({ children }: { children: React.ReactNode }) {
        clearDockRequest,
        setDockedValue,
        abacusValue,
        // Vision
        visionConfig,
        isVisionSetupComplete,
        setVisionEnabled,
        setVisionCamera,
        setVisionCalibration,
        setVisionRemoteSession,
        setVisionCameraSource,
        isVisionSetupOpen,
        openVisionSetup,
        closeVisionSetup,
        setVisionFrameCallback,
        emitVisionFrame,
      }}
    >
      {children}
432 apps/web/src/contexts/__tests__/MyAbacusContext.vision.test.tsx Normal file
@@ -0,0 +1,432 @@
/**
 * Unit tests for MyAbacusContext vision functionality
 */
import { act, renderHook } from '@testing-library/react'
import type { ReactNode } from 'react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { MyAbacusProvider, useMyAbacus, type VisionFrameData } from '../MyAbacusContext'

// Mock localStorage
const localStorageMock = (() => {
  let store: Record<string, string> = {}
  return {
    getItem: vi.fn((key: string) => store[key] || null),
    setItem: vi.fn((key: string, value: string) => {
      store[key] = value
    }),
    removeItem: vi.fn((key: string) => {
      delete store[key]
    }),
    clear: vi.fn(() => {
      store = {}
    }),
  }
})()

Object.defineProperty(window, 'localStorage', { value: localStorageMock })

describe('MyAbacusContext - vision functionality', () => {
  const wrapper = ({ children }: { children: ReactNode }) => (
    <MyAbacusProvider>{children}</MyAbacusProvider>
  )

  beforeEach(() => {
    vi.clearAllMocks()
    localStorageMock.clear()
  })

  describe('visionConfig state', () => {
    it('starts with vision disabled', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.visionConfig.enabled).toBe(false)
    })

    it('starts with null cameraDeviceId', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.visionConfig.cameraDeviceId).toBeNull()
    })

    it('starts with null calibration', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.visionConfig.calibration).toBeNull()
    })

    it('starts with null remoteCameraSessionId', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.visionConfig.remoteCameraSessionId).toBeNull()
    })
  })

  describe('isVisionSetupComplete', () => {
    it('returns false when camera is not set', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.isVisionSetupComplete).toBe(false)
    })

    it('returns false when calibration is not set', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionCamera('camera-123')
      })

      expect(result.current.isVisionSetupComplete).toBe(false)
    })

    it('returns true when both camera and calibration are set', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionCamera('camera-123')
        result.current.setVisionCalibration({
          roi: { x: 0, y: 0, width: 100, height: 100 },
          columnCount: 5,
          columnDividers: [],
          rotation: 0,
        })
      })

      expect(result.current.isVisionSetupComplete).toBe(true)
    })
  })

  describe('setVisionEnabled', () => {
    it('enables vision mode', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionEnabled(true)
      })

      expect(result.current.visionConfig.enabled).toBe(true)
    })

    it('disables vision mode', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionEnabled(true)
      })

      act(() => {
        result.current.setVisionEnabled(false)
      })

      expect(result.current.visionConfig.enabled).toBe(false)
    })

    it('persists to localStorage', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionEnabled(true)
      })

      expect(localStorageMock.setItem).toHaveBeenCalledWith(
        'abacus-vision-config',
        expect.stringContaining('"enabled":true')
      )
    })
  })

  describe('setVisionCamera', () => {
    it('sets camera device ID', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionCamera('camera-device-123')
      })

      expect(result.current.visionConfig.cameraDeviceId).toBe('camera-device-123')
    })

    it('clears camera device ID when set to null', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionCamera('camera-123')
      })

      act(() => {
        result.current.setVisionCamera(null)
      })

      expect(result.current.visionConfig.cameraDeviceId).toBeNull()
    })

    it('persists to localStorage', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionCamera('camera-abc')
      })

      expect(localStorageMock.setItem).toHaveBeenCalledWith(
        'abacus-vision-config',
        expect.stringContaining('"cameraDeviceId":"camera-abc"')
      )
    })
  })

  describe('setVisionCalibration', () => {
    it('sets calibration grid', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      const calibration = {
        roi: { x: 10, y: 20, width: 200, height: 100 },
        columnCount: 5,
        columnDividers: [0.2, 0.4, 0.6, 0.8],
        rotation: 0,
      }

      act(() => {
        result.current.setVisionCalibration(calibration)
      })

      expect(result.current.visionConfig.calibration).toEqual(calibration)
    })

    it('clears calibration when set to null', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionCalibration({
          roi: { x: 0, y: 0, width: 100, height: 100 },
          columnCount: 5,
          columnDividers: [],
          rotation: 0,
        })
      })

      act(() => {
        result.current.setVisionCalibration(null)
      })

      expect(result.current.visionConfig.calibration).toBeNull()
    })
  })

  describe('setVisionRemoteSession', () => {
    it('sets remote camera session ID', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionRemoteSession('remote-session-456')
      })

      expect(result.current.visionConfig.remoteCameraSessionId).toBe('remote-session-456')
    })

    it('clears remote session when set to null', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.setVisionRemoteSession('session-123')
      })

      act(() => {
        result.current.setVisionRemoteSession(null)
      })

      expect(result.current.visionConfig.remoteCameraSessionId).toBeNull()
    })
  })

  describe('vision setup modal', () => {
    it('starts with modal closed', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.isVisionSetupOpen).toBe(false)
    })

    it('opens the setup modal', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.openVisionSetup()
      })

      expect(result.current.isVisionSetupOpen).toBe(true)
    })

    it('closes the setup modal', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      act(() => {
        result.current.openVisionSetup()
      })

      act(() => {
        result.current.closeVisionSetup()
      })

      expect(result.current.isVisionSetupOpen).toBe(false)
    })
  })

  describe('vision frame callback', () => {
    it('setVisionFrameCallback sets the callback', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      const callback = vi.fn()

      act(() => {
        result.current.setVisionFrameCallback(callback)
      })

      // The callback should be stored (we can verify by emitting a frame)
      const frame: VisionFrameData = {
        imageData: 'test',
        detectedValue: 123,
        confidence: 0.9,
      }

      act(() => {
        result.current.emitVisionFrame(frame)
      })

      expect(callback).toHaveBeenCalledWith(frame)
    })

    it('emitVisionFrame calls the registered callback', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      const callback = vi.fn()

      act(() => {
        result.current.setVisionFrameCallback(callback)
      })

      const frame: VisionFrameData = {
        imageData: 'base64data',
        detectedValue: 456,
        confidence: 0.85,
      }

      act(() => {
        result.current.emitVisionFrame(frame)
      })

      expect(callback).toHaveBeenCalledTimes(1)
      expect(callback).toHaveBeenCalledWith(frame)
    })

    it('emitVisionFrame does nothing when no callback is set', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      // This should not throw
      const frame: VisionFrameData = {
        imageData: 'test',
        detectedValue: 123,
        confidence: 0.9,
      }

      expect(() => {
        act(() => {
          result.current.emitVisionFrame(frame)
        })
      }).not.toThrow()
    })

    it('clearing callback stops emissions', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      const callback = vi.fn()

      act(() => {
        result.current.setVisionFrameCallback(callback)
      })

      act(() => {
        result.current.setVisionFrameCallback(null)
      })

      const frame: VisionFrameData = {
        imageData: 'test',
        detectedValue: 123,
        confidence: 0.9,
      }

      act(() => {
        result.current.emitVisionFrame(frame)
      })

      expect(callback).not.toHaveBeenCalled()
    })

    it('handles null detectedValue in frame', () => {
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      const callback = vi.fn()

      act(() => {
        result.current.setVisionFrameCallback(callback)
      })

      const frame: VisionFrameData = {
        imageData: 'test',
        detectedValue: null,
        confidence: 0,
      }

      act(() => {
        result.current.emitVisionFrame(frame)
      })

      expect(callback).toHaveBeenCalledWith({
        imageData: 'test',
        detectedValue: null,
        confidence: 0,
      })
    })
  })

  describe('localStorage persistence', () => {
    it('loads saved config from localStorage on mount', () => {
      const savedConfig = {
        enabled: false, // Always starts disabled per the code logic
        cameraDeviceId: 'saved-camera',
        calibration: {
          roi: { x: 0, y: 0, width: 100, height: 100 },
          columnCount: 5,
          columnDividers: [],
          rotation: 0,
        },
        remoteCameraSessionId: 'saved-session',
      }

      localStorageMock.getItem.mockReturnValueOnce(JSON.stringify(savedConfig))

      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      // Wait for effect to run
      expect(result.current.visionConfig.cameraDeviceId).toBe('saved-camera')
      // Note: enabled is always false on load per the implementation
      expect(result.current.visionConfig.enabled).toBe(false)
    })

    it('handles corrupted localStorage gracefully', () => {
      localStorageMock.getItem.mockReturnValueOnce('invalid json {{{')

      // Should not throw
      const { result } = renderHook(() => useMyAbacus(), { wrapper })

      expect(result.current.visionConfig).toBeDefined()
      expect(result.current.visionConfig.enabled).toBe(false)
    })
  })

  describe('negative cases', () => {
    it('throws when useMyAbacus is used outside provider', () => {
      // Using renderHook without the wrapper should throw
      expect(() => {
        renderHook(() => useMyAbacus())
      }).toThrow('useMyAbacus must be used within MyAbacusProvider')
    })
  })
})
498 apps/web/src/hooks/__tests__/useRemoteCameraDesktop.test.ts Normal file
@@ -0,0 +1,498 @@
/**
 * Tests for useRemoteCameraDesktop hook
 *
 * Tests session persistence, auto-reconnection, and Socket.IO event handling.
 */

import { act, renderHook, waitFor } from '@testing-library/react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { useRemoteCameraDesktop } from '../useRemoteCameraDesktop'

// Mock socket.io-client - use vi.hoisted for variables referenced in vi.mock
const { mockSocket, mockIo } = vi.hoisted(() => {
  const socket = {
    id: 'test-socket-id',
    on: vi.fn(),
    off: vi.fn(),
    emit: vi.fn(),
    disconnect: vi.fn(),
    connected: true,
  }
  return {
    mockSocket: socket,
    mockIo: vi.fn(() => socket),
  }
})

vi.mock('socket.io-client', () => ({
  io: mockIo,
}))

// Mock localStorage
const localStorageMock = (() => {
  let store: Record<string, string> = {}
  return {
    getItem: vi.fn((key: string) => store[key] || null),
    setItem: vi.fn((key: string, value: string) => {
      store[key] = value
    }),
    removeItem: vi.fn((key: string) => {
      delete store[key]
    }),
    clear: vi.fn(() => {
      store = {}
    }),
  }
})()

Object.defineProperty(window, 'localStorage', {
  value: localStorageMock,
})

describe('useRemoteCameraDesktop', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    localStorageMock.clear()
    // Reset mock socket handlers
    mockIo.mockClear()
    mockSocket.on.mockClear()
    mockSocket.off.mockClear()
    mockSocket.emit.mockClear()
  })

  afterEach(() => {
    vi.restoreAllMocks()
  })

  describe('initialization', () => {
    it('should initialize with default state', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      expect(result.current.isPhoneConnected).toBe(false)
      expect(result.current.latestFrame).toBeNull()
      expect(result.current.frameRate).toBe(0)
      expect(result.current.error).toBeNull()
      expect(result.current.currentSessionId).toBeNull()
      expect(result.current.isReconnecting).toBe(false)
    })

    it('should set up socket with reconnection config', () => {
      renderHook(() => useRemoteCameraDesktop())

      expect(mockIo).toHaveBeenCalledWith(
        expect.objectContaining({
          path: '/api/socket',
          reconnection: true,
          reconnectionDelay: 1000,
          reconnectionDelayMax: 5000,
          reconnectionAttempts: 10,
        })
      )
    })
  })

  describe('localStorage persistence', () => {
    it('should persist session ID when subscribing', async () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      // Simulate socket connect
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      if (connectHandler) {
        act(() => {
          connectHandler()
        })
      }

      // Subscribe to a session
      act(() => {
        result.current.subscribe('test-session-123')
      })

      expect(localStorageMock.setItem).toHaveBeenCalledWith(
        'remote-camera-session-id',
        'test-session-123'
      )
    })

    it('should return persisted session ID from getPersistedSessionId', () => {
      localStorageMock.getItem.mockReturnValue('persisted-session-456')

      const { result } = renderHook(() => useRemoteCameraDesktop())

      const persistedId = result.current.getPersistedSessionId()

      expect(persistedId).toBe('persisted-session-456')
    })

    it('should clear persisted session ID on clearSession', async () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      act(() => {
        result.current.clearSession()
      })

      expect(localStorageMock.removeItem).toHaveBeenCalledWith('remote-camera-session-id')
    })
  })

  describe('auto-reconnect on socket reconnect', () => {
    it('should re-subscribe to persisted session on socket connect', () => {
      localStorageMock.getItem.mockReturnValue('persisted-session-789')

      renderHook(() => useRemoteCameraDesktop())

      // Find the connect handler
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      expect(connectHandler).toBeDefined()

      // Simulate socket connect
      act(() => {
        connectHandler()
      })

      // Should emit subscribe with persisted session
      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:subscribe', {
        sessionId: 'persisted-session-789',
      })
    })

    it('should not subscribe if no persisted session', () => {
      localStorageMock.getItem.mockReturnValue(null)

      renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      // Should not emit subscribe
      expect(mockSocket.emit).not.toHaveBeenCalledWith('remote-camera:subscribe', expect.anything())
    })
  })

  describe('session subscription', () => {
    it('should emit subscribe event with session ID', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      // Simulate connection
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.subscribe('new-session-id')
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:subscribe', {
        sessionId: 'new-session-id',
      })
    })

    it('should update currentSessionId on subscribe', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      // Simulate connection
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.subscribe('my-session')
      })

      expect(result.current.currentSessionId).toBe('my-session')
    })
  })

  describe('event handling', () => {
    it('should handle phone connected event', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      // Find the event handler setup
      const setupHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:connected'
      )?.[1]

      if (setupHandler) {
        act(() => {
          setupHandler({ phoneConnected: true })
        })
      }

      expect(result.current.isPhoneConnected).toBe(true)
    })

    it('should handle phone disconnected event', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      // Set connected first
      const connectedHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:connected'
      )?.[1]
      if (connectedHandler) {
        act(() => {
          connectedHandler({ phoneConnected: true })
        })
      }

      // Then disconnect
      const disconnectedHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:disconnected'
      )?.[1]
      if (disconnectedHandler) {
        act(() => {
          disconnectedHandler({ phoneConnected: false })
        })
      }

      expect(result.current.isPhoneConnected).toBe(false)
    })

    it('should handle frame events', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const frameHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:frame'
      )?.[1]

      const testFrame = {
        imageData: 'base64-image-data',
        timestamp: Date.now(),
        mode: 'cropped' as const,
      }

      if (frameHandler) {
        act(() => {
          frameHandler(testFrame)
        })
      }

      expect(result.current.latestFrame).toEqual(testFrame)
    })

    it('should handle error events and clear invalid sessions', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const errorHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:error'
      )?.[1]

      if (errorHandler) {
        act(() => {
          errorHandler({ error: 'Invalid session' })
        })
      }

      expect(result.current.error).toBe('Invalid session')
      expect(localStorageMock.removeItem).toHaveBeenCalledWith('remote-camera-session-id')
    })

    it('should handle torch state events', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const torchHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:torch-state'
      )?.[1]

      if (torchHandler) {
        act(() => {
          torchHandler({ isTorchOn: true, isTorchAvailable: true })
        })
      }

      expect(result.current.isTorchOn).toBe(true)
      expect(result.current.isTorchAvailable).toBe(true)
    })
  })

  describe('calibration commands', () => {
    it('should emit calibration to phone', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      // Simulate connection and subscription
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('calibration-session')
      })

      const corners = {
        topLeft: { x: 0, y: 0 },
        topRight: { x: 100, y: 0 },
        bottomLeft: { x: 0, y: 100 },
        bottomRight: { x: 100, y: 100 },
      }

      act(() => {
        result.current.sendCalibration(corners)
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:set-calibration', {
        sessionId: 'calibration-session',
        corners,
      })
    })

    it('should emit clear calibration to phone', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('clear-cal-session')
      })

      act(() => {
        result.current.clearCalibration()
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:clear-calibration', {
        sessionId: 'clear-cal-session',
      })
    })
  })

  describe('frame mode control', () => {
    it('should emit frame mode change to phone', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('mode-session')
      })

      act(() => {
        result.current.setPhoneFrameMode('raw')
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:set-mode', {
        sessionId: 'mode-session',
        mode: 'raw',
      })
    })
  })

  describe('torch control', () => {
    it('should emit torch command to phone', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('torch-session')
      })

      act(() => {
        result.current.setRemoteTorch(true)
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:set-torch', {
        sessionId: 'torch-session',
        on: true,
      })
    })

    it('should optimistically update torch state', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('torch-session-2')
      })

      act(() => {
        result.current.setRemoteTorch(true)
      })

      expect(result.current.isTorchOn).toBe(true)
    })
  })

  describe('cleanup', () => {
    it('should emit leave on unsubscribe', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('leave-session')
      })

      act(() => {
        result.current.unsubscribe()
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:leave', {
        sessionId: 'leave-session',
      })
    })

    it('should reset state on unsubscribe', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('reset-session')
      })

      // Set some state
      const connectedHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:connected'
      )?.[1]
      if (connectedHandler) {
        act(() => {
          connectedHandler({ phoneConnected: true })
        })
      }

      act(() => {
        result.current.unsubscribe()
      })

      expect(result.current.isPhoneConnected).toBe(false)
      expect(result.current.latestFrame).toBeNull()
      expect(result.current.frameRate).toBe(0)
    })

    it('should clear all state on clearSession', () => {
      const { result } = renderHook(() => useRemoteCameraDesktop())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })
      act(() => {
        result.current.subscribe('clear-session')
      })

      act(() => {
        result.current.clearSession()
      })

      expect(result.current.currentSessionId).toBeNull()
      expect(result.current.isPhoneConnected).toBe(false)
      expect(result.current.isReconnecting).toBe(false)
      expect(localStorageMock.removeItem).toHaveBeenCalledWith('remote-camera-session-id')
    })
  })
})
498 apps/web/src/hooks/__tests__/useRemoteCameraPhone.test.ts Normal file
@@ -0,0 +1,498 @@
/**
 * Tests for useRemoteCameraPhone hook
 *
 * Tests socket connection, auto-reconnection, and frame sending behavior.
 */

import { act, renderHook } from '@testing-library/react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { useRemoteCameraPhone } from '../useRemoteCameraPhone'

// Mock socket.io-client - use vi.hoisted for variables referenced in vi.mock
const { mockSocket, mockIo } = vi.hoisted(() => {
  const socket = {
    id: 'test-phone-socket-id',
    on: vi.fn(),
    off: vi.fn(),
    emit: vi.fn(),
    disconnect: vi.fn(),
    connected: true,
  }
  return {
    mockSocket: socket,
    mockIo: vi.fn(() => socket),
  }
})

vi.mock('socket.io-client', () => ({
  io: mockIo,
}))

// Mock OpenCV loading
vi.mock('@/lib/vision/perspectiveTransform', () => ({
  loadOpenCV: vi.fn(() => Promise.resolve()),
  isOpenCVReady: vi.fn(() => true),
  rectifyQuadrilateralToBase64: vi.fn(() => 'mock-base64-image'),
}))

describe('useRemoteCameraPhone', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockIo.mockClear()
    mockSocket.on.mockClear()
    mockSocket.off.mockClear()
    mockSocket.emit.mockClear()
  })

  afterEach(() => {
    vi.restoreAllMocks()
  })

  describe('initialization', () => {
    it('should initialize with default state', async () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      expect(result.current.isConnected).toBe(false)
      expect(result.current.isSending).toBe(false)
      expect(result.current.frameMode).toBe('raw')
      expect(result.current.desktopCalibration).toBeNull()
      expect(result.current.error).toBeNull()
    })

    it('should set up socket with reconnection config', () => {
      renderHook(() => useRemoteCameraPhone())

      expect(mockIo).toHaveBeenCalledWith(
        expect.objectContaining({
          path: '/api/socket',
          reconnection: true,
          reconnectionDelay: 1000,
          reconnectionDelayMax: 5000,
          reconnectionAttempts: 10,
        })
      )
    })
  })

  describe('session connection', () => {
    it('should emit join event when connecting', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      // Simulate socket connect
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      if (connectHandler) {
        act(() => {
          connectHandler()
        })
      }

      act(() => {
        result.current.connect('phone-session-123')
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:join', {
        sessionId: 'phone-session-123',
      })
    })

    it('should update isConnected on connect', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      if (connectHandler) {
        act(() => {
          connectHandler()
        })
      }

      act(() => {
        result.current.connect('connect-session')
      })

      expect(result.current.isConnected).toBe(true)
    })

    it('should set error if socket not connected', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      // Don't simulate connect - socket is not connected

      act(() => {
        result.current.connect('fail-session')
      })

      expect(result.current.error).toBe('Socket not connected')
    })
  })

  describe('auto-reconnect on socket reconnect', () => {
    it('should re-join session on socket reconnect', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      // Initial connect
      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      // Connect to session
      act(() => {
        result.current.connect('reconnect-session')
      })

      // Clear emit calls
      mockSocket.emit.mockClear()

      // Simulate socket reconnect (connect event fires again)
      act(() => {
        connectHandler()
      })

      // Should auto-rejoin
      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:join', {
        sessionId: 'reconnect-session',
      })
    })

    it('should not rejoin if no session was set', () => {
      renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      mockSocket.emit.mockClear()

      // Simulate reconnect without ever connecting to a session
      act(() => {
        connectHandler()
      })

      expect(mockSocket.emit).not.toHaveBeenCalledWith('remote-camera:join', expect.anything())
    })
  })

  describe('socket disconnect handling', () => {
    it('should not clear session on temporary disconnect', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('persist-session')
      })

      // Simulate temporary disconnect
      const disconnectHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'disconnect'
      )?.[1]
      act(() => {
        disconnectHandler('transport close')
      })

      // Session ref should still be set (will reconnect)
      // isConnected might be false but session should persist internally
    })

    it('should clear state on server disconnect', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('server-disconnect-session')
      })

      const disconnectHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'disconnect'
      )?.[1]
      act(() => {
        disconnectHandler('io server disconnect')
      })

      expect(result.current.isConnected).toBe(false)
    })
  })

  describe('desktop commands', () => {
    it('should handle set-mode command from desktop', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const setModeHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:set-mode'
      )?.[1]

      if (setModeHandler) {
        act(() => {
          setModeHandler({ mode: 'cropped' })
        })
      }

      expect(result.current.frameMode).toBe('cropped')
    })

    it('should handle set-calibration command from desktop', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const calibrationHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:set-calibration'
      )?.[1]

      const corners = {
        topLeft: { x: 10, y: 10 },
        topRight: { x: 100, y: 10 },
        bottomLeft: { x: 10, y: 100 },
        bottomRight: { x: 100, y: 100 },
      }

      if (calibrationHandler) {
        act(() => {
          calibrationHandler({ corners })
        })
      }

      expect(result.current.desktopCalibration).toEqual(corners)
      // Should auto-switch to cropped mode
      expect(result.current.frameMode).toBe('cropped')
    })

    it('should handle clear-calibration command from desktop', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      // First set calibration
      const calibrationHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:set-calibration'
      )?.[1]
      if (calibrationHandler) {
        act(() => {
          calibrationHandler({
            corners: {
              topLeft: { x: 0, y: 0 },
              topRight: { x: 100, y: 0 },
              bottomLeft: { x: 0, y: 100 },
              bottomRight: { x: 100, y: 100 },
            },
          })
        })
      }

      expect(result.current.desktopCalibration).not.toBeNull()

      // Then clear it
      const clearHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:clear-calibration'
      )?.[1]
      if (clearHandler) {
        act(() => {
          clearHandler()
        })
      }

      expect(result.current.desktopCalibration).toBeNull()
    })

    it('should handle set-torch command from desktop', () => {
      const torchCallback = vi.fn()
      renderHook(() => useRemoteCameraPhone({ onTorchRequest: torchCallback }))

      const torchHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:set-torch'
      )?.[1]

      if (torchHandler) {
        act(() => {
          torchHandler({ on: true })
        })
      }

      expect(torchCallback).toHaveBeenCalledWith(true)
    })
  })

  describe('frame mode', () => {
    it('should allow setting frame mode locally', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      act(() => {
        result.current.setFrameMode('cropped')
      })

      expect(result.current.frameMode).toBe('cropped')
    })
  })

  describe('torch state emission', () => {
    it('should emit torch state to desktop', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('torch-emit-session')
      })

      act(() => {
        result.current.emitTorchState(true, true)
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:torch-state', {
        sessionId: 'torch-emit-session',
        isTorchOn: true,
        isTorchAvailable: true,
      })
    })
  })

  describe('disconnect', () => {
    it('should emit leave event on disconnect', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('disconnect-session')
      })

      act(() => {
        result.current.disconnect()
      })

      expect(mockSocket.emit).toHaveBeenCalledWith('remote-camera:leave', {
        sessionId: 'disconnect-session',
      })
    })

    it('should reset state on disconnect', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('reset-disconnect-session')
      })

      expect(result.current.isConnected).toBe(true)

      act(() => {
        result.current.disconnect()
      })

      expect(result.current.isConnected).toBe(false)
    })
  })

  describe('error handling', () => {
    it('should handle error events', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const errorHandler = mockSocket.on.mock.calls.find(
        (call) => call[0] === 'remote-camera:error'
      )?.[1]

      if (errorHandler) {
        act(() => {
          errorHandler({ error: 'Session expired' })
        })
      }

      expect(result.current.error).toBe('Session expired')
      expect(result.current.isConnected).toBe(false)
    })
  })

  describe('calibration update', () => {
    it('should update calibration for frame processing', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const newCalibration = {
        topLeft: { x: 20, y: 20 },
        topRight: { x: 200, y: 20 },
        bottomLeft: { x: 20, y: 200 },
        bottomRight: { x: 200, y: 200 },
      }

      act(() => {
        result.current.updateCalibration(newCalibration)
      })

      // The calibration is stored in a ref for frame processing
      // We can verify by checking that no error is thrown
    })
  })

  describe('sending frames', () => {
    it('should set isSending when startSending is called', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('sending-session')
      })

      // Create mock video element
      const mockVideo = document.createElement('video')

      act(() => {
        result.current.startSending(mockVideo)
      })

      expect(result.current.isSending).toBe(true)
    })

    it('should set error if not connected when starting to send', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const mockVideo = document.createElement('video')

      act(() => {
        result.current.startSending(mockVideo)
      })

      expect(result.current.error).toBe('Not connected to session')
    })

    it('should reset isSending on stopSending', () => {
      const { result } = renderHook(() => useRemoteCameraPhone())

      const connectHandler = mockSocket.on.mock.calls.find((call) => call[0] === 'connect')?.[1]
      act(() => {
        connectHandler()
      })

      act(() => {
        result.current.connect('stop-sending-session')
      })

      const mockVideo = document.createElement('video')
      act(() => {
        result.current.startSending(mockVideo)
      })

      act(() => {
        result.current.stopSending()
      })

      expect(result.current.isSending).toBe(false)
    })
  })
})
218 apps/web/src/hooks/__tests__/useSessionBroadcast.vision.test.ts Normal file
@@ -0,0 +1,218 @@
|
||||
/**
 * Unit tests for useSessionBroadcast vision frame broadcasting
 */
import { act, renderHook } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { BroadcastState } from '@/components/practice'
import { useSessionBroadcast } from '../useSessionBroadcast'

// Mock socket.io-client
const mockSocket = {
  on: vi.fn(),
  off: vi.fn(),
  emit: vi.fn(),
  disconnect: vi.fn(),
  connected: true,
}

vi.mock('socket.io-client', () => ({
  io: vi.fn(() => mockSocket),
}))

describe('useSessionBroadcast - vision frame broadcasting', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockSocket.on.mockReset()
    mockSocket.emit.mockReset()
  })

  const createMockBroadcastState = (): BroadcastState => ({
    currentProblem: { terms: [5, 3], answer: 8 },
    phase: 'problem',
    studentAnswer: '',
    isCorrect: null,
    startedAt: Date.now(),
    purpose: 'focus',
    complexity: undefined,
    currentProblemNumber: 1,
    totalProblems: 10,
    sessionParts: [],
    currentPartIndex: 0,
    currentSlotIndex: 0,
    slotResults: [],
  })

  describe('sendVisionFrame', () => {
    it('returns sendVisionFrame function', () => {
      const { result } = renderHook(() =>
        useSessionBroadcast('session-123', 'player-456', createMockBroadcastState())
      )

      expect(result.current.sendVisionFrame).toBeDefined()
      expect(typeof result.current.sendVisionFrame).toBe('function')
    })

    it('emits vision-frame event with correct payload when connected', async () => {
      // Simulate connection
      let connectHandler: (() => void) | undefined
      mockSocket.on.mockImplementation((event: string, handler: unknown) => {
        if (event === 'connect') {
          connectHandler = handler as () => void
        }
        return mockSocket
      })

      const { result } = renderHook(() =>
        useSessionBroadcast('session-123', 'player-456', createMockBroadcastState())
      )

      // Trigger connect
      act(() => {
        connectHandler?.()
      })

      // Send vision frame
      const imageData = 'base64ImageData=='
      const detectedValue = 456
      const confidence = 0.92

      act(() => {
        result.current.sendVisionFrame(imageData, detectedValue, confidence)
      })

      expect(mockSocket.emit).toHaveBeenCalledWith(
        'vision-frame',
        expect.objectContaining({
          sessionId: 'session-123',
          imageData: 'base64ImageData==',
          detectedValue: 456,
          confidence: 0.92,
          timestamp: expect.any(Number),
        })
      )
    })

    it('includes timestamp in vision-frame event', async () => {
      const now = Date.now()
      vi.setSystemTime(now)

      let connectHandler: (() => void) | undefined
      mockSocket.on.mockImplementation((event: string, handler: unknown) => {
        if (event === 'connect') {
          connectHandler = handler as () => void
        }
        return mockSocket
      })

      const { result } = renderHook(() =>
        useSessionBroadcast('session-123', 'player-456', createMockBroadcastState())
      )

      act(() => {
        connectHandler?.()
      })

      act(() => {
        result.current.sendVisionFrame('imageData', 123, 0.95)
      })

      expect(mockSocket.emit).toHaveBeenCalledWith(
        'vision-frame',
        expect.objectContaining({
          timestamp: now,
        })
      )

      vi.useRealTimers()
    })

    it('handles null detectedValue', async () => {
      let connectHandler: (() => void) | undefined
      mockSocket.on.mockImplementation((event: string, handler: unknown) => {
        if (event === 'connect') {
          connectHandler = handler as () => void
        }
        return mockSocket
      })

      const { result } = renderHook(() =>
        useSessionBroadcast('session-123', 'player-456', createMockBroadcastState())
      )

      act(() => {
        connectHandler?.()
      })

      act(() => {
        result.current.sendVisionFrame('imageData', null, 0)
      })

      expect(mockSocket.emit).toHaveBeenCalledWith(
        'vision-frame',
        expect.objectContaining({
          detectedValue: null,
          confidence: 0,
        })
      )
    })
  })

  describe('negative cases', () => {
    it('does not emit when sessionId is undefined', () => {
      const { result } = renderHook(() =>
        useSessionBroadcast(undefined, 'player-456', createMockBroadcastState())
      )

      act(() => {
        result.current.sendVisionFrame('imageData', 123, 0.95)
      })

      expect(mockSocket.emit).not.toHaveBeenCalledWith('vision-frame', expect.anything())
    })

    it('does not emit when not connected', () => {
      // Don't trigger connect handler
      const { result } = renderHook(() =>
        useSessionBroadcast('session-123', 'player-456', createMockBroadcastState())
      )

      act(() => {
        result.current.sendVisionFrame('imageData', 123, 0.95)
      })

      // The join-session emit happens on connect, but vision-frame should not
      const visionFrameCalls = mockSocket.emit.mock.calls.filter(
        ([event]) => event === 'vision-frame'
      )
      expect(visionFrameCalls).toHaveLength(0)
    })

    it('does not emit when state is null', () => {
      const { result } = renderHook(() => useSessionBroadcast('session-123', 'player-456', null))

      act(() => {
        result.current.sendVisionFrame('imageData', 123, 0.95)
      })

      // Should still not emit vision-frame (no connection due to null state cleanup logic)
      const visionFrameCalls = mockSocket.emit.mock.calls.filter(
        ([event]) => event === 'vision-frame'
      )
      expect(visionFrameCalls).toHaveLength(0)
    })
  })

  describe('result interface', () => {
    it('includes sendVisionFrame in the result', () => {
      const { result } = renderHook(() =>
        useSessionBroadcast('session-123', 'player-456', createMockBroadcastState())
      )

      expect(result.current).toHaveProperty('sendVisionFrame')
      expect(result.current).toHaveProperty('isConnected')
      expect(result.current).toHaveProperty('isBroadcasting')
      expect(result.current).toHaveProperty('sendPartTransition')
      expect(result.current).toHaveProperty('sendPartTransitionComplete')
    })
  })
})
255  apps/web/src/hooks/__tests__/useSessionObserver.vision.test.ts  Normal file
@@ -0,0 +1,255 @@
/**
 * Unit tests for useSessionObserver vision frame receiving
 */
import { act, renderHook, waitFor } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { VisionFrameEvent } from '@/lib/classroom/socket-events'
import { useSessionObserver } from '../useSessionObserver'

// Mock socket.io-client
const mockSocket = {
  on: vi.fn(),
  off: vi.fn(),
  emit: vi.fn(),
  disconnect: vi.fn(),
  connected: true,
}

vi.mock('socket.io-client', () => ({
  io: vi.fn(() => mockSocket),
}))

describe('useSessionObserver - vision frame receiving', () => {
  let eventHandlers: Map<string, (data: unknown) => void>

  beforeEach(() => {
    vi.clearAllMocks()
    eventHandlers = new Map()

    // Capture event handlers
    mockSocket.on.mockImplementation((event: string, handler: unknown) => {
      eventHandlers.set(event, handler as (data: unknown) => void)
      return mockSocket
    })
  })

  describe('visionFrame state', () => {
    it('initially returns null visionFrame', () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      expect(result.current.visionFrame).toBeNull()
    })

    it('updates visionFrame when vision-frame event is received', async () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      // Simulate receiving a vision frame event
      const visionFrameData: VisionFrameEvent = {
        sessionId: 'session-123',
        imageData: 'base64ImageData==',
        detectedValue: 456,
        confidence: 0.92,
        timestamp: Date.now(),
      }

      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.(visionFrameData)
      })

      await waitFor(() => {
        expect(result.current.visionFrame).not.toBeNull()
        expect(result.current.visionFrame?.imageData).toBe('base64ImageData==')
        expect(result.current.visionFrame?.detectedValue).toBe(456)
        expect(result.current.visionFrame?.confidence).toBe(0.92)
        expect(result.current.visionFrame?.receivedAt).toBeDefined()
      })
    })

    it('sets receivedAt to current time when frame is received', async () => {
      const now = Date.now()
      vi.setSystemTime(now)

      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      const visionFrameData: VisionFrameEvent = {
        sessionId: 'session-123',
        imageData: 'imageData',
        detectedValue: 123,
        confidence: 0.9,
        timestamp: now - 100, // Sent 100ms ago
      }

      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.(visionFrameData)
      })

      await waitFor(() => {
        expect(result.current.visionFrame?.receivedAt).toBe(now)
      })

      vi.useRealTimers()
    })

    it('updates visionFrame with new frames', async () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      // First frame
      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.({
          sessionId: 'session-123',
          imageData: 'firstFrame',
          detectedValue: 100,
          confidence: 0.8,
          timestamp: Date.now(),
        })
      })

      await waitFor(() => {
        expect(result.current.visionFrame?.detectedValue).toBe(100)
      })

      // Second frame
      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.({
          sessionId: 'session-123',
          imageData: 'secondFrame',
          detectedValue: 200,
          confidence: 0.95,
          timestamp: Date.now(),
        })
      })

      await waitFor(() => {
        expect(result.current.visionFrame?.detectedValue).toBe(200)
        expect(result.current.visionFrame?.imageData).toBe('secondFrame')
      })
    })

    it('handles null detectedValue in frames', async () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      const visionFrameData: VisionFrameEvent = {
        sessionId: 'session-123',
        imageData: 'imageData',
        detectedValue: null,
        confidence: 0,
        timestamp: Date.now(),
      }

      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.(visionFrameData)
      })

      await waitFor(() => {
        expect(result.current.visionFrame?.detectedValue).toBeNull()
        expect(result.current.visionFrame?.confidence).toBe(0)
      })
    })
  })

  describe('cleanup', () => {
    it('clears visionFrame on stopObserving', async () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      // Receive a frame
      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.({
          sessionId: 'session-123',
          imageData: 'imageData',
          detectedValue: 123,
          confidence: 0.9,
          timestamp: Date.now(),
        })
      })

      await waitFor(() => {
        expect(result.current.visionFrame).not.toBeNull()
      })

      // Stop observing
      act(() => {
        result.current.stopObserving()
      })

      await waitFor(() => {
        expect(result.current.visionFrame).toBeNull()
      })
    })
  })

  describe('result interface', () => {
    it('includes visionFrame in the result', () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      expect(result.current).toHaveProperty('visionFrame')
      expect(result.current).toHaveProperty('state')
      expect(result.current).toHaveProperty('results')
      expect(result.current).toHaveProperty('transitionState')
      expect(result.current).toHaveProperty('isConnected')
      expect(result.current).toHaveProperty('isObserving')
      expect(result.current).toHaveProperty('error')
    })
  })

  describe('negative cases', () => {
    it('does not update visionFrame when observer is disabled', () => {
      const { result } = renderHook(
        () => useSessionObserver('session-123', 'observer-456', 'player-789', false) // disabled
      )

      // The socket won't be created when disabled
      expect(eventHandlers.size).toBe(0)
      expect(result.current.visionFrame).toBeNull()
    })

    it('does not update visionFrame when sessionId is undefined', () => {
      const { result } = renderHook(() =>
        useSessionObserver(undefined, 'observer-456', 'player-789', true)
      )

      expect(result.current.visionFrame).toBeNull()
      expect(result.current.isObserving).toBe(false)
    })

    it('handles empty imageData gracefully', async () => {
      const { result } = renderHook(() =>
        useSessionObserver('session-123', 'observer-456', 'player-789', true)
      )

      act(() => {
        const handler = eventHandlers.get('vision-frame')
        handler?.({
          sessionId: 'session-123',
          imageData: '',
          detectedValue: 123,
          confidence: 0.9,
          timestamp: Date.now(),
        })
      })

      await waitFor(() => {
        expect(result.current.visionFrame?.imageData).toBe('')
      })
    })
  })
})
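On the receiving side, the hook only stores the latest frame; rendering it is left to the consumer. A hypothetical observer panel (the component, the 3-second staleness cutoff, and the assumption that imageData is bare base64 without a data: prefix are all illustrative, not from this diff):

import { useSessionObserver } from '@/hooks/useSessionObserver'

// Hypothetical observer panel rendering the latest student frame.
function VisionFramePanel({ sessionId }: { sessionId: string }) {
  const { visionFrame } = useSessionObserver(sessionId, 'observer-1', 'player-1', true)
  if (!visionFrame) return null

  const isStale = Date.now() - visionFrame.receivedAt > 3000 // assumed cutoff
  return (
    <img
      src={`data:image/jpeg;base64,${visionFrame.imageData}`}
      alt={`Detected value: ${visionFrame.detectedValue ?? 'none'}`}
      style={{ opacity: isStale ? 0.5 : 1 }}
    />
  )
}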
@@ -8,6 +8,11 @@ import {
  isArucoAvailable,
  loadAruco,
} from '@/lib/vision/arucoDetection'
import {
  analyzeColumns,
  analysesToDigits,
  digitsToNumber as cvDigitsToNumber,
} from '@/lib/vision/beadDetector'
import { digitsToNumber, getMinConfidence, processVideoFrame } from '@/lib/vision/frameProcessor'
import type {
  CalibrationGrid,
@@ -83,6 +88,10 @@ export function useAbacusVision(options: UseAbacusVisionOptions = {}): UseAbacus
  // Track previous stable value to avoid duplicate callbacks
  const lastStableValueRef = useRef<number | null>(null)

  // Throttle detection (CV is fast, 10fps is plenty)
  const lastInferenceTimeRef = useRef<number>(0)
  const INFERENCE_INTERVAL_MS = 100 // 10fps

  // Ref for calibration functions to avoid infinite loop in auto-calibration effect
  const calibrationRef = useRef(calibration)
  calibrationRef.current = calibration
@@ -271,9 +280,16 @@
  }, [calibration])

  /**
   * Process a video frame for detection using TensorFlow.js classifier
   * Process a video frame for detection using CV-based bead detection
   */
  const processFrame = useCallback(async () => {
    // Throttle inference for performance (10fps)
    const now = performance.now()
    if (now - lastInferenceTimeRef.current < INFERENCE_INTERVAL_MS) {
      return
    }
    lastInferenceTimeRef.current = now

    // Get video element from camera stream
    const videoElements = document.querySelectorAll('video')
    let video: HTMLVideoElement | null = null
@@ -292,24 +308,33 @@

    // Process video frame into column strips
    const columnImages = processVideoFrame(video, calibration.calibration)

    if (columnImages.length === 0) return

    // Run classification
    const result = await classifier.classifyColumns(columnImages)
    // Use CV-based bead detection instead of ML
    const analyses = analyzeColumns(columnImages)
    const { digits, confidences, minConfidence } = analysesToDigits(analyses)

    if (!result) return
    // Log analysis for debugging
    console.log(
      '[CV] Bead analysis:',
      analyses.map((a) => ({
        digit: a.digit,
        conf: a.confidence.toFixed(2),
        heaven: a.heavenActive ? '5' : '0',
        earth: a.earthActiveCount,
        bar: a.reckoningBarPosition.toFixed(2),
      }))
    )

    // Update column confidences
    setColumnConfidences(result.confidences)
    setColumnConfidences(confidences)

    // Convert digits to number
    const detectedValue = digitsToNumber(result.digits)
    const minConfidence = getMinConfidence(result.confidences)
    const detectedValue = cvDigitsToNumber(digits)
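    // Worked illustration (assumption: analysesToDigits returns columns in
    // left-to-right, most-significant-first order): digits [4, 5, 6] -> 456.
    // The actual ordering contract lives in beadDetector's digitsToNumber.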

    // Push to stability buffer
    stability.pushFrame(detectedValue, minConfidence)
  }, [camera.videoStream, calibration.isCalibrated, calibration.calibration, stability, classifier])
  }, [camera.videoStream, calibration.isCalibrated, calibration.calibration, stability])

  /**
   * Detection loop

@@ -86,7 +86,6 @@ export function useColumnClassifier(): UseColumnClassifierReturn {
        setIsModelLoaded(true)
        return true
      } else {
        // Model doesn't exist - not an error, just unavailable
        setIsModelUnavailable(true)
        return false
      }

@@ -162,6 +162,9 @@ export function useDeskViewCamera(): UseDeskViewCameraReturn {
      video: {
        width: { ideal: 1920 },
        height: { ideal: 1440 },
        // Prefer widest angle lens (zoom: 1 = no zoom = widest)
        // @ts-expect-error - zoom is valid but not in TS types
        zoom: { ideal: 1 },
        // Try to disable face-tracking auto-focus (not all cameras support this)
        // @ts-expect-error - focusMode is valid but not in TS types
        focusMode: 'continuous',

@@ -117,11 +117,14 @@ export function usePhoneCamera(options: UsePhoneCameraOptions = {}): UsePhoneCam
    }

    // Request camera with specified facing mode
    // Prefer widest angle lens (zoom: 1 = no zoom = widest)
    const constraints: MediaStreamConstraints = {
      video: {
        facingMode: { ideal: targetFacingMode },
        width: { ideal: 1280 },
        height: { ideal: 720 },
        // @ts-expect-error - zoom is valid but not in TS types
        zoom: { ideal: 1 },
      },
      audio: false,
    }

@@ -7,6 +7,9 @@ import type { QuadCorners } from '@/types/vision'
/** Frame mode: raw sends uncropped frames, cropped applies calibration */
export type FrameMode = 'raw' | 'cropped'

/** LocalStorage key for persisting session ID */
const STORAGE_KEY = 'remote-camera-session-id'

interface RemoteCameraFrame {
  imageData: string // Base64 JPEG
  timestamp: number
@@ -31,6 +34,10 @@ interface UseRemoteCameraDesktopReturn {
  isTorchAvailable: boolean
  /** Error message if connection failed */
  error: string | null
  /** Current session ID (null if not subscribed) */
  currentSessionId: string | null
  /** Whether actively trying to reconnect */
  isReconnecting: boolean
  /** Subscribe to receive frames for a session */
  subscribe: (sessionId: string) => void
  /** Unsubscribe from the session */
@@ -43,6 +50,10 @@ interface UseRemoteCameraDesktopReturn {
  clearCalibration: () => void
  /** Set phone's torch state */
  setRemoteTorch: (on: boolean) => void
  /** Get the persisted session ID (if any) */
  getPersistedSessionId: () => string | null
  /** Clear persisted session and disconnect */
  clearSession: () => void
}

/**
@@ -66,24 +77,69 @@ export function useRemoteCameraDesktop(): UseRemoteCameraDesktopReturn {
  const [isTorchOn, setIsTorchOn] = useState(false)
  const [isTorchAvailable, setIsTorchAvailable] = useState(false)
  const [error, setError] = useState<string | null>(null)
  const currentSessionId = useRef<string | null>(null)
  const [currentSessionId, setCurrentSessionId] = useState<string | null>(null)
  const [isReconnecting, setIsReconnecting] = useState(false)

  // Refs for values needed in callbacks
  const currentSessionIdRef = useRef<string | null>(null)
  const reconnectAttemptRef = useRef(0)
  const reconnectTimeoutRef = useRef<NodeJS.Timeout | null>(null)

  // Frame rate calculation
  const frameTimestamps = useRef<number[]>([])

  // Initialize socket connection
  // Helper to persist session ID
  const persistSessionId = useCallback((sessionId: string | null) => {
    if (sessionId) {
      localStorage.setItem(STORAGE_KEY, sessionId)
    } else {
      localStorage.removeItem(STORAGE_KEY)
    }
  }, [])

  // Helper to get persisted session ID
  const getPersistedSessionId = useCallback((): string | null => {
    if (typeof window === 'undefined') return null
    return localStorage.getItem(STORAGE_KEY)
  }, [])

  // Initialize socket connection with reconnection support
  useEffect(() => {
    console.log('[RemoteCameraDesktop] Initializing socket connection...')
    const socketInstance = io({
      path: '/api/socket',
      autoConnect: true,
      reconnection: true,
      reconnectionDelay: 1000,
      reconnectionDelayMax: 5000,
      reconnectionAttempts: 10,
    })

    socketInstance.on('connect', () => {
      console.log('[RemoteCameraDesktop] Socket connected! ID:', socketInstance.id)
      setIsConnected(true)

      // If we have a session ID (either from state or localStorage), re-subscribe
      const sessionId = currentSessionIdRef.current || getPersistedSessionId()
      if (sessionId) {
        console.log('[RemoteCameraDesktop] Re-subscribing to session after reconnect:', sessionId)
        setIsReconnecting(true)
        socketInstance.emit('remote-camera:subscribe', { sessionId })
      }
    })

    socketInstance.on('disconnect', () => {
    socketInstance.on('connect_error', (error) => {
      console.error('[RemoteCameraDesktop] Socket connect error:', error)
    })

    socketInstance.on('disconnect', (reason) => {
      console.log('[RemoteCameraDesktop] Socket disconnected:', reason)
      setIsConnected(false)
      // Don't clear phone connected state immediately - might reconnect
      if (reason === 'io server disconnect') {
        // Server forced disconnect - clear state
        setIsPhoneConnected(false)
      }
    })

    setSocket(socketInstance)
@@ -91,7 +147,7 @@ export function useRemoteCameraDesktop(): UseRemoteCameraDesktopReturn {
    return () => {
      socketInstance.disconnect()
    }
  }, [])
  }, [getPersistedSessionId])
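
  // Reload-resume flow implied by the effect above (informal sketch, no new behavior):
  //   1. subscribe('abc')  -> localStorage['remote-camera-session-id'] = 'abc'
  //   2. page reloads      -> a fresh socket connects
  //   3. 'connect' fires   -> getPersistedSessionId() returns 'abc'
  //   4. re-emit 'remote-camera:subscribe' -> frames resume without re-pairing the phone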

  const calculateFrameRate = useCallback(() => {
    const now = Date.now()
@@ -105,18 +161,25 @@ export function useRemoteCameraDesktop(): UseRemoteCameraDesktopReturn {
    if (!socket) return

    const handleConnected = ({ phoneConnected }: { phoneConnected: boolean }) => {
      console.log('[RemoteCameraDesktop] Phone connected event:', phoneConnected)
      setIsPhoneConnected(phoneConnected)
      setIsReconnecting(false)
      setError(null)
      reconnectAttemptRef.current = 0
    }

    const handleDisconnected = ({ phoneConnected }: { phoneConnected: boolean }) => {
      console.log('[RemoteCameraDesktop] Phone disconnected event:', phoneConnected)
      setIsPhoneConnected(phoneConnected)
      setLatestFrame(null)
      setFrameRate(0)
      // Don't clear frame/framerate - keep last state for visual continuity
      // Phone might reconnect quickly
    }

    const handleStatus = ({ phoneConnected }: { phoneConnected: boolean }) => {
      console.log('[RemoteCameraDesktop] Status event:', phoneConnected)
      setIsPhoneConnected(phoneConnected)
      setIsReconnecting(false)
      reconnectAttemptRef.current = 0
    }

    const handleFrame = (frame: RemoteCameraFrame) => {
@@ -135,7 +198,16 @@
    }

    const handleError = ({ error: errorMsg }: { error: string }) => {
      console.log('[RemoteCameraDesktop] Error event:', errorMsg)
      // If session is invalid/expired, clear the persisted session
      if (errorMsg.includes('Invalid') || errorMsg.includes('expired')) {
        console.log('[RemoteCameraDesktop] Session invalid, clearing persisted session')
        persistSessionId(null)
        setCurrentSessionId(null)
        currentSessionIdRef.current = null
      }
      setError(errorMsg)
      setIsReconnecting(false)
    }

    const handleTorchState = ({
@@ -164,7 +236,7 @@
      socket.off('remote-camera:error', handleError)
      socket.off('remote-camera:torch-state', handleTorchState)
    }
  }, [socket, calculateFrameRate])
  }, [socket, calculateFrameRate, persistSessionId])

  // Frame rate update interval
  useEffect(() => {
@@ -174,23 +246,39 @@

  const subscribe = useCallback(
    (sessionId: string) => {
      console.log(
        '[RemoteCameraDesktop] Subscribing to session:',
        sessionId,
        'socket:',
        !!socket,
        'connected:',
        isConnected
      )
      if (!socket || !isConnected) {
        console.error('[RemoteCameraDesktop] Socket not connected!')
        setError('Socket not connected')
        return
      }

      currentSessionId.current = sessionId
      currentSessionIdRef.current = sessionId
      setCurrentSessionId(sessionId)
      persistSessionId(sessionId)
      setError(null)
      console.log('[RemoteCameraDesktop] Emitting remote-camera:subscribe')
      socket.emit('remote-camera:subscribe', { sessionId })
    },
    [socket, isConnected]
    [socket, isConnected, persistSessionId]
  )

  const unsubscribe = useCallback(() => {
    if (!socket || !currentSessionId.current) return
    if (!socket || !currentSessionIdRef.current) return

    socket.emit('remote-camera:leave', { sessionId: currentSessionId.current })
    currentSessionId.current = null
    socket.emit('remote-camera:leave', {
      sessionId: currentSessionIdRef.current,
    })
    currentSessionIdRef.current = null
    setCurrentSessionId(null)
    // Don't clear persisted session - unsubscribe is for temporary disconnect
    setIsPhoneConnected(false)
    setLatestFrame(null)
    setFrameRate(0)
@@ -201,6 +289,30 @@
    setIsTorchAvailable(false)
  }, [socket])

  /**
   * Clear session completely (forget persisted session)
   * Use when user explicitly wants to start fresh
   */
  const clearSession = useCallback(() => {
    if (socket && currentSessionIdRef.current) {
      socket.emit('remote-camera:leave', {
        sessionId: currentSessionIdRef.current,
      })
    }
    currentSessionIdRef.current = null
    setCurrentSessionId(null)
    persistSessionId(null)
    setIsPhoneConnected(false)
    setLatestFrame(null)
    setFrameRate(0)
    setError(null)
    setVideoDimensions(null)
    setFrameMode('raw')
    setIsTorchOn(false)
    setIsTorchAvailable(false)
    setIsReconnecting(false)
  }, [socket, persistSessionId])

  /**
   * Set the phone's frame mode
   * - raw: Phone sends uncropped frames (for calibration)
@@ -208,10 +320,10 @@
   */
  const setPhoneFrameMode = useCallback(
    (mode: FrameMode) => {
      if (!socket || !currentSessionId.current) return
      if (!socket || !currentSessionIdRef.current) return

      socket.emit('remote-camera:set-mode', {
        sessionId: currentSessionId.current,
        sessionId: currentSessionIdRef.current,
        mode,
      })
      setFrameMode(mode)
@@ -225,10 +337,10 @@
   */
  const sendCalibration = useCallback(
    (corners: QuadCorners) => {
      if (!socket || !currentSessionId.current) return
      if (!socket || !currentSessionIdRef.current) return

      socket.emit('remote-camera:set-calibration', {
        sessionId: currentSessionId.current,
        sessionId: currentSessionIdRef.current,
        corners,
      })
      // Phone will automatically switch to cropped mode when it receives calibration
@@ -242,10 +354,10 @@
   * This tells the phone to forget the desktop calibration and go back to auto-detection
   */
  const clearCalibration = useCallback(() => {
    if (!socket || !currentSessionId.current) return
    if (!socket || !currentSessionIdRef.current) return

    socket.emit('remote-camera:clear-calibration', {
      sessionId: currentSessionId.current,
      sessionId: currentSessionIdRef.current,
    })
  }, [socket])

@@ -254,10 +366,10 @@
   */
  const setRemoteTorch = useCallback(
    (on: boolean) => {
      if (!socket || !currentSessionId.current) return
      if (!socket || !currentSessionIdRef.current) return

      socket.emit('remote-camera:set-torch', {
        sessionId: currentSessionId.current,
        sessionId: currentSessionIdRef.current,
        on,
      })
      // Optimistically update local state
@@ -269,9 +381,9 @@
  // Cleanup on unmount
  useEffect(() => {
    return () => {
      if (socket && currentSessionId.current) {
      if (socket && currentSessionIdRef.current) {
        socket.emit('remote-camera:leave', {
          sessionId: currentSessionId.current,
          sessionId: currentSessionIdRef.current,
        })
      }
    }
@@ -286,11 +398,15 @@
    isTorchOn,
    isTorchAvailable,
    error,
    currentSessionId,
    isReconnecting,
    subscribe,
    unsubscribe,
    setPhoneFrameMode,
    sendCalibration,
    clearCalibration,
    setRemoteTorch,
    getPersistedSessionId,
    clearSession,
  }
}

@@ -68,8 +68,13 @@ interface UseRemoteCameraPhoneReturn {
export function useRemoteCameraPhone(
  options: UseRemoteCameraPhoneOptions = {}
): UseRemoteCameraPhoneReturn {
  const { targetFps = 10, jpegQuality = 0.8, targetWidth = 300, rawWidth = 640, onTorchRequest } =
    options
  const {
    targetFps = 10,
    jpegQuality = 0.8,
    targetWidth = 300,
    rawWidth = 640,
    onTorchRequest,
  } = options

  // Keep onTorchRequest in a ref to avoid stale closures
  const onTorchRequestRef = useRef(onTorchRequest)
@@ -113,21 +118,48 @@
    frameModeRef.current = frameMode
  }, [frameMode])

  // Initialize socket connection
  // Initialize socket connection with reconnection support
  useEffect(() => {
    console.log('[RemoteCameraPhone] Initializing socket connection...')
    const socketInstance = io({
      path: '/api/socket',
      autoConnect: true,
      reconnection: true,
      reconnectionDelay: 1000,
      reconnectionDelayMax: 5000,
      reconnectionAttempts: 10,
    })

    socketInstance.on('connect', () => {
      console.log('[RemoteCameraPhone] Socket connected! ID:', socketInstance.id)
      setIsSocketConnected(true)

      // Auto-reconnect to session if we have one
      const sessionId = sessionIdRef.current
      if (sessionId) {
        console.log(
          '[RemoteCameraPhone] Auto-reconnecting to session after socket reconnect:',
          sessionId
        )
        socketInstance.emit('remote-camera:join', { sessionId })
        setIsConnected(true)
        isConnectedRef.current = true
      }
    })

    socketInstance.on('disconnect', () => {
    socketInstance.on('connect_error', (error) => {
      console.error('[RemoteCameraPhone] Socket connect error:', error)
    })

    socketInstance.on('disconnect', (reason) => {
      console.log('[RemoteCameraPhone] Socket disconnected:', reason)
      setIsSocketConnected(false)
      setIsConnected(false)
      isConnectedRef.current = false
      // Don't clear isConnected or sessionIdRef - we want to auto-reconnect
      // Only clear if server explicitly disconnected us
      if (reason === 'io server disconnect') {
        setIsConnected(false)
        isConnectedRef.current = false
      }
    })

    socketRef.current = socketInstance
@@ -314,7 +346,16 @@
  const connect = useCallback(
    (sessionId: string) => {
      const socket = socketRef.current
      console.log(
        '[RemoteCameraPhone] Connecting to session:',
        sessionId,
        'socket:',
        !!socket,
        'connected:',
        isSocketConnected
      )
      if (!socket || !isSocketConnected) {
        console.error('[RemoteCameraPhone] Socket not connected!')
        setError('Socket not connected')
        return
      }
@@ -322,6 +363,7 @@
      sessionIdRef.current = sessionId
      setError(null)

      console.log('[RemoteCameraPhone] Emitting remote-camera:join')
      socket.emit('remote-camera:join', { sessionId })
      setIsConnected(true)
      isConnectedRef.current = true

@@ -11,6 +11,7 @@ import type {
  PracticeStateEvent,
  SessionPausedEvent,
  SessionResumedEvent,
  VisionFrameEvent,
} from '@/lib/classroom/socket-events'

/**
@@ -64,6 +65,8 @@ export interface UseSessionBroadcastResult {
  ) => void
  /** Send part transition complete event to observers */
  sendPartTransitionComplete: () => void
  /** Send vision frame to observers (when student has vision mode enabled) */
  sendVisionFrame: (imageData: string, detectedValue: number | null, confidence: number) => void
}

export function useSessionBroadcast(
@@ -271,10 +274,31 @@
    console.log('[SessionBroadcast] Emitted part-transition-complete')
  }, [sessionId])

  // Broadcast vision frame to observers
  const sendVisionFrame = useCallback(
    (imageData: string, detectedValue: number | null, confidence: number) => {
      if (!socketRef.current || !isConnectedRef.current || !sessionId) {
        return
      }

      const event: VisionFrameEvent = {
        sessionId,
        imageData,
        detectedValue,
        confidence,
        timestamp: Date.now(),
      }

      socketRef.current.emit('vision-frame', event)
    },
    [sessionId]
  )

  return {
    isConnected: isConnectedRef.current,
    isBroadcasting: isConnectedRef.current && !!state,
    sendPartTransition,
    sendPartTransitionComplete,
    sendVisionFrame,
  }
}
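A minimal producer-side sketch (hypothetical glue code; only the sendVisionFrame signature above is assumed) showing how a vision pipeline would forward each stable detection as one vision-frame event:

import type { BroadcastState } from '@/components/practice'
import { useSessionBroadcast } from '@/hooks/useSessionBroadcast'

// Hypothetical glue: forward each stable detection to observers as one event.
function useVisionFrameForwarder(sessionId: string, playerId: string, state: BroadcastState) {
  const { sendVisionFrame } = useSessionBroadcast(sessionId, playerId, state)

  // Called by the vision pipeline whenever a stable value is available.
  return (jpegBase64: string, value: number | null, confidence: number) => {
    // No-ops safely while disconnected or without a sessionId (guard inside sendVisionFrame).
    sendVisionFrame(jpegBase64, value, confidence)
  }
}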

@@ -10,6 +10,7 @@ import type {
  PracticeStateEvent,
  SessionPausedEvent,
  SessionResumedEvent,
  VisionFrameEvent,
} from '@/lib/classroom/socket-events'

/**
@@ -110,6 +111,20 @@ export interface ObservedResult {
  recordedAt: number
}

/**
 * Vision frame received from student's abacus camera
 */
export interface ObservedVisionFrame {
  /** Base64-encoded JPEG image data */
  imageData: string
  /** Detected abacus value (null if not yet detected) */
  detectedValue: number | null
  /** Detection confidence (0-1) */
  confidence: number
  /** When this frame was received by observer */
  receivedAt: number
}

interface UseSessionObserverResult {
  /** Current observed state (null if not yet received) */
  state: ObservedSessionState | null
@@ -117,6 +132,8 @@ interface UseSessionObserverResult {
  results: ObservedResult[]
  /** Current part transition state (null if not in transition) */
  transitionState: ObservedTransitionState | null
  /** Latest vision frame from student's camera (null if vision not enabled) */
  visionFrame: ObservedVisionFrame | null
  /** Whether connected to the session channel */
  isConnected: boolean
  /** Whether actively observing (connected and joined session) */
@@ -155,6 +172,7 @@ export function useSessionObserver(
  const [state, setState] = useState<ObservedSessionState | null>(null)
  const [results, setResults] = useState<ObservedResult[]>([])
  const [transitionState, setTransitionState] = useState<ObservedTransitionState | null>(null)
  const [visionFrame, setVisionFrame] = useState<ObservedVisionFrame | null>(null)
  const [isConnected, setIsConnected] = useState(false)
  const [isObserving, setIsObserving] = useState(false)
  const [error, setError] = useState<string | null>(null)
@@ -174,6 +192,8 @@
    setIsObserving(false)
    setState(null)
    setResults([])
    setTransitionState(null)
    setVisionFrame(null)
    recordedProblemsRef.current.clear()
    hasSeededHistoryRef.current = false
  }
@@ -354,6 +374,16 @@
      setTransitionState(null)
    })

    // Listen for vision frames from student's camera
    socket.on('vision-frame', (data: VisionFrameEvent) => {
      setVisionFrame({
        imageData: data.imageData,
        detectedValue: data.detectedValue,
        confidence: data.confidence,
        receivedAt: Date.now(),
      })
    })

    // Listen for session ended event
    socket.on('session-ended', () => {
      console.log('[SessionObserver] Session ended')
@@ -445,6 +475,7 @@
    state,
    results,
    transitionState,
    visionFrame,
    isConnected,
    isObserving,
    error,

@@ -268,6 +268,22 @@ export interface PartTransitionCompleteEvent {
  sessionId: string
}

/**
 * Vision frame from student's abacus camera.
 * Sent when student has vision mode enabled during practice.
 */
export interface VisionFrameEvent {
  sessionId: string
  /** Base64-encoded JPEG image data */
  imageData: string
  /** Detected abacus value (null if not yet detected) */
  detectedValue: number | null
  /** Detection confidence (0-1) */
  confidence: number
  /** Timestamp when frame was captured */
  timestamp: number
}
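
// Example payload (values borrowed from the unit tests in this change):
//   {
//     sessionId: 'session-123',
//     imageData: 'base64ImageData==',   // bare base64, no data: prefix assumed
//     detectedValue: 456,               // null while nothing is detected
//     confidence: 0.92,                 // 0 when detectedValue is null
//     timestamp: Date.now(),
//   }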

/**
 * Sent when a student starts a practice session while present in a classroom.
 * Allows teacher to see session status update in real-time.
@@ -401,6 +417,7 @@ export interface ClassroomServerToClientEvents {
  'session-resumed': (data: SessionResumedEvent) => void
  'part-transition': (data: PartTransitionEvent) => void
  'part-transition-complete': (data: PartTransitionCompleteEvent) => void
  'vision-frame': (data: VisionFrameEvent) => void

  // Session status events (classroom channel - for teacher's active sessions view)
  'session-started': (data: SessionStartedEvent) => void
@@ -427,6 +444,7 @@ export interface ClassroomClientToServerEvents {
  // Session state broadcasts (from student client)
  'practice-state': (data: PracticeStateEvent) => void
  'tutorial-state': (data: TutorialStateEvent) => void
  'vision-frame': (data: VisionFrameEvent) => void

  // Observer controls
  'tutorial-control': (data: TutorialControlEvent) => void
328  apps/web/src/lib/remote-camera/__tests__/session-manager.test.ts  Normal file
@@ -0,0 +1,328 @@
/**
 * @vitest-environment node
 *
 * Tests for Remote Camera Session Manager
 *
 * Tests session creation, TTL management, activity-based renewal,
 * and calibration persistence.
 */

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
  createRemoteCameraSession,
  deleteRemoteCameraSession,
  getOrCreateSession,
  getRemoteCameraSession,
  getSessionCalibration,
  getSessionCount,
  markPhoneConnected,
  markPhoneDisconnected,
  renewSessionTTL,
  setSessionCalibration,
} from '../session-manager'

describe('Remote Camera Session Manager', () => {
  beforeEach(() => {
    // Clear all sessions before each test
    // Access the global sessions map directly
    if (globalThis.__remoteCameraSessions) {
      globalThis.__remoteCameraSessions.clear()
    }
  })

  afterEach(() => {
    vi.restoreAllMocks()
  })

  describe('createRemoteCameraSession', () => {
    it('should create a new session with unique ID', () => {
      const session = createRemoteCameraSession()

      expect(session.id).toBeDefined()
      expect(session.id.length).toBeGreaterThan(0)
      expect(session.phoneConnected).toBe(false)
    })

    it('should set correct timestamps on creation', () => {
      const now = new Date()
      vi.setSystemTime(now)

      const session = createRemoteCameraSession()

      expect(session.createdAt.getTime()).toBe(now.getTime())
      expect(session.lastActivityAt.getTime()).toBe(now.getTime())
      // TTL should be 60 minutes
      expect(session.expiresAt.getTime()).toBe(now.getTime() + 60 * 60 * 1000)
    })

    it('should create multiple sessions with unique IDs', () => {
      const session1 = createRemoteCameraSession()
      const session2 = createRemoteCameraSession()

      expect(session1.id).not.toBe(session2.id)
      expect(getSessionCount()).toBe(2)
    })
  })

  describe('getRemoteCameraSession', () => {
    it('should retrieve an existing session', () => {
      const created = createRemoteCameraSession()
      const retrieved = getRemoteCameraSession(created.id)

      expect(retrieved).not.toBeNull()
      expect(retrieved?.id).toBe(created.id)
    })

    it('should return null for non-existent session', () => {
      const session = getRemoteCameraSession('non-existent-id')
      expect(session).toBeNull()
    })

    it('should return null for expired session', () => {
      const session = createRemoteCameraSession()
      const sessionId = session.id

      // Advance time past expiration (61 minutes)
      vi.setSystemTime(new Date(Date.now() + 61 * 60 * 1000))

      const retrieved = getRemoteCameraSession(sessionId)
      expect(retrieved).toBeNull()
    })
  })

  describe('getOrCreateSession', () => {
    it('should create new session with provided ID if not exists', () => {
      const customId = 'my-custom-session-id'
      const session = getOrCreateSession(customId)

      expect(session.id).toBe(customId)
      expect(session.phoneConnected).toBe(false)
    })

    it('should return existing session if not expired', () => {
      const customId = 'existing-session'
      const original = getOrCreateSession(customId)

      // Mark phone connected to verify we get same session
      markPhoneConnected(customId)

      const retrieved = getOrCreateSession(customId)

      expect(retrieved.id).toBe(original.id)
      expect(retrieved.phoneConnected).toBe(true)
    })

    it('should renew TTL when accessing existing session', () => {
      const now = new Date()
      vi.setSystemTime(now)

      const customId = 'session-to-renew'
      const original = getOrCreateSession(customId)
      const originalExpiry = original.expiresAt.getTime()

      // Advance time by 30 minutes
      vi.setSystemTime(new Date(now.getTime() + 30 * 60 * 1000))

      const retrieved = getOrCreateSession(customId)

      // Expiry should be extended from current time
      expect(retrieved.expiresAt.getTime()).toBeGreaterThan(originalExpiry)
    })

    it('should create new session if existing one expired', () => {
      const customId = 'expired-session'
      const original = getOrCreateSession(customId)
      markPhoneConnected(customId) // Mark to distinguish

      // Advance time past expiration
      vi.setSystemTime(new Date(Date.now() + 61 * 60 * 1000))

      const newSession = getOrCreateSession(customId)

      // Should be a fresh session (not phone connected)
      expect(newSession.id).toBe(customId)
      expect(newSession.phoneConnected).toBe(false)
    })
  })

  describe('renewSessionTTL', () => {
    it('should extend session expiration time', () => {
      const now = new Date()
      vi.setSystemTime(now)

      const session = createRemoteCameraSession()
      const originalExpiry = session.expiresAt.getTime()

      // Advance time by 30 minutes
      vi.setSystemTime(new Date(now.getTime() + 30 * 60 * 1000))

      const renewed = renewSessionTTL(session.id)

      expect(renewed).toBe(true)

      const updatedSession = getRemoteCameraSession(session.id)
      expect(updatedSession?.expiresAt.getTime()).toBeGreaterThan(originalExpiry)
    })

    it('should update lastActivityAt', () => {
      const now = new Date()
      vi.setSystemTime(now)

      const session = createRemoteCameraSession()

      // Advance time
      const later = new Date(now.getTime() + 10 * 60 * 1000)
      vi.setSystemTime(later)

      renewSessionTTL(session.id)

      const updatedSession = getRemoteCameraSession(session.id)
      expect(updatedSession?.lastActivityAt.getTime()).toBe(later.getTime())
    })

    it('should return false for non-existent session', () => {
      const result = renewSessionTTL('non-existent')
      expect(result).toBe(false)
    })
  })

  describe('calibration persistence', () => {
    const testCalibration = {
      corners: {
        topLeft: { x: 10, y: 10 },
        topRight: { x: 100, y: 10 },
        bottomLeft: { x: 10, y: 100 },
        bottomRight: { x: 100, y: 100 },
      },
    }

    it('should store calibration data', () => {
      const session = createRemoteCameraSession()

      const result = setSessionCalibration(session.id, testCalibration)

      expect(result).toBe(true)
    })

    it('should retrieve calibration data', () => {
      const session = createRemoteCameraSession()
      setSessionCalibration(session.id, testCalibration)

      const retrieved = getSessionCalibration(session.id)

      expect(retrieved).toEqual(testCalibration)
    })

    it('should return null for session without calibration', () => {
      const session = createRemoteCameraSession()

      const calibration = getSessionCalibration(session.id)

      expect(calibration).toBeNull()
    })

    it('should return null for non-existent session', () => {
      const calibration = getSessionCalibration('non-existent')
      expect(calibration).toBeNull()
    })

    it('should renew TTL when setting calibration', () => {
      const now = new Date()
      vi.setSystemTime(now)

      const session = createRemoteCameraSession()
      const originalExpiry = session.expiresAt.getTime()

      // Advance time
      vi.setSystemTime(new Date(now.getTime() + 30 * 60 * 1000))

      setSessionCalibration(session.id, testCalibration)

      const updatedSession = getRemoteCameraSession(session.id)
      expect(updatedSession?.expiresAt.getTime()).toBeGreaterThan(originalExpiry)
    })

    it('should persist calibration across session retrievals', () => {
      const customId = 'calibrated-session'
      const session = getOrCreateSession(customId)
      setSessionCalibration(session.id, testCalibration)

      // Simulate reconnection by getting session again
      const reconnected = getOrCreateSession(customId)

      expect(reconnected.calibration).toEqual(testCalibration)
    })
  })

  describe('phone connection state', () => {
    it('should mark phone as connected', () => {
      const session = createRemoteCameraSession()

      const result = markPhoneConnected(session.id)

      expect(result).toBe(true)
      const updated = getRemoteCameraSession(session.id)
      expect(updated?.phoneConnected).toBe(true)
    })

    it('should mark phone as disconnected', () => {
      const session = createRemoteCameraSession()
      markPhoneConnected(session.id)

      const result = markPhoneDisconnected(session.id)

      expect(result).toBe(true)
      const updated = getRemoteCameraSession(session.id)
      expect(updated?.phoneConnected).toBe(false)
    })

    it('should extend TTL when phone connects', () => {
      const now = new Date()
      vi.setSystemTime(now)

      const session = createRemoteCameraSession()

      // Advance time
      vi.setSystemTime(new Date(now.getTime() + 30 * 60 * 1000))

      markPhoneConnected(session.id)

      const updated = getRemoteCameraSession(session.id)
      // Expiry should be 60 mins from now (not from creation)
      expect(updated?.expiresAt.getTime()).toBeGreaterThan(now.getTime() + 60 * 60 * 1000)
    })

    it('should return false for non-existent session', () => {
      expect(markPhoneConnected('non-existent')).toBe(false)
      expect(markPhoneDisconnected('non-existent')).toBe(false)
    })
  })

  describe('deleteRemoteCameraSession', () => {
    it('should delete existing session', () => {
      const session = createRemoteCameraSession()

      const result = deleteRemoteCameraSession(session.id)

      expect(result).toBe(true)
      expect(getRemoteCameraSession(session.id)).toBeNull()
    })

    it('should return false for non-existent session', () => {
      const result = deleteRemoteCameraSession('non-existent')
      expect(result).toBe(false)
    })
  })

  describe('session count', () => {
    it('should track total sessions', () => {
      expect(getSessionCount()).toBe(0)

      createRemoteCameraSession()
      expect(getSessionCount()).toBe(1)

      createRemoteCameraSession()
      expect(getSessionCount()).toBe(2)
    })
  })
})

@@ -2,7 +2,8 @@
 * Remote Camera Session Manager
 *
 * Manages in-memory sessions for phone-to-desktop camera streaming.
 * Sessions are short-lived (10 minute TTL) and stored in memory.
 * Sessions have a 60-minute TTL but are renewed on activity.
 * Sessions persist across page reloads via session ID stored client-side.
 */

import { createId } from '@paralleldrive/cuid2'
@@ -11,7 +12,17 @@ export interface RemoteCameraSession {
  id: string
  createdAt: Date
  expiresAt: Date
  lastActivityAt: Date
  phoneConnected: boolean
  /** Calibration data sent from desktop (persists for reconnects) */
  calibration?: {
    corners: {
      topLeft: { x: number; y: number }
      topRight: { x: number; y: number }
      bottomLeft: { x: number; y: number }
      bottomRight: { x: number; y: number }
    }
  }
}

// In-memory session storage
@@ -21,7 +32,7 @@ declare global {
  var __remoteCameraSessions: Map<string, RemoteCameraSession> | undefined
}

const SESSION_TTL_MS = 10 * 60 * 1000 // 10 minutes
const SESSION_TTL_MS = 60 * 60 * 1000 // 60 minutes
const CLEANUP_INTERVAL_MS = 60 * 1000 // 1 minute

function getSessions(): Map<string, RemoteCameraSession> {
@@ -44,6 +55,7 @@ export function createRemoteCameraSession(): RemoteCameraSession {
    id: createId(),
    createdAt: now,
    expiresAt: new Date(now.getTime() + SESSION_TTL_MS),
    lastActivityAt: now,
    phoneConnected: false,
  }

@@ -51,6 +63,84 @@ export function createRemoteCameraSession(): RemoteCameraSession {
  return session
}

/**
 * Get or create a session by ID
 * If the session exists and isn't expired, returns it (renewed)
 * If the session doesn't exist, creates a new one with the given ID
 */
export function getOrCreateSession(sessionId: string): RemoteCameraSession {
  const sessions = getSessions()
  const existing = sessions.get(sessionId)
  const now = new Date()

  if (existing && now <= existing.expiresAt) {
    // Renew TTL on access
    existing.expiresAt = new Date(now.getTime() + SESSION_TTL_MS)
    existing.lastActivityAt = now
    return existing
  }

  // Create new session with provided ID
  const session: RemoteCameraSession = {
    id: sessionId,
    createdAt: now,
    expiresAt: new Date(now.getTime() + SESSION_TTL_MS),
    lastActivityAt: now,
    phoneConnected: false,
  }

  sessions.set(session.id, session)
  return session
}
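
// Worked TTL timeline for getOrCreateSession (sketch):
//   t = 0      getOrCreateSession('s') -> new session, expiresAt = t + 60min
//   t = 30min  getOrCreateSession('s') -> same session, expiresAt pushed to t + 90min
//   t = 95min  getOrCreateSession('s') -> expired, a fresh session is created under 's'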

/**
 * Renew session TTL (call on activity to keep session alive)
 */
export function renewSessionTTL(sessionId: string): boolean {
  const sessions = getSessions()
  const session = sessions.get(sessionId)

  if (!session) return false

  const now = new Date()
  session.expiresAt = new Date(now.getTime() + SESSION_TTL_MS)
  session.lastActivityAt = now
  return true
}

/**
 * Store calibration data in session (persists for reconnects)
 */
export function setSessionCalibration(
  sessionId: string,
  calibration: RemoteCameraSession['calibration']
): boolean {
  const sessions = getSessions()
  const session = sessions.get(sessionId)

  if (!session) return false

  session.calibration = calibration
  // Also renew TTL
  const now = new Date()
  session.expiresAt = new Date(now.getTime() + SESSION_TTL_MS)
  session.lastActivityAt = now
  return true
}

/**
 * Get calibration data from session
 */
export function getSessionCalibration(
  sessionId: string
): RemoteCameraSession['calibration'] | null {
  const sessions = getSessions()
  const session = sessions.get(sessionId)

  if (!session) return null
  return session.calibration || null
}

/**
 * Get a session by ID
 */
203  apps/web/src/lib/vision/beadDetector.ts  Normal file
@@ -0,0 +1,203 @@
/**
 * Traditional CV-based bead detection for abacus columns
 *
 * Uses edge detection and contour analysis instead of ML.
 * Works by detecting the reckoning bar and analyzing bead positions
 * relative to it.
 */

export interface BeadAnalysis {
  /** Detected digit value (0-9) */
  digit: number
  /** Confidence based on detection clarity */
  confidence: number
  /** Position of reckoning bar (0-1, relative to column height) */
  reckoningBarPosition: number
  /** Number of beads detected above bar */
  heavenBeadsDetected: number
  /** Whether heaven bead is active (touching bar) */
  heavenActive: boolean
  /** Number of beads detected below bar */
  earthBeadsDetected: number
  /** Number of active earth beads (touching bar) */
  earthActiveCount: number
}

/**
 * Analyze a single column image to detect bead positions
 *
 * @param imageData - Grayscale image data of a single column
 * @returns Analysis result with detected digit
 */
export function analyzeColumn(imageData: ImageData): BeadAnalysis {
  const { width, height, data } = imageData

  // Step 1: Create vertical intensity profile (average each row)
  const rowIntensities = new Float32Array(height)
  for (let y = 0; y < height; y++) {
    let sum = 0
    for (let x = 0; x < width; x++) {
      const idx = (y * width + x) * 4
      sum += data[idx] // Use red channel (grayscale)
    }
    rowIntensities[y] = sum / width
  }

  // Step 2: Find reckoning bar (darkest horizontal region)
  // The bar is typically a dark horizontal line in the middle third
  const searchStart = Math.floor(height * 0.25)
  const searchEnd = Math.floor(height * 0.75)

  let darkestRow = searchStart
  let darkestValue = 255

  // Use a sliding window to find the darkest band
  const windowSize = Math.max(3, Math.floor(height * 0.03))
  for (let y = searchStart; y < searchEnd - windowSize; y++) {
    let windowSum = 0
    for (let i = 0; i < windowSize; i++) {
      windowSum += rowIntensities[y + i]
    }
    const windowAvg = windowSum / windowSize
    if (windowAvg < darkestValue) {
      darkestValue = windowAvg
      darkestRow = y + Math.floor(windowSize / 2)
    }
  }

  const reckoningBarPosition = darkestRow / height

  // Step 3: Analyze heaven section (above bar)
  // Find peaks in intensity (beads are darker than background)
  const heavenStart = 0
  const heavenEnd = darkestRow - windowSize
  const heavenPeaks = findPeaks(rowIntensities, heavenStart, heavenEnd, height)

  // Heaven bead is active if it's close to the reckoning bar
  const heavenActiveThreshold = height * 0.15 // Within 15% of bar
  const heavenActive =
    heavenPeaks.length > 0 &&
    darkestRow - heavenPeaks[heavenPeaks.length - 1] < heavenActiveThreshold

  // Step 4: Analyze earth section (below bar)
  const earthStart = darkestRow + windowSize
  const earthEnd = height
  const earthPeaks = findPeaks(rowIntensities, earthStart, earthEnd, height)

  // Earth beads are active if they're close to the reckoning bar
  const earthActiveCount = earthPeaks.filter(
    (peak) => peak - darkestRow < heavenActiveThreshold
  ).length

  // Step 5: Calculate digit value
  // Heaven bead = 5, each earth bead = 1
  const heavenValue = heavenActive ? 5 : 0
  const earthValue = Math.min(earthActiveCount, 4) // Max 4 earth beads
  const digit = heavenValue + earthValue
|
||||
|
||||
// Step 6: Calculate confidence based on detection quality
|
||||
// Higher confidence if we found expected number of beads and clear bar
|
||||
const expectedHeavenBeads = 1
|
||||
const expectedEarthBeads = 4
|
||||
const heavenConfidence = heavenPeaks.length === expectedHeavenBeads ? 1.0 : 0.5
|
||||
const earthConfidence =
|
||||
earthPeaks.length >= expectedEarthBeads ? 1.0 : earthPeaks.length / expectedEarthBeads
|
||||
const barContrast = (255 - darkestValue) / 255 // How dark is the bar?
|
||||
|
||||
const confidence = (heavenConfidence + earthConfidence + barContrast) / 3
|
||||
|
||||
return {
|
||||
digit,
|
||||
confidence,
|
||||
reckoningBarPosition,
|
||||
heavenBeadsDetected: heavenPeaks.length,
|
||||
heavenActive,
|
||||
earthBeadsDetected: earthPeaks.length,
|
||||
earthActiveCount,
|
||||
}
|
||||
}
|
||||
|
||||
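// Worked example of the Step 5 encoding above: heavenActive = true contributes 5
// and earthActiveCount = 3 contributes 3, so the column reads 8. With the heaven
// bead inactive and all 4 earth beads at the bar, the column reads 4; 9 is the max.
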
/**
 * Find peaks (local minima = dark beads) in intensity profile
 */
function findPeaks(
  intensities: Float32Array,
  start: number,
  end: number,
  totalHeight: number
): number[] {
  const peaks: number[] = []
  const minPeakDistance = Math.floor(totalHeight * 0.05) // Min 5% height between peaks
  const threshold = calculateAdaptiveThreshold(intensities, start, end)

  let lastPeak = -minPeakDistance * 2

  for (let y = start + 2; y < end - 2; y++) {
    const current = intensities[y]

    // Local minimum (darker than neighbors)
    if (
      current < intensities[y - 1] &&
      current < intensities[y + 1] &&
      current < intensities[y - 2] &&
      current < intensities[y + 2] &&
      current < threshold &&
      y - lastPeak >= minPeakDistance
    ) {
      peaks.push(y)
      lastPeak = y
    }
  }

  return peaks
}

/**
 * Calculate adaptive threshold for peak detection
 */
function calculateAdaptiveThreshold(intensities: Float32Array, start: number, end: number): number {
  let sum = 0
  let min = 255

  for (let y = start; y < end; y++) {
    sum += intensities[y]
    min = Math.min(min, intensities[y])
  }

  const avg = sum / (end - start)

  // Threshold halfway between average and minimum
  return (avg + min) / 2
}

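// Sanity sketch for the two helpers above (illustration only, not part of the
// module): a flat profile of 200 with two dark dips yields two peaks, since both
// dips sit at ±1 and ±2 local minima and fall well below the ~118 adaptive threshold.
// const profile = new Float32Array(100).fill(200)
// profile[30] = 40
// profile[70] = 40
// findPeaks(profile, 0, 100, 100) // → [30, 70]
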
/**
 * Analyze multiple columns
 */
export function analyzeColumns(columnImages: ImageData[]): BeadAnalysis[] {
  return columnImages.map(analyzeColumn)
}

/**
 * Convert bead analyses to digits
 */
export function analysesToDigits(analyses: BeadAnalysis[]): {
  digits: number[]
  confidences: number[]
  minConfidence: number
} {
  const digits = analyses.map((a) => a.digit)
  const confidences = analyses.map((a) => a.confidence)
  const minConfidence = confidences.length > 0 ? Math.min(...confidences) : 0

  return { digits, confidences, minConfidence }
}

/**
 * Convert digits to a number (most significant digit first)
 */
export function digitsToNumber(digits: number[]): number {
  if (digits.length === 0) return 0
  return digits.reduce((acc, d) => acc * 10 + d, 0)
}
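
// End-to-end usage sketch (illustrative; the 0.5 confidence gate mirrors the
// stability config elsewhere in this changeset and is an assumption here):
// const analyses = analyzeColumns(columnImages)
// const { digits, minConfidence } = analysesToDigits(analyses)
// if (minConfidence >= 0.5) {
//   const value = digitsToNumber(digits) // e.g. [1, 2, 3] → 123
// }
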
@@ -214,3 +214,59 @@ export function getMinConfidence(confidences: number[]): number {
  if (confidences.length === 0) return 0
  return Math.min(...confidences)
}

/**
 * Process an image frame for classification (for remote camera frames)
 *
 * @param image - Image element with the frame
 * @param calibration - Calibration grid (if null, assumes entire image is the abacus)
 * @param columnCount - Number of columns to slice into
 * @param columnWidth - Target column width for model input
 * @param columnHeight - Target column height for model input
 * @returns Array of preprocessed column ImageData ready for classification
 */
export function processImageFrame(
  image: HTMLImageElement,
  calibration: CalibrationGrid | null,
  columnCount: number,
  columnWidth: number = 64,
  columnHeight: number = 128
): ImageData[] {
  // Create canvas for image frame
  const canvas = document.createElement('canvas')
  canvas.width = image.naturalWidth || image.width
  canvas.height = image.naturalHeight || image.height
  const ctx = canvas.getContext('2d')!

  // Draw image frame
  ctx.drawImage(image, 0, 0)

  let roiData: ImageData

  if (calibration) {
    // Extract ROI using calibration
    roiData = extractROI(ctx, calibration.roi)
  } else {
    // No calibration - use entire image as ROI (already cropped by phone)
    roiData = ctx.getImageData(0, 0, canvas.width, canvas.height)
  }

  // Create a synthetic calibration for slicing if none provided
  const sliceCalibration: CalibrationGrid = calibration ?? {
    roi: { x: 0, y: 0, width: canvas.width, height: canvas.height },
    columnCount,
    columnDividers: Array.from({ length: columnCount - 1 }, (_, i) => (i + 1) / columnCount),
    rotation: 0,
  }

  // Slice into columns
  const columns = sliceIntoColumns(roiData, sliceCalibration)

  // Preprocess each column
  return columns.map((col) => {
    // Convert to grayscale
    const gray = toGrayscale(col)
    // Resize to model input size
    return resizeImageData(gray, columnWidth, columnHeight)
  })
}

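// Usage sketch (illustrative; the 13-column count and the classifier hand-off are
// assumptions, not part of this hunk):
// const img = new Image()
// img.onload = () => {
//   const columns = processImageFrame(img, null, 13) // full-frame ROI, 13 columns
//   // each entry is a 64x128 grayscale ImageData ready for the column classifier
// }
// img.src = frameDataUrl // e.g. a data:image/jpeg;base64,... frame from the phone
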
@@ -978,6 +978,21 @@ export function initializeSocketServer(httpServer: HTTPServer) {
    io!.to(`session:${data.sessionId}`).emit('session-resumed', data)
  })

  // Session Observation: Broadcast vision frame from student's abacus camera
  socket.on(
    'vision-frame',
    (data: {
      sessionId: string
      imageData: string
      detectedValue: number | null
      confidence: number
      timestamp: number
    }) => {
      // Broadcast to all observers in the session channel
      socket.to(`session:${data.sessionId}`).emit('vision-frame', data)
    }
  )

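  // Client-side sketch (assumed wiring, not shown in this hunk): the student emits
  // frames, and every observer joined to the same session channel receives them.
  // studentSocket.emit('vision-frame', { sessionId, imageData, detectedValue, confidence, timestamp: Date.now() })
  // observerSocket.on('vision-frame', (frame) => renderObserverFeed(frame)) // renderObserverFeed is hypothetical
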
  // Skill Tutorial: Broadcast state from student to classroom (for teacher observation)
  // The student joins the classroom channel and emits their tutorial state
  socket.on(
@@ -1 +1,21 @@
import '@testing-library/jest-dom'

// Prevent jsdom errors when tests render images with data URIs
// (e.g., data:image/jpeg;base64,...)
// This works by patching HTMLImageElement.prototype.setAttribute before jsdom uses it
// Guard for node environment where HTMLImageElement doesn't exist
if (typeof HTMLImageElement !== 'undefined') {
  const originalSetAttribute = HTMLImageElement.prototype.setAttribute
  HTMLImageElement.prototype.setAttribute = function (name: string, value: string) {
    if (name === 'src' && value.startsWith('data:image/')) {
      // Store the value but don't trigger jsdom's image loading
      Object.defineProperty(this, 'src', {
        value,
        writable: true,
        configurable: true,
      })
      return
    }
    return originalSetAttribute.call(this, name, value)
  }
}

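// What this enables (illustrative): a test can render a base64 vision frame
// without jsdom throwing "Not implemented" resource-loading errors, e.g.
// render(<img src={`data:image/jpeg;base64,${frame}`} alt="vision frame" />)
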
@@ -187,8 +187,8 @@ export interface FrameStabilityConfig {
 * Default stability configuration
 */
export const DEFAULT_STABILITY_CONFIG: FrameStabilityConfig = {
  minConsecutiveFrames: 10, // ~300ms at 30fps
  minConfidence: 0.7,
  minConsecutiveFrames: 3, // 600ms at 5fps inference rate
  minConfidence: 0.5, // Lower threshold - model confidence is often 60-80%
  handMotionThreshold: 0.3,
}

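// How these values gate acceptance (a minimal consumer sketch; this tracker is
// illustrative, not the module's actual implementation):
// let stable = 0
// let lastValue: number | null = null
// function onFrame(value: number, confidence: number): number | null {
//   if (confidence < DEFAULT_STABILITY_CONFIG.minConfidence) { stable = 0; return null }
//   stable = value === lastValue ? stable + 1 : 1
//   lastValue = value
//   // 3 matching frames at the ~5fps inference rate ≈ 600ms of sustained agreement
//   return stable >= DEFAULT_STABILITY_CONFIG.minConsecutiveFrames ? value : null
// }
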
@@ -1,102 +1,118 @@
# [2.19.0](https://github.com/antialias/soroban-abacus-flashcards/compare/abacus-react-v2.18.0...abacus-react-v2.19.0) (2026-01-01)


### Features

* **vision:** add physical abacus column setting and fix remote flash toggle ([b206eb3](https://github.com/antialias/soroban-abacus-flashcards/commit/b206eb30712e4b98525a9fa2544c2b5a235a8b72))
* **vision:** improve remote camera calibration and UX ([8846cec](https://github.com/antialias/soroban-abacus-flashcards/commit/8846cece93941a36c187abd4ecee9cc88de0c2ec))

# [2.18.0](https://github.com/antialias/soroban-abacus-flashcards/compare/abacus-react-v2.17.0...abacus-react-v2.18.0) (2026-01-01)
# [2.20.0](https://github.com/antialias/soroban-abacus-flashcards/compare/abacus-react-v2.19.0...abacus-react-v2.20.0) (2026-01-02)


### Bug Fixes

* allow teacher-parents to enroll their children in other classrooms ([52df7f4](https://github.com/antialias/soroban-abacus-flashcards/commit/52df7f469718128fd3d8933941ffb8d4bb8db208))
* **bkt:** handle missing helpLevelUsed in legacy data causing NaN ([b300ed9](https://github.com/antialias/soroban-abacus-flashcards/commit/b300ed9f5cc3bfb0c7b28faafe81c80a59444998))
* **camera:** handle race condition in camera initialization ([2a24700](https://github.com/antialias/soroban-abacus-flashcards/commit/2a24700e6cb6efe0ae35d9ebd6c428e3a1a1a736))
* **classroom:** auto-transition tutorial→session observation + fix NaN display ([962a52d](https://github.com/antialias/soroban-abacus-flashcards/commit/962a52d7562f566e78f6272816b049bf77daa7c9))
* **classroom:** broadcast digit-by-digit answer and correct phase indicator ([fb73e85](https://github.com/antialias/soroban-abacus-flashcards/commit/fb73e85f2daacefafa572e03c16b10fab619ea57))
* **dashboard:** compute skill stats from session results in curriculum API ([11d4846](https://github.com/antialias/soroban-abacus-flashcards/commit/11d48465d710d0293ebf41f64b4fd0f1f03d8bf8))
* **db:** add missing is_paused column to session_plans ([9d8b5e1](https://github.com/antialias/soroban-abacus-flashcards/commit/9d8b5e1148911f881d08d07608debaaef91609c2))
* **db:** add missing journal entries for migrations 0041-0042 ([398603c](https://github.com/antialias/soroban-abacus-flashcards/commit/398603c75a094e28122c5ccdced5b82badc7fbfb))
* **docker:** add canvas native deps for jsdom/vitest ([5f51bc1](https://github.com/antialias/soroban-abacus-flashcards/commit/5f51bc1871aec325feb32a0b29edabb3b6c5dd1f))
* **docker:** override canvas with mock package for Alpine/musl ([8be1995](https://github.com/antialias/soroban-abacus-flashcards/commit/8be19958af624d22fa2c6cb48f5723f5efc820c3))
* **docker:** skip canvas native build (optional jsdom dep) ([d717f44](https://github.com/antialias/soroban-abacus-flashcards/commit/d717f44fccb8ed2baa30499df65784a4b89c6ffc))
* **observer:** seed results panel with full session history ([aab7469](https://github.com/antialias/soroban-abacus-flashcards/commit/aab7469d9ea87c91a0165e4c48a60ac130cdc1b2))
* only show session stats when there are actual problems ([62aefad](https://github.com/antialias/soroban-abacus-flashcards/commit/62aefad6766ba32ad27e8ed3db621a6f77520cbe))
* **practice:** allow teachers to create student profiles ([5fee129](https://github.com/antialias/soroban-abacus-flashcards/commit/5fee1297e1775b5e6133919d179e23b6e70b2518))
* **practice:** always show add student FAB button ([a658414](https://github.com/antialias/soroban-abacus-flashcards/commit/a6584143ebf1f3e5b3c9f3283e690458a06beb60))
* **practice:** real-time progress in observer modal + numeric answer comparison ([c0e63ff](https://github.com/antialias/soroban-abacus-flashcards/commit/c0e63ff68b26fd37eedd657504f7f79e5ce40a10))
* **practice:** show active sessions for teacher's own children ([ece3197](https://github.com/antialias/soroban-abacus-flashcards/commit/ece319738b6ab1882469d79ea24b604316d28b34))
* **practice:** use Next.js Link for student tiles + fix session observer z-index ([6def610](https://github.com/antialias/soroban-abacus-flashcards/commit/6def6108771b427e4885bebd23cecdad7a50efb0))
* **seed:** accurate BKT simulation for developing classifications ([d5e4c85](https://github.com/antialias/soroban-abacus-flashcards/commit/d5e4c858db8866e5177b8fa2317aba42b30171e8))
* **share:** use getShareUrl for correct production URLs ([98a69f1](https://github.com/antialias/soroban-abacus-flashcards/commit/98a69f1f80e465415edce49043e2c019a856f8e5))
* **vision:** fix manual calibration overlay not showing on remote camera ([44dcb01](https://github.com/antialias/soroban-abacus-flashcards/commit/44dcb01473bac00c09dddbbefd77dd26b3a27817))
* **vision:** fix remote camera calibration coordinate system ([e52f94e](https://github.com/antialias/soroban-abacus-flashcards/commit/e52f94e4b476658c41f23668d2941af1288e4ed8))
* **vision:** swap corners diagonally for webcam orientation ([dd8efe3](https://github.com/antialias/soroban-abacus-flashcards/commit/dd8efe379d4bbcfc4b60f7c00ad6180465b7e7b6))
* **vision:** clear config when switching camera sources ([ff59612](https://github.com/antialias/soroban-abacus-flashcards/commit/ff59612e7b9bab3ef4a8fba3c60e9dbcb37a140a))
* **vision:** hide flip camera button when only one camera available ([7a9185e](https://github.com/antialias/soroban-abacus-flashcards/commit/7a9185eadb3609de596e3d150090af19225fdab6))
* **vision:** include remote camera in isVisionSetupComplete check ([a8fb77e](https://github.com/antialias/soroban-abacus-flashcards/commit/a8fb77e8e3f2f4293c2dab99ca1ec1de78b1e37c))
* **vision:** remote camera persistence and UI bugs ([d90d263](https://github.com/antialias/soroban-abacus-flashcards/commit/d90d263b2a2a5f228d93af2217bb11241ee8f0f5))


### Features

* API authorization audit + teacher enrollment UI + share codes ([d6e369f](https://github.com/antialias/soroban-abacus-flashcards/commit/d6e369f9dc9b963938ca8de4562c87f9f1b6d389))
* **camera:** auto-start camera when opening camera modal ([f3bb0ae](https://github.com/antialias/soroban-abacus-flashcards/commit/f3bb0aee4fe23eeffc7b7099981f51ec54636a35))
* **camera:** fullscreen modal with edge-to-edge preview ([db17c96](https://github.com/antialias/soroban-abacus-flashcards/commit/db17c96168078f2d0d723b24395096756a2f63ec))
* **chart:** add grouped structure to chart hover tooltip ([594e22c](https://github.com/antialias/soroban-abacus-flashcards/commit/594e22c428e0a4ee4322c233f127f9250e88b5fa))
* **chart:** improve skill classification visual hierarchy with colors and patterns ([c9518a6](https://github.com/antialias/soroban-abacus-flashcards/commit/c9518a6b9952bda60ab2663d7655092637139fec))
* **classroom:** add active sessions API endpoint ([07f6bb7](https://github.com/antialias/soroban-abacus-flashcards/commit/07f6bb7f9cc2dfbe6da8d16361e89b698405e1c0))
* **classroom:** add real-time enrollment/unenrollment reactivity ([a0693e9](https://github.com/antialias/soroban-abacus-flashcards/commit/a0693e90840f651094f852a6a6f523013786b322))
* **classroom:** add session broadcast and active session indicators ([9636f7f](https://github.com/antialias/soroban-abacus-flashcards/commit/9636f7f44a71da022352c19e80f9ec147dd3af5f))
* **classroom:** add unified add-student modal with two-column layout ([dca696a](https://github.com/antialias/soroban-abacus-flashcards/commit/dca696a29fc20a2697b491c0d2efbe036569a716))
* **classroom:** add unified TeacherClassroomCard with auto-enrollment ([4d6adf3](https://github.com/antialias/soroban-abacus-flashcards/commit/4d6adf359ede5d17c2decd9275ba68635ee0bd4f))
* **classroom:** complete reactivity fixes (Steps 7-11) ([2015494](https://github.com/antialias/soroban-abacus-flashcards/commit/2015494c0eca28457031aa39490d70a2af3da4df))
* **classroom:** consolidate filter pill to single-row design ([78a63e3](https://github.com/antialias/soroban-abacus-flashcards/commit/78a63e35e39948729cbf41e6c5af4e688a506c8d))
* **classroom:** implement enrollment system (Phase 4) ([1952a41](https://github.com/antialias/soroban-abacus-flashcards/commit/1952a412edcd04b332655199737c340a4389d174))
* **classroom:** implement entry prompts system ([de39ab5](https://github.com/antialias/soroban-abacus-flashcards/commit/de39ab52cc60f5782fc291246f98013ae15142ca))
* **classroom:** implement real-time enrollment updates ([bbe0500](https://github.com/antialias/soroban-abacus-flashcards/commit/bbe0500fe9000d0d016417c1b586e9569e3eb888))
* **classroom:** implement real-time presence with WebSocket (Phase 6) ([629bfcf](https://github.com/antialias/soroban-abacus-flashcards/commit/629bfcfc03c611cd3928bb98a67bace485ee3a7b))
* **classroom:** implement real-time session observation (Step 3) ([2feb684](https://github.com/antialias/soroban-abacus-flashcards/commit/2feb6844a4fce48ba7a87d2a77769783c4e8b2f9))
* **classroom:** implement real-time skill tutorial observation ([4b73879](https://github.com/antialias/soroban-abacus-flashcards/commit/4b7387905d2b050327f9b67b834d4e9dfc0b19cb))
* **classroom:** implement teacher classroom dashboard (Phase 3) ([2202716](https://github.com/antialias/soroban-abacus-flashcards/commit/2202716f563053624dbe5c6abb969a3b0d452fd1))
* **classroom:** implement teacher-initiated pause and fix manual pause ([ccea0f8](https://github.com/antialias/soroban-abacus-flashcards/commit/ccea0f86ac213b32cac7363f28e193b1976bd553))
* **classroom:** implement two-way abacus sync for session observation (Step 5) ([2f7002e](https://github.com/antialias/soroban-abacus-flashcards/commit/2f7002e5759db705e213eb9f8474589c8e6149e7))
* **classroom:** improve enrollment reactivity and UX ([77336be](https://github.com/antialias/soroban-abacus-flashcards/commit/77336bea5b5bbf16b393da13588de6e5082e818f))
* **classroom:** integrate create student form into unified add-student modal ([da92289](https://github.com/antialias/soroban-abacus-flashcards/commit/da92289ed1ae570ff48cc28818122d4640d6c84c))
* **classroom:** integrate Enter Classroom into StudentActionMenu ([2f1b9df](https://github.com/antialias/soroban-abacus-flashcards/commit/2f1b9df9d9d605b0c120af6961670ae84718c8d7))
* **dashboard:** add skill progress chart with trend analysis and timing awareness ([1fc8949](https://github.com/antialias/soroban-abacus-flashcards/commit/1fc8949b0664591aa1b0cfcd7c7abd2a4c586281))
* enable parents to observe children's practice sessions ([7b82995](https://github.com/antialias/soroban-abacus-flashcards/commit/7b829956644d369dfdfb0789a33e0b857958e84f))
* **family:** implement parent-to-parent family code sharing (Phase 2) ([0284227](https://github.com/antialias/soroban-abacus-flashcards/commit/02842270c9278174934407a9620777589f79ee1e))
* improve session summary header and add practice type badges ([518fe15](https://github.com/antialias/soroban-abacus-flashcards/commit/518fe153c9fc2ae2f2f7fc0ed4de27ee1c5c5646))
* **observer:** add live active session item to history list ([91d6d6a](https://github.com/antialias/soroban-abacus-flashcards/commit/91d6d6a1b6938b559d8488fe296d562695cf16d1))
* **observer:** add live results panel and session progress indicator ([8527f89](https://github.com/antialias/soroban-abacus-flashcards/commit/8527f892e2b300d51d83056d779474592a2fd955))
* **observer:** implement shareable session observation links ([3ac7b46](https://github.com/antialias/soroban-abacus-flashcards/commit/3ac7b460ec0dc207a5691fbed8d539b484374fe7))
* **practice:** add auto-rotation for captured documents ([ff79a28](https://github.com/antialias/soroban-abacus-flashcards/commit/ff79a28c657fb0a19752990e23f9bb0ced4e9343))
* **practice:** add document adjustment UI and auto-capture ([473b7db](https://github.com/antialias/soroban-abacus-flashcards/commit/473b7dbd7cd15be511351a1fd303a0fc32b9d941))
* **practice:** add document scanning with multi-quad tracking ([5f4f1fd](https://github.com/antialias/soroban-abacus-flashcards/commit/5f4f1fde3372e5d65d3f399216b04ab0e4c9972e))
* **practice:** add fixed filter bar, sticky headers, and shared EmojiPicker ([0e03561](https://github.com/antialias/soroban-abacus-flashcards/commit/0e0356113ddef1ec92cd0b3fda0852d99c6067d2))
* **practice:** add intervention system and improve skill chart hierarchy ([bf5b99a](https://github.com/antialias/soroban-abacus-flashcards/commit/bf5b99afe967c0b17765a7e6f1911d03201eed95))
* **practice:** add mini start practice banner to QuickLook modal ([d1176da](https://github.com/antialias/soroban-abacus-flashcards/commit/d1176da9aa8bd926ca96699d1091e65f4a34d782))
* **practice:** add Needs Attention to unified compact layout ([8727782](https://github.com/antialias/soroban-abacus-flashcards/commit/8727782e45c7ac269c4dbcc223b2a8be57be8bb2))
* **practice:** add photo attachments for practice sessions ([9b85311](https://github.com/antialias/soroban-abacus-flashcards/commit/9b853116ecfbb19bec39923da635374963cf002c))
* **practice:** add photo editing with rotation persistence and auto-detect ([156a0df](https://github.com/antialias/soroban-abacus-flashcards/commit/156a0dfe967a48c211be527da27c92ef8b1ab20c))
* **practice:** add smooth fullscreen transition from QuickLook to dashboard ([cb8b0df](https://github.com/antialias/soroban-abacus-flashcards/commit/cb8b0dff676d48bcba4775c5981ac357d573ab27))
* **practice:** add student organization with filtering and archiving ([538718a](https://github.com/antialias/soroban-abacus-flashcards/commit/538718a814402bd9c83b3c354c5a3386ff69104d))
* **practice:** add StudentActionMenu to dashboard + fix z-index layering ([bf262e7](https://github.com/antialias/soroban-abacus-flashcards/commit/bf262e7d5305e2358d3a2464db10bc3b0866104c))
* **practice:** compact single-student categories and UI improvements ([0e7f326](https://github.com/antialias/soroban-abacus-flashcards/commit/0e7f3265fe2de3b693c47a8a556d3e7cbc726ef4))
* **practice:** implement measurement-based compact layout ([1656b93](https://github.com/antialias/soroban-abacus-flashcards/commit/1656b9324f6fb24a318820e04559c480c99762f5))
* **practice:** implement retry wrong problems system ([474c4da](https://github.com/antialias/soroban-abacus-flashcards/commit/474c4da05a8d761e63a32187f5c301b57fb6aae4))
* **practice:** parent session observation + relationship UI + error boundaries ([07484fd](https://github.com/antialias/soroban-abacus-flashcards/commit/07484fdfac3c6613a6a7709bdee25e1f8e047227))
* **practice:** polish unified student list with keyboard nav and mobile UX ([0ba1551](https://github.com/antialias/soroban-abacus-flashcards/commit/0ba1551feaa30d8f41ec5d771c00561396b043f3))
* **seed:** add category field to all mock student profiles ([f883fbf](https://github.com/antialias/soroban-abacus-flashcards/commit/f883fbfe233b7fb3d366062e7c156e3fc8e0e3a7))
* **session-summary:** redesign ProblemToReview with BKT integration and animations ([430c46a](https://github.com/antialias/soroban-abacus-flashcards/commit/430c46adb929a6c0ce7c67da4b1df7d3e2846cfd))
* **storybook:** add TeacherClassroomCard stories ([a5e5788](https://github.com/antialias/soroban-abacus-flashcards/commit/a5e5788fa96f57e0d918620e357f7920ef792b19))
* **vision:** add AbacusVisionBridge for physical soroban detection ([47088e4](https://github.com/antialias/soroban-abacus-flashcards/commit/47088e4850c25e76fe49879587227b46f699ba91))
* **vision:** add ArUco marker auto-calibration for abacus detection ([9e9a06f](https://github.com/antialias/soroban-abacus-flashcards/commit/9e9a06f2e4dc37d208ac19259be9b9830c7ad949))
* **vision:** add remote phone camera support for abacus detection ([8e4975d](https://github.com/antialias/soroban-abacus-flashcards/commit/8e4975d395c4b10bc40ae2c71473fdb1a50c114c))
* **vision:** add activeCameraSource tracking and simplify calibration UI ([1be6151](https://github.com/antialias/soroban-abacus-flashcards/commit/1be6151bae0f2ffc0781792bf002cb7672635842))
* **vision:** add CV-based bead detection and fix remote camera connection ([005140a](https://github.com/antialias/soroban-abacus-flashcards/commit/005140a1e72238459ea987e57f83e169b213d7b9))
* **vision:** add TensorFlow.js column classifier model and improve detection ([5d0ac65](https://github.com/antialias/soroban-abacus-flashcards/commit/5d0ac65bdd2bd22c8e2d586add3a0aba8dd82426))
* **vision:** broadcast vision frames to observers (Phase 5) ([b3b769c](https://github.com/antialias/soroban-abacus-flashcards/commit/b3b769c0e2e15d4a0f4e70219982dc78c72e4e2b))
* **vision:** disable auto-detection with feature flag ([a5025f0](https://github.com/antialias/soroban-abacus-flashcards/commit/a5025f01bc759de1b87c06a2a9d2d94344adc790))
* **vision:** integrate vision feed into docked abacus ([d8c7645](https://github.com/antialias/soroban-abacus-flashcards/commit/d8c764595d34dabb4b836e2eea93e0b869f09cd2))

# [2.19.0](https://github.com/antialias/soroban-abacus-flashcards/compare/abacus-react-v2.18.0...abacus-react-v2.19.0) (2026-01-01)

### Features

- **vision:** add physical abacus column setting and fix remote flash toggle ([b206eb3](https://github.com/antialias/soroban-abacus-flashcards/commit/b206eb30712e4b98525a9fa2544c2b5a235a8b72))
- **vision:** improve remote camera calibration and UX ([8846cec](https://github.com/antialias/soroban-abacus-flashcards/commit/8846cece93941a36c187abd4ecee9cc88de0c2ec))

# [2.18.0](https://github.com/antialias/soroban-abacus-flashcards/compare/abacus-react-v2.17.0...abacus-react-v2.18.0) (2026-01-01)

### Bug Fixes

- allow teacher-parents to enroll their children in other classrooms ([52df7f4](https://github.com/antialias/soroban-abacus-flashcards/commit/52df7f469718128fd3d8933941ffb8d4bb8db208))
- **bkt:** handle missing helpLevelUsed in legacy data causing NaN ([b300ed9](https://github.com/antialias/soroban-abacus-flashcards/commit/b300ed9f5cc3bfb0c7b28faafe81c80a59444998))
- **camera:** handle race condition in camera initialization ([2a24700](https://github.com/antialias/soroban-abacus-flashcards/commit/2a24700e6cb6efe0ae35d9ebd6c428e3a1a1a736))
- **classroom:** auto-transition tutorial→session observation + fix NaN display ([962a52d](https://github.com/antialias/soroban-abacus-flashcards/commit/962a52d7562f566e78f6272816b049bf77daa7c9))
- **classroom:** broadcast digit-by-digit answer and correct phase indicator ([fb73e85](https://github.com/antialias/soroban-abacus-flashcards/commit/fb73e85f2daacefafa572e03c16b10fab619ea57))
- **dashboard:** compute skill stats from session results in curriculum API ([11d4846](https://github.com/antialias/soroban-abacus-flashcards/commit/11d48465d710d0293ebf41f64b4fd0f1f03d8bf8))
- **db:** add missing is_paused column to session_plans ([9d8b5e1](https://github.com/antialias/soroban-abacus-flashcards/commit/9d8b5e1148911f881d08d07608debaaef91609c2))
- **db:** add missing journal entries for migrations 0041-0042 ([398603c](https://github.com/antialias/soroban-abacus-flashcards/commit/398603c75a094e28122c5ccdced5b82badc7fbfb))
- **docker:** add canvas native deps for jsdom/vitest ([5f51bc1](https://github.com/antialias/soroban-abacus-flashcards/commit/5f51bc1871aec325feb32a0b29edabb3b6c5dd1f))
- **docker:** override canvas with mock package for Alpine/musl ([8be1995](https://github.com/antialias/soroban-abacus-flashcards/commit/8be19958af624d22fa2c6cb48f5723f5efc820c3))
- **docker:** skip canvas native build (optional jsdom dep) ([d717f44](https://github.com/antialias/soroban-abacus-flashcards/commit/d717f44fccb8ed2baa30499df65784a4b89c6ffc))
- **observer:** seed results panel with full session history ([aab7469](https://github.com/antialias/soroban-abacus-flashcards/commit/aab7469d9ea87c91a0165e4c48a60ac130cdc1b2))
- only show session stats when there are actual problems ([62aefad](https://github.com/antialias/soroban-abacus-flashcards/commit/62aefad6766ba32ad27e8ed3db621a6f77520cbe))
- **practice:** allow teachers to create student profiles ([5fee129](https://github.com/antialias/soroban-abacus-flashcards/commit/5fee1297e1775b5e6133919d179e23b6e70b2518))
- **practice:** always show add student FAB button ([a658414](https://github.com/antialias/soroban-abacus-flashcards/commit/a6584143ebf1f3e5b3c9f3283e690458a06beb60))
- **practice:** real-time progress in observer modal + numeric answer comparison ([c0e63ff](https://github.com/antialias/soroban-abacus-flashcards/commit/c0e63ff68b26fd37eedd657504f7f79e5ce40a10))
- **practice:** show active sessions for teacher's own children ([ece3197](https://github.com/antialias/soroban-abacus-flashcards/commit/ece319738b6ab1882469d79ea24b604316d28b34))
- **practice:** use Next.js Link for student tiles + fix session observer z-index ([6def610](https://github.com/antialias/soroban-abacus-flashcards/commit/6def6108771b427e4885bebd23cecdad7a50efb0))
- **seed:** accurate BKT simulation for developing classifications ([d5e4c85](https://github.com/antialias/soroban-abacus-flashcards/commit/d5e4c858db8866e5177b8fa2317aba42b30171e8))
- **share:** use getShareUrl for correct production URLs ([98a69f1](https://github.com/antialias/soroban-abacus-flashcards/commit/98a69f1f80e465415edce49043e2c019a856f8e5))
- **vision:** fix manual calibration overlay not showing on remote camera ([44dcb01](https://github.com/antialias/soroban-abacus-flashcards/commit/44dcb01473bac00c09dddbbefd77dd26b3a27817))
- **vision:** fix remote camera calibration coordinate system ([e52f94e](https://github.com/antialias/soroban-abacus-flashcards/commit/e52f94e4b476658c41f23668d2941af1288e4ed8))
- **vision:** swap corners diagonally for webcam orientation ([dd8efe3](https://github.com/antialias/soroban-abacus-flashcards/commit/dd8efe379d4bbcfc4b60f7c00ad6180465b7e7b6))

### Features

- API authorization audit + teacher enrollment UI + share codes ([d6e369f](https://github.com/antialias/soroban-abacus-flashcards/commit/d6e369f9dc9b963938ca8de4562c87f9f1b6d389))
- **camera:** auto-start camera when opening camera modal ([f3bb0ae](https://github.com/antialias/soroban-abacus-flashcards/commit/f3bb0aee4fe23eeffc7b7099981f51ec54636a35))
- **camera:** fullscreen modal with edge-to-edge preview ([db17c96](https://github.com/antialias/soroban-abacus-flashcards/commit/db17c96168078f2d0d723b24395096756a2f63ec))
- **chart:** add grouped structure to chart hover tooltip ([594e22c](https://github.com/antialias/soroban-abacus-flashcards/commit/594e22c428e0a4ee4322c233f127f9250e88b5fa))
- **chart:** improve skill classification visual hierarchy with colors and patterns ([c9518a6](https://github.com/antialias/soroban-abacus-flashcards/commit/c9518a6b9952bda60ab2663d7655092637139fec))
- **classroom:** add active sessions API endpoint ([07f6bb7](https://github.com/antialias/soroban-abacus-flashcards/commit/07f6bb7f9cc2dfbe6da8d16361e89b698405e1c0))
- **classroom:** add real-time enrollment/unenrollment reactivity ([a0693e9](https://github.com/antialias/soroban-abacus-flashcards/commit/a0693e90840f651094f852a6a6f523013786b322))
- **classroom:** add session broadcast and active session indicators ([9636f7f](https://github.com/antialias/soroban-abacus-flashcards/commit/9636f7f44a71da022352c19e80f9ec147dd3af5f))
- **classroom:** add unified add-student modal with two-column layout ([dca696a](https://github.com/antialias/soroban-abacus-flashcards/commit/dca696a29fc20a2697b491c0d2efbe036569a716))
- **classroom:** add unified TeacherClassroomCard with auto-enrollment ([4d6adf3](https://github.com/antialias/soroban-abacus-flashcards/commit/4d6adf359ede5d17c2decd9275ba68635ee0bd4f))
- **classroom:** complete reactivity fixes (Steps 7-11) ([2015494](https://github.com/antialias/soroban-abacus-flashcards/commit/2015494c0eca28457031aa39490d70a2af3da4df))
- **classroom:** consolidate filter pill to single-row design ([78a63e3](https://github.com/antialias/soroban-abacus-flashcards/commit/78a63e35e39948729cbf41e6c5af4e688a506c8d))
- **classroom:** implement enrollment system (Phase 4) ([1952a41](https://github.com/antialias/soroban-abacus-flashcards/commit/1952a412edcd04b332655199737c340a4389d174))
- **classroom:** implement entry prompts system ([de39ab5](https://github.com/antialias/soroban-abacus-flashcards/commit/de39ab52cc60f5782fc291246f98013ae15142ca))
- **classroom:** implement real-time enrollment updates ([bbe0500](https://github.com/antialias/soroban-abacus-flashcards/commit/bbe0500fe9000d0d016417c1b586e9569e3eb888))
- **classroom:** implement real-time presence with WebSocket (Phase 6) ([629bfcf](https://github.com/antialias/soroban-abacus-flashcards/commit/629bfcfc03c611cd3928bb98a67bace485ee3a7b))
- **classroom:** implement real-time session observation (Step 3) ([2feb684](https://github.com/antialias/soroban-abacus-flashcards/commit/2feb6844a4fce48ba7a87d2a77769783c4e8b2f9))
- **classroom:** implement real-time skill tutorial observation ([4b73879](https://github.com/antialias/soroban-abacus-flashcards/commit/4b7387905d2b050327f9b67b834d4e9dfc0b19cb))
- **classroom:** implement teacher classroom dashboard (Phase 3) ([2202716](https://github.com/antialias/soroban-abacus-flashcards/commit/2202716f563053624dbe5c6abb969a3b0d452fd1))
- **classroom:** implement teacher-initiated pause and fix manual pause ([ccea0f8](https://github.com/antialias/soroban-abacus-flashcards/commit/ccea0f86ac213b32cac7363f28e193b1976bd553))
- **classroom:** implement two-way abacus sync for session observation (Step 5) ([2f7002e](https://github.com/antialias/soroban-abacus-flashcards/commit/2f7002e5759db705e213eb9f8474589c8e6149e7))
- **classroom:** improve enrollment reactivity and UX ([77336be](https://github.com/antialias/soroban-abacus-flashcards/commit/77336bea5b5bbf16b393da13588de6e5082e818f))
- **classroom:** integrate create student form into unified add-student modal ([da92289](https://github.com/antialias/soroban-abacus-flashcards/commit/da92289ed1ae570ff48cc28818122d4640d6c84c))
- **classroom:** integrate Enter Classroom into StudentActionMenu ([2f1b9df](https://github.com/antialias/soroban-abacus-flashcards/commit/2f1b9df9d9d605b0c120af6961670ae84718c8d7))
- **dashboard:** add skill progress chart with trend analysis and timing awareness ([1fc8949](https://github.com/antialias/soroban-abacus-flashcards/commit/1fc8949b0664591aa1b0cfcd7c7abd2a4c586281))
- enable parents to observe children's practice sessions ([7b82995](https://github.com/antialias/soroban-abacus-flashcards/commit/7b829956644d369dfdfb0789a33e0b857958e84f))
- **family:** implement parent-to-parent family code sharing (Phase 2) ([0284227](https://github.com/antialias/soroban-abacus-flashcards/commit/02842270c9278174934407a9620777589f79ee1e))
- improve session summary header and add practice type badges ([518fe15](https://github.com/antialias/soroban-abacus-flashcards/commit/518fe153c9fc2ae2f2f7fc0ed4de27ee1c5c5646))
- **observer:** add live active session item to history list ([91d6d6a](https://github.com/antialias/soroban-abacus-flashcards/commit/91d6d6a1b6938b559d8488fe296d562695cf16d1))
- **observer:** add live results panel and session progress indicator ([8527f89](https://github.com/antialias/soroban-abacus-flashcards/commit/8527f892e2b300d51d83056d779474592a2fd955))
- **observer:** implement shareable session observation links ([3ac7b46](https://github.com/antialias/soroban-abacus-flashcards/commit/3ac7b460ec0dc207a5691fbed8d539b484374fe7))
- **practice:** add auto-rotation for captured documents ([ff79a28](https://github.com/antialias/soroban-abacus-flashcards/commit/ff79a28c657fb0a19752990e23f9bb0ced4e9343))
- **practice:** add document adjustment UI and auto-capture ([473b7db](https://github.com/antialias/soroban-abacus-flashcards/commit/473b7dbd7cd15be511351a1fd303a0fc32b9d941))
- **practice:** add document scanning with multi-quad tracking ([5f4f1fd](https://github.com/antialias/soroban-abacus-flashcards/commit/5f4f1fde3372e5d65d3f399216b04ab0e4c9972e))
- **practice:** add fixed filter bar, sticky headers, and shared EmojiPicker ([0e03561](https://github.com/antialias/soroban-abacus-flashcards/commit/0e0356113ddef1ec92cd0b3fda0852d99c6067d2))
- **practice:** add intervention system and improve skill chart hierarchy ([bf5b99a](https://github.com/antialias/soroban-abacus-flashcards/commit/bf5b99afe967c0b17765a7e6f1911d03201eed95))
- **practice:** add mini start practice banner to QuickLook modal ([d1176da](https://github.com/antialias/soroban-abacus-flashcards/commit/d1176da9aa8bd926ca96699d1091e65f4a34d782))
- **practice:** add Needs Attention to unified compact layout ([8727782](https://github.com/antialias/soroban-abacus-flashcards/commit/8727782e45c7ac269c4dbcc223b2a8be57be8bb2))
- **practice:** add photo attachments for practice sessions ([9b85311](https://github.com/antialias/soroban-abacus-flashcards/commit/9b853116ecfbb19bec39923da635374963cf002c))
- **practice:** add photo editing with rotation persistence and auto-detect ([156a0df](https://github.com/antialias/soroban-abacus-flashcards/commit/156a0dfe967a48c211be527da27c92ef8b1ab20c))
- **practice:** add smooth fullscreen transition from QuickLook to dashboard ([cb8b0df](https://github.com/antialias/soroban-abacus-flashcards/commit/cb8b0dff676d48bcba4775c5981ac357d573ab27))
- **practice:** add student organization with filtering and archiving ([538718a](https://github.com/antialias/soroban-abacus-flashcards/commit/538718a814402bd9c83b3c354c5a3386ff69104d))
- **practice:** add StudentActionMenu to dashboard + fix z-index layering ([bf262e7](https://github.com/antialias/soroban-abacus-flashcards/commit/bf262e7d5305e2358d3a2464db10bc3b0866104c))
- **practice:** compact single-student categories and UI improvements ([0e7f326](https://github.com/antialias/soroban-abacus-flashcards/commit/0e7f3265fe2de3b693c47a8a556d3e7cbc726ef4))
- **practice:** implement measurement-based compact layout ([1656b93](https://github.com/antialias/soroban-abacus-flashcards/commit/1656b9324f6fb24a318820e04559c480c99762f5))
- **practice:** implement retry wrong problems system ([474c4da](https://github.com/antialias/soroban-abacus-flashcards/commit/474c4da05a8d761e63a32187f5c301b57fb6aae4))
- **practice:** parent session observation + relationship UI + error boundaries ([07484fd](https://github.com/antialias/soroban-abacus-flashcards/commit/07484fdfac3c6613a6a7709bdee25e1f8e047227))
- **practice:** polish unified student list with keyboard nav and mobile UX ([0ba1551](https://github.com/antialias/soroban-abacus-flashcards/commit/0ba1551feaa30d8f41ec5d771c00561396b043f3))
- **seed:** add category field to all mock student profiles ([f883fbf](https://github.com/antialias/soroban-abacus-flashcards/commit/f883fbfe233b7fb3d366062e7c156e3fc8e0e3a7))
- **session-summary:** redesign ProblemToReview with BKT integration and animations ([430c46a](https://github.com/antialias/soroban-abacus-flashcards/commit/430c46adb929a6c0ce7c67da4b1df7d3e2846cfd))
- **storybook:** add TeacherClassroomCard stories ([a5e5788](https://github.com/antialias/soroban-abacus-flashcards/commit/a5e5788fa96f57e0d918620e357f7920ef792b19))
- **vision:** add AbacusVisionBridge for physical soroban detection ([47088e4](https://github.com/antialias/soroban-abacus-flashcards/commit/47088e4850c25e76fe49879587227b46f699ba91))
- **vision:** add ArUco marker auto-calibration for abacus detection ([9e9a06f](https://github.com/antialias/soroban-abacus-flashcards/commit/9e9a06f2e4dc37d208ac19259be9b9830c7ad949))
- **vision:** add remote phone camera support for abacus detection ([8e4975d](https://github.com/antialias/soroban-abacus-flashcards/commit/8e4975d395c4b10bc40ae2c71473fdb1a50c114c))

### Performance Improvements

* reduce practice page dev bundle from 47MB to 115KB ([fd1df93](https://github.com/antialias/soroban-abacus-flashcards/commit/fd1df93a8fa320800275c135d5dd89390eb72c19))
- reduce practice page dev bundle from 47MB to 115KB ([fd1df93](https://github.com/antialias/soroban-abacus-flashcards/commit/fd1df93a8fa320800275c135d5dd89390eb72c19))

# [2.17.0](https://github.com/antialias/soroban-abacus-flashcards/compare/abacus-react-v2.16.0...abacus-react-v2.17.0) (2025-12-20)

22745 pnpm-lock.yaml generated
File diff suppressed because it is too large