diff --git a/App/func_loader/fl.c b/App/func_loader/fl.c index ffbe945..e344b79 100644 --- a/App/func_loader/fl.c +++ b/App/func_loader/fl.c @@ -213,17 +213,55 @@ static bool fl_check_addr_range(uintptr_t addr, size_t len) { return true; } -static void cmd_ping(fl_context_t* ctx) { +/* =========================== + COMMAND ARGS & HANDLER TYPE + =========================== */ + +/** + * @brief Parsed command arguments (shared by all handlers) + */ +typedef struct { + const char* cmd; + const char* data; + uintptr_t addr; + uintptr_t orig; + uintptr_t target; + int crc; /* -1 = no CRC provided */ + int len; + int size; + int comp; + int all; + int enable; /* -1 = not specified, 0 = disable, 1 = enable */ + int force; + const char* path; + const char* newpath; + const char* mode; +} cmd_args_t; + +/** + * @brief Command handler function pointer + * @return 0 on success, -1 on argument validation error + */ +typedef int (*cmd_handler_t)(fl_context_t* ctx, const cmd_args_t* args); + +/* =========================== + COMMAND IMPLEMENTATIONS + =========================== */ + +static int cmd_ping(fl_context_t* ctx, const cmd_args_t* args) { (void)ctx; + (void)args; fl_response(true, "PONG"); + return 0; } -static void cmd_echo(fl_context_t* ctx, const char* data_str) { +static int cmd_echo(fl_context_t* ctx, const cmd_args_t* args) { (void)ctx; /* Echo command for serial throughput testing. * Echoes back the data length and CRC for verification. * The data is hex-encoded, so actual byte count is strlen/2. */ + const char* data_str = args->data; size_t len = data_str ? strlen(data_str) / 2 : 0; uint16_t crc = 0; @@ -233,9 +271,44 @@ static void cmd_echo(fl_context_t* ctx, const char* data_str) { } fl_response(true, "ECHO %u Bytes, CRC 0x%04X", (unsigned)len, crc); + return 0; } -static void cmd_info(fl_context_t* ctx) { +static int cmd_echoback(fl_context_t* ctx, const cmd_args_t* args) { + /* Echoback command for download direction throughput testing. 
+ * Fills the send buffer with a deterministic pattern (i % 256), + * base64-encodes it, and sends it back with CRC. + * PC sends: fl -c echoback --len N + */ + int len = args->len; + if (len <= 0 || (size_t)len > FL_BUF_SIZE) { + fl_response(false, "Invalid length %d (max %d)", len, (int)FL_BUF_SIZE); + return 0; + } + + /* Fill buffer with deterministic pattern */ + for (int i = 0; i < len; i++) { + ctx->buf[i] = (uint8_t)(i % 256); + } + + /* Base64 encode */ + if (bytes_to_base64(ctx->buf, len, ctx->b64_buf, FL_B64_BUF_SIZE) < 0) { + fl_response(false, "Base64 encode failed"); + return 0; + } + + /* CRC over raw pattern bytes */ + uint16_t crc = calc_crc16(ctx->buf, len); + + /* Output in parts to avoid buffer overflow */ + fl_print("[FLOK] ECHOBACK %d bytes crc=0x%04X data=", len, (unsigned)crc); + fl_print_raw(ctx->b64_buf); + fl_print_raw("\n[FLEND]\n"); + return 0; +} + +static int cmd_info(fl_context_t* ctx, const cmd_args_t* args) { + (void)args; const fpb_state_t* fpb = fpb_get_state(); fpb_info_t fpb_info; uint32_t num_comps = fpb->num_code_comp; @@ -292,12 +365,18 @@ static void cmd_info(fl_context_t* ctx) { } fl_response(true, "Info complete"); + return 0; } -static void cmd_alloc(fl_context_t* ctx, size_t size) { +static int cmd_alloc(fl_context_t* ctx, const cmd_args_t* args) { + if (args->size == 0) { + fl_response(false, "Missing --size"); + return -1; + } + if (!ctx->malloc_cb) { fl_response(false, "No malloc_cb"); - return; + return 0; } /* Free previous allocation if any */ @@ -307,52 +386,59 @@ static void cmd_alloc(fl_context_t* ctx, size_t size) { ctx->last_alloc_size = 0; } - void* p = ctx->malloc_cb(size); + void* p = ctx->malloc_cb(args->size); if (!p) { fl_response(false, "Alloc failed"); - return; + return 0; } ctx->last_alloc = (uintptr_t)p; - ctx->last_alloc_size = size; - fl_response(true, "Allocated %u at 0x%08lX", (unsigned)size, (unsigned long)p); + ctx->last_alloc_size = args->size; + fl_response(true, "Allocated %u at 0x%08lX", 
(unsigned)args->size, (unsigned long)p); + return 0; } -static void cmd_upload(fl_context_t* ctx, uintptr_t offset, const char* data_str, uintptr_t crc, bool verify) { +static int cmd_upload(fl_context_t* ctx, const cmd_args_t* args) { + if (!args->data) { + fl_response(false, "Missing --data"); + return -1; + } + uint8_t* buf = ctx->buf; + bool verify = args->crc >= 0; - int n = base64_to_bytes(data_str, buf, FL_BUF_SIZE); + int n = base64_to_bytes(args->data, buf, FL_BUF_SIZE); if (n < 0) { fl_response(false, "Invalid base64 data"); - return; + return 0; } if (verify) { /* CRC covers: offset(4B) + len(4B) + data payload */ - uint32_t offset32 = (uint32_t)offset; + uint32_t offset32 = (uint32_t)args->addr; uint32_t len32 = (uint32_t)n; uint16_t calc = 0xFFFF; calc = calc_crc16_base(calc, &offset32, sizeof(offset32)); calc = calc_crc16_base(calc, &len32, sizeof(len32)); calc = calc_crc16_base(calc, buf, n); - if (calc != (uint16_t)crc) { + if (calc != (uint16_t)args->crc) { /* CRC mismatch - free last_alloc in dynamic mode to prevent leak */ if (ctx->last_alloc != 0 && ctx->free_cb) { ctx->free_cb((void*)ctx->last_alloc); ctx->last_alloc = 0; ctx->last_alloc_size = 0; } - fl_response(false, "CRC mismatch: 0x%04X != 0x%04X", (unsigned)crc, (unsigned)calc); - return; + fl_response(false, "CRC mismatch: 0x%04X != 0x%04X", (unsigned)args->crc, (unsigned)calc); + return 0; } } /* Upload to last_alloc */ if (ctx->last_alloc == 0) { fl_response(false, "No allocation, call alloc first"); - return; + return 0; } - uint8_t* dest = (uint8_t*)(ctx->last_alloc + offset); + uint8_t* dest = (uint8_t*)(ctx->last_alloc + args->addr); memcpy(dest, buf, n); @@ -360,47 +446,50 @@ static void cmd_upload(fl_context_t* ctx, uintptr_t offset, const char* data_str fl_flush_dcache(ctx, dest, n); fl_response(true, "Uploaded %d bytes to 0x%lX", n, (unsigned long)dest); + return 0; } -static void cmd_read(fl_context_t* ctx, uintptr_t addr, int len, int crc, bool force) { +static int 
cmd_read(fl_context_t* ctx, const cmd_args_t* args) { uint8_t* buf = ctx->buf; char* b64_buf = ctx->b64_buf; + int len = args->len; if (len <= 0 || (size_t)len > FL_BUF_SIZE) { fl_response(false, "Invalid length %d (max %d)", len, (int)FL_BUF_SIZE); - return; + return 0; } /* Verify request CRC if provided: covers addr(4B) + len(4B) */ - if (crc >= 0) { - uint32_t addr32 = (uint32_t)addr; + if (args->crc >= 0) { + uint32_t addr32 = (uint32_t)args->addr; uint32_t len32 = (uint32_t)len; uint16_t calc = 0xFFFF; calc = calc_crc16_base(calc, &addr32, sizeof(addr32)); calc = calc_crc16_base(calc, &len32, sizeof(len32)); - if (calc != (uint16_t)crc) { - fl_response(false, "Request CRC mismatch: 0x%04X != 0x%04X", (unsigned)crc, (unsigned)calc); - return; + if (calc != (uint16_t)args->crc) { + fl_response(false, "Request CRC mismatch: 0x%04X != 0x%04X", (unsigned)args->crc, (unsigned)calc); + return 0; } } - if (!force && !fl_check_addr_range(addr, len)) { - fl_response(false, "Invalid address range 0x%08lX+%d (use --force to override)", (unsigned long)addr, len); - return; + if (!args->force && !fl_check_addr_range(args->addr, len)) { + fl_response(false, "Invalid address range 0x%08lX+%d (use --force to override)", (unsigned long)args->addr, + len); + return 0; } /* Read memory at the given address */ - const uint8_t* src = (const uint8_t*)addr; + const uint8_t* src = (const uint8_t*)args->addr; memcpy(buf, src, len); /* Base64 encode */ if (bytes_to_base64(buf, len, b64_buf, FL_B64_BUF_SIZE) < 0) { fl_response(false, "Base64 encode failed"); - return; + return 0; } /* CRC-16 covers: addr(4B) + len(4B) + data payload */ - uint32_t resp_addr32 = (uint32_t)addr; + uint32_t resp_addr32 = (uint32_t)args->addr; uint32_t resp_len32 = (uint32_t)len; uint16_t resp_crc = 0xFFFF; resp_crc = calc_crc16_base(resp_crc, &resp_addr32, sizeof(resp_addr32)); @@ -411,44 +500,52 @@ static void cmd_read(fl_context_t* ctx, uintptr_t addr, int len, int crc, bool f fl_print("[FLOK] READ %d 
bytes crc=0x%04X data=", len, (unsigned)resp_crc); fl_print_raw(b64_buf); fl_print_raw("\n[FLEND]\n"); + return 0; } -static void cmd_write(fl_context_t* ctx, uintptr_t addr, const char* data_str, uintptr_t crc, bool verify, bool force) { +static int cmd_write(fl_context_t* ctx, const cmd_args_t* args) { + if (!args->data) { + fl_response(false, "Missing --data"); + return -1; + } + uint8_t* buf = ctx->buf; + bool verify = args->crc >= 0; - int n = base64_to_bytes(data_str, buf, FL_BUF_SIZE); + int n = base64_to_bytes(args->data, buf, FL_BUF_SIZE); if (n < 0) { fl_response(false, "Invalid base64 data"); - return; + return 0; } - if (!force && !fl_check_addr_range(addr, n)) { - fl_response(false, "Invalid address range 0x%08lX+%d (use --force to override)", (unsigned long)addr, n); - return; + if (!args->force && !fl_check_addr_range(args->addr, n)) { + fl_response(false, "Invalid address range 0x%08lX+%d (use --force to override)", (unsigned long)args->addr, n); + return 0; } if (verify) { /* CRC covers: addr(4B) + len(4B) + data payload */ - uint32_t addr32 = (uint32_t)addr; + uint32_t addr32 = (uint32_t)args->addr; uint32_t len32 = (uint32_t)n; uint16_t calc = 0xFFFF; calc = calc_crc16_base(calc, &addr32, sizeof(addr32)); calc = calc_crc16_base(calc, &len32, sizeof(len32)); calc = calc_crc16_base(calc, buf, n); - if (calc != (uint16_t)crc) { - fl_response(false, "CRC mismatch: 0x%04X != 0x%04X", (unsigned)crc, (unsigned)calc); - return; + if (calc != (uint16_t)args->crc) { + fl_response(false, "CRC mismatch: 0x%04X != 0x%04X", (unsigned)args->crc, (unsigned)calc); + return 0; } } /* Write to the specified address */ - uint8_t* dest = (uint8_t*)addr; + uint8_t* dest = (uint8_t*)args->addr; memcpy(dest, buf, n); /* Flush data cache */ fl_flush_dcache(ctx, dest, n); - fl_response(true, "WRITE %d bytes to 0x%lX", n, (unsigned long)addr); + fl_response(true, "WRITE %d bytes to 0x%lX", n, (unsigned long)args->addr); + return 0; } /** @@ -472,132 +569,145 @@ static bool 
verify_patch_crc(int crc, uint32_t comp, uintptr_t orig, uintptr_t t return true; } -static void cmd_patch(fl_context_t* ctx, uint32_t comp, uintptr_t orig, uintptr_t target, int crc) { - if (!verify_patch_crc(crc, comp, orig, target)) - return; +static int cmd_patch(fl_context_t* ctx, const cmd_args_t* args) { + if (args->orig == 0 || args->target == 0) { + fl_response(false, "Missing --orig/--target"); + return -1; + } - if (comp >= fpb_get_state()->num_code_comp || comp >= FL_MAX_SLOTS) { - fl_response(false, "Invalid comp %lu", (unsigned long)comp); - return; + if (!verify_patch_crc(args->crc, args->comp, args->orig, args->target)) + return 0; + + if ((uint32_t)args->comp >= fpb_get_state()->num_code_comp || (uint32_t)args->comp >= FL_MAX_SLOTS) { + fl_response(false, "Invalid comp %lu", (unsigned long)args->comp); + return 0; } - fpb_result_t ret = fpb_set_patch(comp, orig, target); + fpb_result_t ret = fpb_set_patch(args->comp, args->orig, args->target); if (ret != FPB_OK) { fl_response(false, "fpb_set_patch failed: %d", ret); - return; + return 0; } /* Record slot state, transfer last_alloc ownership to slot */ - ctx->slots[comp].active = true; - ctx->slots[comp].orig_addr = orig; - ctx->slots[comp].target_addr = target; - ctx->slots[comp].code_size = ctx->last_alloc_size; - ctx->slots[comp].alloc_addr = ctx->last_alloc; + ctx->slots[args->comp].active = true; + ctx->slots[args->comp].orig_addr = args->orig; + ctx->slots[args->comp].target_addr = args->target; + ctx->slots[args->comp].code_size = ctx->last_alloc_size; + ctx->slots[args->comp].alloc_addr = ctx->last_alloc; ctx->last_alloc = 0; /* Ownership transferred */ ctx->last_alloc_size = 0; - fl_response(true, "Patch %lu: 0x%08lX -> 0x%08lX", (unsigned long)comp, (unsigned long)orig, (unsigned long)target); + fl_response(true, "Patch %lu: 0x%08lX -> 0x%08lX", (unsigned long)args->comp, (unsigned long)args->orig, + (unsigned long)args->target); + return 0; } -static void cmd_tpatch(fl_context_t* ctx, 
uint32_t comp, uintptr_t orig, uintptr_t target, int crc) { +static int cmd_tpatch(fl_context_t* ctx, const cmd_args_t* args) { + if (args->orig == 0 || args->target == 0) { + fl_response(false, "Missing --orig/--target"); + return -1; + } + #ifndef FPB_NO_TRAMPOLINE - if (!verify_patch_crc(crc, comp, orig, target)) - return; + if (!verify_patch_crc(args->crc, args->comp, args->orig, args->target)) + return 0; - if (comp >= FPB_TRAMPOLINE_COUNT || comp >= FL_MAX_SLOTS) { - fl_response(false, "Invalid comp %lu (max %d)", (unsigned long)comp, FPB_TRAMPOLINE_COUNT - 1); - return; + if ((uint32_t)args->comp >= FPB_TRAMPOLINE_COUNT || (uint32_t)args->comp >= FL_MAX_SLOTS) { + fl_response(false, "Invalid comp %lu (max %d)", (unsigned long)args->comp, FPB_TRAMPOLINE_COUNT - 1); + return 0; } /* Set trampoline target in RAM */ - fpb_trampoline_set_target(comp, target); + fpb_trampoline_set_target(args->comp, args->target); /* Get trampoline address in Flash */ - uint32_t tramp_addr = fpb_trampoline_get_address(comp); + uint32_t tramp_addr = fpb_trampoline_get_address(args->comp); /* Use FPB to redirect original function to trampoline */ - fpb_result_t ret = fpb_set_patch(comp, orig, tramp_addr); + fpb_result_t ret = fpb_set_patch(args->comp, args->orig, tramp_addr); if (ret != FPB_OK) { - fpb_trampoline_clear_target(comp); + fpb_trampoline_clear_target(args->comp); fl_response(false, "fpb_set_patch failed: %d", ret); - return; + return 0; } /* Record slot state, transfer last_alloc ownership to slot */ - ctx->slots[comp].active = true; - ctx->slots[comp].orig_addr = orig; - ctx->slots[comp].target_addr = target; - ctx->slots[comp].code_size = ctx->last_alloc_size; - ctx->slots[comp].alloc_addr = ctx->last_alloc; + ctx->slots[args->comp].active = true; + ctx->slots[args->comp].orig_addr = args->orig; + ctx->slots[args->comp].target_addr = args->target; + ctx->slots[args->comp].code_size = ctx->last_alloc_size; + ctx->slots[args->comp].alloc_addr = ctx->last_alloc; 
ctx->last_alloc = 0; /* Ownership transferred */ ctx->last_alloc_size = 0; - fl_response(true, "Trampoline %lu: 0x%08lX -> tramp(0x%08lX) -> 0x%08lX", (unsigned long)comp, (unsigned long)orig, - (unsigned long)tramp_addr, (unsigned long)target); + fl_response(true, "Trampoline %lu: 0x%08lX -> tramp(0x%08lX) -> 0x%08lX", (unsigned long)args->comp, + (unsigned long)args->orig, (unsigned long)tramp_addr, (unsigned long)args->target); #else (void)ctx; - (void)comp; - (void)orig; - (void)target; - (void)crc; fl_response(false, "Trampoline disabled (FPB_NO_TRAMPOLINE)"); #endif + return 0; } -static void cmd_dpatch(fl_context_t* ctx, uint32_t comp, uintptr_t orig, uintptr_t target, int crc) { +static int cmd_dpatch(fl_context_t* ctx, const cmd_args_t* args) { + if (args->orig == 0 || args->target == 0) { + fl_response(false, "Missing --orig/--target"); + return -1; + } + #ifndef FPB_NO_DEBUGMON - if (!verify_patch_crc(crc, comp, orig, target)) - return; + if (!verify_patch_crc(args->crc, args->comp, args->orig, args->target)) + return 0; - if (comp >= FPB_DEBUGMON_MAX_REDIRECTS || comp >= FL_MAX_SLOTS) { - fl_response(false, "Invalid comp %lu (max %d)", (unsigned long)comp, FPB_DEBUGMON_MAX_REDIRECTS - 1); - return; + if ((uint32_t)args->comp >= FPB_DEBUGMON_MAX_REDIRECTS || (uint32_t)args->comp >= FL_MAX_SLOTS) { + fl_response(false, "Invalid comp %lu (max %d)", (unsigned long)args->comp, FPB_DEBUGMON_MAX_REDIRECTS - 1); + return 0; } /* Initialize DebugMonitor if not already done */ if (!fpb_debugmon_is_active()) { if (fpb_debugmon_init() != 0) { fl_response(false, "DebugMonitor init failed"); - return; + return 0; } } /* Set redirect via DebugMonitor */ - int ret = fpb_debugmon_set_redirect(comp, orig, target); + int ret = fpb_debugmon_set_redirect(args->comp, args->orig, args->target); if (ret != 0) { fl_response(false, "fpb_debugmon_set_redirect failed: %d", ret); - return; + return 0; } /* Record slot state, transfer last_alloc ownership to slot */ - 
ctx->slots[comp].active = true; - ctx->slots[comp].orig_addr = orig; - ctx->slots[comp].target_addr = target; - ctx->slots[comp].code_size = ctx->last_alloc_size; - ctx->slots[comp].alloc_addr = ctx->last_alloc; + ctx->slots[args->comp].active = true; + ctx->slots[args->comp].orig_addr = args->orig; + ctx->slots[args->comp].target_addr = args->target; + ctx->slots[args->comp].code_size = ctx->last_alloc_size; + ctx->slots[args->comp].alloc_addr = ctx->last_alloc; ctx->last_alloc = 0; /* Ownership transferred */ ctx->last_alloc_size = 0; - fl_response(true, "DebugMon %lu: 0x%08lX -> 0x%08lX", (unsigned long)comp, (unsigned long)orig, - (unsigned long)target); + fl_response(true, "DebugMon %lu: 0x%08lX -> 0x%08lX", (unsigned long)args->comp, (unsigned long)args->orig, + (unsigned long)args->target); #else (void)ctx; - (void)comp; - (void)orig; - (void)target; - (void)crc; fl_response(false, "DebugMonitor disabled (FPB_NO_DEBUGMON)"); #endif + return 0; } -static void cmd_unpatch(fl_context_t* ctx, uint32_t comp, bool all) { +static int cmd_unpatch(fl_context_t* ctx, const cmd_args_t* args) { + uint32_t comp = (uint32_t)args->comp; + bool all = args->all; uint32_t num_comps = fpb_get_state()->num_code_comp; uint32_t start = all ? 0 : comp; uint32_t end = all ? 
num_comps : comp + 1; if (!all && (comp >= num_comps || comp >= FL_MAX_SLOTS)) { fl_response(false, "Invalid comp %lu", (unsigned long)comp); - return; + return 0; } uint32_t cleared = 0; @@ -631,87 +741,106 @@ static void cmd_unpatch(fl_context_t* ctx, uint32_t comp, bool all) { } else { fl_response(true, "Cleared slot %lu", (unsigned long)comp); } + return 0; } -static void cmd_enable(fl_context_t* ctx, uint32_t comp, bool enable, bool all) { +static int cmd_enable(fl_context_t* ctx, const cmd_args_t* args) { + if (args->enable < 0) { + fl_response(false, "Missing --enable (0 or 1)"); + return -1; + } + (void)ctx; + bool en = args->enable != 0; + bool all = args->all; + uint32_t comp = (uint32_t)args->comp; uint32_t num_comps = fpb_get_state()->num_code_comp; uint32_t start = all ? 0 : comp; uint32_t end = all ? num_comps : comp + 1; if (!all && (comp >= num_comps || comp >= FL_MAX_SLOTS)) { fl_response(false, "Invalid comp %lu", (unsigned long)comp); - return; + return 0; } uint32_t changed = 0; for (uint32_t i = start; i < end && i < FL_MAX_SLOTS; i++) { - fpb_result_t ret = fpb_enable_patch(i, enable); + fpb_result_t ret = fpb_enable_patch(i, en); if (ret == FPB_OK) { changed++; } } if (all) { - fl_response(true, "%s %u patches", enable ? "Enabled" : "Disabled", (unsigned)changed); + fl_response(true, "%s %u patches", en ? "Enabled" : "Disabled", (unsigned)changed); } else { - fl_response(true, "%s patch %lu", enable ? "Enabled" : "Disabled", (unsigned long)comp); + fl_response(true, "%s patch %lu", en ? 
"Enabled" : "Disabled", (unsigned long)comp); } + return 0; } __attribute__((noinline)) void fl_hello(void) { fl_response(true, "HELLO from original fl_hello(%p) function!", (void*)fl_hello); } +static int cmd_hello(fl_context_t* ctx, const cmd_args_t* args) { + (void)ctx; + (void)args; + fl_hello(); + return 0; +} + /* =========================== FILE TRANSFER COMMANDS =========================== */ #if FL_USE_FILE -static void cmd_fopen(fl_context_t* ctx, const char* path, const char* mode) { +static int cmd_fopen(fl_context_t* ctx, const cmd_args_t* args) { + const char* mode = args->mode ? args->mode : "r"; if (!ctx->file_ctx.fs) { fl_response(false, "File context not initialized"); - return; + return 0; } - if (!path || !mode) { + if (!args->path || !mode) { fl_response(false, "Missing path or mode"); - return; + return 0; } - if (fl_file_open(&ctx->file_ctx, path, mode) != 0) { - fl_response(false, "Failed to open: %s", path); - return; + if (fl_file_open(&ctx->file_ctx, args->path, mode) != 0) { + fl_response(false, "Failed to open: %s", args->path); + return 0; } - fl_response(true, "FOPEN %s mode=%s", path, mode); + fl_response(true, "FOPEN %s mode=%s", args->path, mode); + return 0; } -static void cmd_fwrite(fl_context_t* ctx, const char* data_str, int crc) { +static int cmd_fwrite(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fp) { fl_response(false, "No file open"); - return; + return 0; } - if (!data_str) { + if (!args->data) { fl_response(false, "Missing data"); - return; + return 0; } /* Decode base64 data */ - int n = base64_to_bytes(data_str, ctx->buf, FL_BUF_SIZE); + int n = base64_to_bytes(args->data, ctx->buf, FL_BUF_SIZE); if (n < 0) { fl_response(false, "Invalid base64 data"); - return; + return 0; } /* Verify CRC if provided */ - if (crc >= 0) { + if (args->crc >= 0) { uint16_t calc = calc_crc16(ctx->buf, n); - if (calc != (uint16_t)crc) { - fl_response(false, "CRC mismatch: 0x%04X != 0x%04X", (unsigned)crc, (unsigned)calc); 
- return; + if (calc != (uint16_t)args->crc) { + fl_response(false, "CRC mismatch: 0x%04X != 0x%04X", (unsigned)args->crc, (unsigned)calc); + return 0; } } @@ -719,16 +848,18 @@ static void cmd_fwrite(fl_context_t* ctx, const char* data_str, int crc) { ssize_t written = fl_file_write(&ctx->file_ctx, ctx->buf, n); if (written < 0) { fl_response(false, "Write failed"); - return; + return 0; } fl_response(true, "FWRITE %d bytes", (int)written); + return 0; } -static void cmd_fread(fl_context_t* ctx, int len) { +static int cmd_fread(fl_context_t* ctx, const cmd_args_t* args) { + int len = args->len; if (!ctx->file_ctx.fp) { fl_response(false, "No file open"); - return; + return 0; } if (len <= 0 || len > (int)FL_BUF_SIZE) { @@ -738,18 +869,18 @@ static void cmd_fread(fl_context_t* ctx, int len) { ssize_t nread = fl_file_read(&ctx->file_ctx, ctx->buf, len); if (nread < 0) { fl_response(false, "Read failed"); - return; + return 0; } if (nread == 0) { fl_response(true, "FREAD 0 bytes EOF"); - return; + return 0; } /* Encode to base64 */ if (bytes_to_base64(ctx->buf, nread, ctx->b64_buf, FL_B64_BUF_SIZE) < 0) { fl_response(false, "Base64 encode failed"); - return; + return 0; } /* Calculate CRC */ @@ -759,39 +890,43 @@ static void cmd_fread(fl_context_t* ctx, int len) { fl_print("[FLOK] FREAD %d bytes crc=0x%04X data=", (int)nread, (unsigned)crc); fl_print_raw(ctx->b64_buf); fl_print_raw("\n[FLEND]\n"); + return 0; } -static void cmd_fclose(fl_context_t* ctx) { +static int cmd_fclose(fl_context_t* ctx, const cmd_args_t* args) { + (void)args; if (!ctx->file_ctx.fp) { fl_response(false, "No file open"); - return; + return 0; } if (fl_file_close(&ctx->file_ctx) != 0) { fl_response(false, "Close failed"); - return; + return 0; } fl_response(true, "FCLOSE"); + return 0; } -static void cmd_fcrc(fl_context_t* ctx, off_t size) { +static int cmd_fcrc(fl_context_t* ctx, const cmd_args_t* args) { + off_t size = (off_t)args->len; if (!ctx->file_ctx.fp) { fl_response(false, "No file 
open"); - return; + return 0; } /* Save current position */ off_t saved_pos = fl_file_seek(&ctx->file_ctx, 0, FL_SEEK_CUR); if (saved_pos < 0) { fl_response(false, "Failed to get current position"); - return; + return 0; } /* Seek to beginning */ if (fl_file_seek(&ctx->file_ctx, 0, FL_SEEK_SET) < 0) { fl_response(false, "Failed to seek to beginning"); - return; + return 0; } /* Calculate CRC of entire file (or specified size) */ @@ -808,7 +943,7 @@ static void cmd_fcrc(fl_context_t* ctx, off_t size) { ssize_t nread = fl_file_read(&ctx->file_ctx, ctx->buf, to_read); if (nread < 0) { fl_response(false, "Read failed during CRC calculation"); - return; + return 0; } if (nread == 0) { break; /* EOF */ @@ -824,42 +959,45 @@ static void cmd_fcrc(fl_context_t* ctx, off_t size) { fl_file_seek(&ctx->file_ctx, saved_pos, FL_SEEK_SET); fl_response(true, "FCRC size=%ld crc=0x%04X", (long)total_read, (unsigned)crc); + return 0; } -static void cmd_fseek(fl_context_t* ctx, off_t offset) { +static int cmd_fseek(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fp) { fl_response(false, "No file open"); - return; + return 0; } - off_t new_pos = fl_file_seek(&ctx->file_ctx, offset, FL_SEEK_SET); + off_t new_pos = fl_file_seek(&ctx->file_ctx, (off_t)args->addr, FL_SEEK_SET); if (new_pos < 0) { fl_response(false, "Seek failed"); - return; + return 0; } fl_response(true, "FSEEK %ld", (long)new_pos); + return 0; } -static void cmd_fstat(fl_context_t* ctx, const char* path) { +static int cmd_fstat(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fs) { fl_response(false, "File context not initialized"); - return; + return 0; } - if (!path) { + if (!args->path) { fl_response(false, "Missing path"); - return; + return 0; } fl_file_stat_t st; - if (fl_file_stat(&ctx->file_ctx, path, &st) != 0) { - fl_response(false, "Stat failed: %s", path); - return; + if (fl_file_stat(&ctx->file_ctx, args->path, &st) != 0) { + fl_response(false, "Stat failed: %s", args->path); + 
return 0; } const char* type_str = (st.type == FL_FILE_TYPE_DIR) ? "dir" : "file"; - fl_response(true, "FSTAT %s size=%u mtime=%u type=%s", path, (unsigned)st.size, (unsigned)st.mtime, type_str); + fl_response(true, "FSTAT %s size=%u mtime=%u type=%s", args->path, (unsigned)st.size, (unsigned)st.mtime, type_str); + return 0; } /* Callback context for flist count pass */ @@ -882,230 +1020,196 @@ static int flist_print_cb(const fl_dirent_t* entry, void* user_data) { return 0; } -static void cmd_flist(fl_context_t* ctx, const char* path) { +static int cmd_flist(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fs) { fl_response(false, "File context not initialized"); - return; + return 0; } - if (!path) { + if (!args->path) { fl_response(false, "Missing path"); - return; + return 0; } /* First pass: count dirs and files */ flist_count_ctx_t count_ctx = {0, 0}; - int total = fl_file_list_cb(&ctx->file_ctx, path, flist_print_cb, &count_ctx); + int total = fl_file_list_cb(&ctx->file_ctx, args->path, flist_print_cb, &count_ctx); if (total < 0) { - fl_response(false, "List failed: %s", path); - return; + fl_response(false, "List failed: %s", args->path); + return 0; } fl_response(true, "FLIST dir=%d file=%d", count_ctx.dir_count, count_ctx.file_count); + return 0; } -static void cmd_fremove(fl_context_t* ctx, const char* path) { +static int cmd_fremove(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fs) { fl_response(false, "File context not initialized"); - return; + return 0; } - if (!path) { + if (!args->path) { fl_response(false, "Missing path"); - return; + return 0; } - if (fl_file_remove(&ctx->file_ctx, path) != 0) { - fl_response(false, "Remove failed: %s", path); - return; + if (fl_file_remove(&ctx->file_ctx, args->path) != 0) { + fl_response(false, "Remove failed: %s", args->path); + return 0; } - fl_response(true, "FREMOVE %s", path); + fl_response(true, "FREMOVE %s", args->path); + return 0; } -static void cmd_fmkdir(fl_context_t* 
ctx, const char* path) { +static int cmd_fmkdir(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fs) { fl_response(false, "File context not initialized"); - return; + return 0; } - if (!path) { + if (!args->path) { fl_response(false, "Missing path"); - return; + return 0; } - if (fl_file_mkdir(&ctx->file_ctx, path) != 0) { - fl_response(false, "Mkdir failed: %s", path); - return; + if (fl_file_mkdir(&ctx->file_ctx, args->path) != 0) { + fl_response(false, "Mkdir failed: %s", args->path); + return 0; } - fl_response(true, "FMKDIR %s", path); + fl_response(true, "FMKDIR %s", args->path); + return 0; } -static void cmd_frename(fl_context_t* ctx, const char* oldpath, const char* newpath) { +static int cmd_frename(fl_context_t* ctx, const cmd_args_t* args) { if (!ctx->file_ctx.fs) { fl_response(false, "File context not initialized"); - return; + return 0; } - if (!oldpath) { + if (!args->path) { fl_response(false, "Missing path"); - return; + return 0; } - if (!newpath) { + if (!args->newpath) { fl_response(false, "Missing newpath"); - return; + return 0; } - if (fl_file_rename(&ctx->file_ctx, oldpath, newpath) != 0) { - fl_response(false, "Rename failed: %s -> %s", oldpath, newpath); - return; + if (fl_file_rename(&ctx->file_ctx, args->path, args->newpath) != 0) { + fl_response(false, "Rename failed: %s -> %s", args->path, args->newpath); + return 0; } - fl_response(true, "FRENAME %s -> %s", oldpath, newpath); + fl_response(true, "FRENAME %s -> %s", args->path, args->newpath); + return 0; } #endif /* FL_USE_FILE */ +/* =========================== + COMMAND DISPATCH TABLE + =========================== */ + +/** + * @brief Command dispatch table entry + */ +typedef struct { + const char* name; + cmd_handler_t handler; +} cmd_entry_t; + +/* clang-format off */ +static const cmd_entry_t s_cmd_table[] = { + /* Core commands */ + { "ping", cmd_ping }, + { "echo", cmd_echo }, + { "echoback", cmd_echoback }, + { "info", cmd_info }, + { "alloc", cmd_alloc }, + { 
"upload", cmd_upload }, + { "read", cmd_read }, + { "write", cmd_write }, + { "patch", cmd_patch }, + { "tpatch", cmd_tpatch }, + { "dpatch", cmd_dpatch }, + { "unpatch", cmd_unpatch }, + { "enable", cmd_enable }, + { "hello", cmd_hello }, +#if FL_USE_FILE + /* File transfer commands */ + { "fopen", cmd_fopen }, + { "fwrite", cmd_fwrite }, + { "fread", cmd_fread }, + { "fclose", cmd_fclose }, + { "fcrc", cmd_fcrc }, + { "fseek", cmd_fseek }, + { "fstat", cmd_fstat }, + { "flist", cmd_flist }, + { "fremove", cmd_fremove }, + { "fmkdir", cmd_fmkdir }, + { "frename", cmd_frename }, +#endif +}; +/* clang-format on */ + +#define CMD_TABLE_SIZE (sizeof(s_cmd_table) / sizeof(s_cmd_table[0])) + int fl_exec_cmd(fl_context_t* ctx, int argc, const char** argv) { if (argc == 0) return -1; - const char* cmd = NULL; - const char* data = NULL; - uintptr_t addr = 0; - uintptr_t orig = 0; - uintptr_t target = 0; - int crc = -1; /* -1 = no CRC provided */ - int len = 64; - int size = 0; - int comp = 0; - int all = 0; - int enable = -1; /* -1 = not specified, 0 = disable, 1 = enable */ - int force = 0; - const char* path = NULL; - const char* newpath = NULL; - const char* mode = NULL; + cmd_args_t args = {0}; + args.crc = -1; + args.len = 64; + args.enable = -1; struct argparse_option opts[] = { OPT_HELP(), - OPT_STRING('c', "cmd", &cmd, "Command", NULL, 0, 0), - OPT_INTEGER('s', "size", &size, "Alloc size", NULL, 0, 0), - OPT_POINTER('a', "addr", &addr, "Address/offset (hex)", NULL, 0, 0), - OPT_STRING('d', "data", &data, "Hex data", NULL, 0, 0), - OPT_INTEGER('r', "crc", &crc, "CRC-16 (hex)", NULL, 0, 0), - OPT_INTEGER('l', "len", &len, "Read length", NULL, 0, 0), - OPT_INTEGER(0, "comp", &comp, "Comparator ID", NULL, 0, 0), - OPT_POINTER(0, "orig", &orig, "Original addr", NULL, 0, 0), - OPT_POINTER(0, "target", &target, "Target addr", NULL, 0, 0), - OPT_BOOLEAN(0, "all", &all, "Clear all", NULL, 0, 0), - OPT_INTEGER(0, "enable", &enable, "Enable(1) or disable(0) patch", NULL, 0, 
0), - OPT_BOOLEAN(0, "force", &force, "Skip address range check", NULL, 0, 0), - OPT_STRING(0, "path", &path, "File path", NULL, 0, 0), - OPT_STRING(0, "newpath", &newpath, "New file path", NULL, 0, 0), - OPT_STRING('m', "mode", &mode, "File mode (r/w/a)", NULL, 0, 0), + OPT_STRING('c', "cmd", &args.cmd, "Command", NULL, 0, 0), + OPT_INTEGER('s', "size", &args.size, "Alloc size", NULL, 0, 0), + OPT_POINTER('a', "addr", &args.addr, "Address/offset (hex)", NULL, 0, 0), + OPT_STRING('d', "data", &args.data, "Hex data", NULL, 0, 0), + OPT_INTEGER('r', "crc", &args.crc, "CRC-16 (hex)", NULL, 0, 0), + OPT_INTEGER('l', "len", &args.len, "Read length", NULL, 0, 0), + OPT_INTEGER(0, "comp", &args.comp, "Comparator ID", NULL, 0, 0), + OPT_POINTER(0, "orig", &args.orig, "Original addr", NULL, 0, 0), + OPT_POINTER(0, "target", &args.target, "Target addr", NULL, 0, 0), + OPT_BOOLEAN(0, "all", &args.all, "Clear all", NULL, 0, 0), + OPT_INTEGER(0, "enable", &args.enable, "Enable(1) or disable(0) patch", NULL, 0, 0), + OPT_BOOLEAN(0, "force", &args.force, "Skip address range check", NULL, 0, 0), + OPT_STRING(0, "path", &args.path, "File path", NULL, 0, 0), + OPT_STRING(0, "newpath", &args.newpath, "New file path", NULL, 0, 0), + OPT_STRING('m', "mode", &args.mode, "File mode (r/w/a)", NULL, 0, 0), OPT_END(), }; struct argparse ap; - static const char* const usage[] = {"fl --cmd [opts]", NULL}; - - fl_argparse_init(&ap, opts, usage, ARGPARSE_IGNORE_UNKNOWN_ARGS); - int ret = fl_argparse_parse(&ap, argc, argv); - if (ret < 0) { + fl_argparse_init(&ap, opts, NULL, 0); + if (fl_argparse_parse(&ap, argc, argv) > 0) { fl_response(false, "Invalid arguments"); return -1; } - if (!cmd) { + if (!args.cmd) { + fl_println("Available commands:"); + for (size_t i = 0; i < CMD_TABLE_SIZE; i++) { + fl_println(" %s", s_cmd_table[i].name); + } fl_response(false, "Missing --cmd"); return -1; } - if (strcmp(cmd, "ping") == 0) { - cmd_ping(ctx); - } else if (strcmp(cmd, "echo") == 0) { - cmd_echo(ctx, 
data); - } else if (strcmp(cmd, "info") == 0) { - cmd_info(ctx); - } else if (strcmp(cmd, "alloc") == 0) { - if (size == 0) { - fl_response(false, "Missing --size"); - return -1; - } - cmd_alloc(ctx, size); - } else if (strcmp(cmd, "upload") == 0) { - if (!data) { - fl_response(false, "Missing --data"); - return -1; - } - cmd_upload(ctx, addr, data, crc, crc >= 0); - } else if (strcmp(cmd, "read") == 0) { - cmd_read(ctx, addr, len, crc, force); - } else if (strcmp(cmd, "write") == 0) { - if (!data) { - fl_response(false, "Missing --data"); - return -1; + /* Lookup command in dispatch table */ + for (size_t i = 0; i < CMD_TABLE_SIZE; i++) { + if (strcmp(args.cmd, s_cmd_table[i].name) == 0) { + return s_cmd_table[i].handler(ctx, &args); } - cmd_write(ctx, addr, data, crc, crc >= 0, force); - } else if (strcmp(cmd, "patch") == 0) { - if (orig == 0 || target == 0) { - fl_response(false, "Missing --orig/--target"); - return -1; - } - cmd_patch(ctx, comp, orig, target, crc); - } else if (strcmp(cmd, "tpatch") == 0) { - if (orig == 0 || target == 0) { - fl_response(false, "Missing --orig/--target"); - return -1; - } - cmd_tpatch(ctx, comp, orig, target, crc); - } else if (strcmp(cmd, "dpatch") == 0) { - if (orig == 0 || target == 0) { - fl_response(false, "Missing --orig/--target"); - return -1; - } - cmd_dpatch(ctx, comp, orig, target, crc); - } else if (strcmp(cmd, "unpatch") == 0) { - cmd_unpatch(ctx, comp, all); - } else if (strcmp(cmd, "enable") == 0) { - if (enable < 0) { - fl_response(false, "Missing --enable (0 or 1)"); - return -1; - } - cmd_enable(ctx, comp, enable != 0, all); -#if FL_USE_FILE - /* File transfer commands */ - } else if (strcmp(cmd, "fopen") == 0) { - cmd_fopen(ctx, path, mode ? 
mode : "r"); - } else if (strcmp(cmd, "fwrite") == 0) { - cmd_fwrite(ctx, data, crc); - } else if (strcmp(cmd, "fread") == 0) { - cmd_fread(ctx, len); - } else if (strcmp(cmd, "fclose") == 0) { - cmd_fclose(ctx); - } else if (strcmp(cmd, "fcrc") == 0) { - cmd_fcrc(ctx, (off_t)len); - } else if (strcmp(cmd, "fseek") == 0) { - cmd_fseek(ctx, (off_t)addr); - } else if (strcmp(cmd, "fstat") == 0) { - cmd_fstat(ctx, path); - } else if (strcmp(cmd, "flist") == 0) { - cmd_flist(ctx, path); - } else if (strcmp(cmd, "fremove") == 0) { - cmd_fremove(ctx, path); - } else if (strcmp(cmd, "fmkdir") == 0) { - cmd_fmkdir(ctx, path); - } else if (strcmp(cmd, "frename") == 0) { - cmd_frename(ctx, path, newpath); -#endif /* FL_USE_FILE */ - } else if (strcmp(cmd, "hello") == 0) { - fl_hello(); - } else { - fl_response(false, "Unknown: %s", cmd); - return -1; } - return 0; + fl_response(false, "Unknown: %s", args.cmd); + return -1; } diff --git a/App/func_loader/fl.h b/App/func_loader/fl.h index 7ad3394..703e19c 100644 --- a/App/func_loader/fl.h +++ b/App/func_loader/fl.h @@ -45,9 +45,9 @@ extern "C" { #ifndef FL_BUF_SIZE #define FL_BUF_SIZE 1024 #endif -#ifndef FL_B64_BUF_SIZE -#define FL_B64_BUF_SIZE 2048 -#endif + +/* Base64 output size: ceil(N/3)*4 + null terminator */ +#define FL_B64_BUF_SIZE ((FL_BUF_SIZE + 2) / 3 * 4 + 1) /* Callback types */ typedef void (*fl_output_cb_t)(void* user, const char* str); diff --git a/App/tests/test_fl.c b/App/tests/test_fl.c index 079658c..b6f475c 100644 --- a/App/tests/test_fl.c +++ b/App/tests/test_fl.c @@ -1387,6 +1387,113 @@ void test_loader_cmd_enable_unset_patch(void) { TEST_ASSERT_EQUAL(0, result); } +/* ============================================================================ + * fl_exec_cmd Tests - Echoback Command + * ============================================================================ */ + +void test_loader_cmd_echoback_basic(void) { + setup_loader(); + fl_init(&test_ctx); + + const char* argv[] = {"fl", "--cmd", 
"echoback", "--len", "16"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("FLOK")); + TEST_ASSERT(mock_output_contains("ECHOBACK 16 bytes")); + TEST_ASSERT(mock_output_contains("crc=0x")); + TEST_ASSERT(mock_output_contains("data=")); +} + +void test_loader_cmd_echoback_verify_pattern(void) { + setup_loader(); + fl_init(&test_ctx); + + /* Request 4 bytes: pattern is {0x00, 0x01, 0x02, 0x03} */ + const char* argv[] = {"fl", "--cmd", "echoback", "--len", "4"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("ECHOBACK 4 bytes")); + + /* Verify the base64 data: {0x00, 0x01, 0x02, 0x03} -> "AAECAw==" */ + TEST_ASSERT(mock_output_contains("AAECAw==")); +} + +void test_loader_cmd_echoback_verify_crc(void) { + setup_loader(); + fl_init(&test_ctx); + + /* Request 1 byte: pattern is {0x00}, CRC16 of {0x00} with init 0xFFFF */ + const char* argv[] = {"fl", "--cmd", "echoback", "--len", "1"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("ECHOBACK 1 bytes")); + TEST_ASSERT(mock_output_contains("crc=0x")); +} + +void test_loader_cmd_echoback_max_len(void) { + setup_loader(); + fl_init(&test_ctx); + + /* Request FL_BUF_SIZE (1024) bytes — should succeed */ + const char* argv[] = {"fl", "--cmd", "echoback", "--len", "1024"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("ECHOBACK 1024 bytes")); +} + +void test_loader_cmd_echoback_zero_len(void) { + setup_loader(); + fl_init(&test_ctx); + + const char* argv[] = {"fl", "--cmd", "echoback", "--len", "0"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("FLERR")); + TEST_ASSERT(mock_output_contains("Invalid length")); +} + +void test_loader_cmd_echoback_negative_len(void) { + 
setup_loader(); + fl_init(&test_ctx); + + const char* argv[] = {"fl", "--cmd", "echoback", "--len", "-1"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("FLERR")); + TEST_ASSERT(mock_output_contains("Invalid length")); +} + +void test_loader_cmd_echoback_over_max(void) { + setup_loader(); + fl_init(&test_ctx); + + /* Request FL_BUF_SIZE + 1 (1025) bytes — should fail */ + const char* argv[] = {"fl", "--cmd", "echoback", "--len", "1025"}; + int result = fl_exec_cmd(&test_ctx, 5, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("FLERR")); + TEST_ASSERT(mock_output_contains("Invalid length")); +} + +void test_loader_cmd_echoback_default_len(void) { + setup_loader(); + fl_init(&test_ctx); + + /* Without --len, default is 64 */ + const char* argv[] = {"fl", "--cmd", "echoback"}; + int result = fl_exec_cmd(&test_ctx, 3, argv); + + TEST_ASSERT_EQUAL(0, result); + TEST_ASSERT(mock_output_contains("ECHOBACK 64 bytes")); +} + /* ============================================================================ * Test Runner * ============================================================================ */ @@ -1507,4 +1614,15 @@ void run_loader_tests(void) { RUN_TEST(test_loader_cmd_enable_invalid_comp); RUN_TEST(test_loader_cmd_enable_unset_patch); TEST_SUITE_END(); + + TEST_SUITE_BEGIN("func_loader - Echoback Command"); + RUN_TEST(test_loader_cmd_echoback_basic); + RUN_TEST(test_loader_cmd_echoback_verify_pattern); + RUN_TEST(test_loader_cmd_echoback_verify_crc); + RUN_TEST(test_loader_cmd_echoback_max_len); + RUN_TEST(test_loader_cmd_echoback_zero_len); + RUN_TEST(test_loader_cmd_echoback_negative_len); + RUN_TEST(test_loader_cmd_echoback_over_max); + RUN_TEST(test_loader_cmd_echoback_default_len); + TEST_SUITE_END(); } diff --git a/Tools/WebServer/app/routes/connection.py b/Tools/WebServer/app/routes/connection.py index 7ed790b..b26bb86 100644 --- 
a/Tools/WebServer/app/routes/connection.py +++ b/Tools/WebServer/app/routes/connection.py @@ -198,7 +198,8 @@ def api_status(): "compile_commands_path": device.compile_commands_path, "watch_dirs": device.watch_dirs, "patch_mode": device.patch_mode, - "chunk_size": device.chunk_size, + "upload_chunk_size": device.upload_chunk_size, + "download_chunk_size": device.download_chunk_size, "auto_connect": device.auto_connect, "auto_compile": device.auto_compile, "enable_decompile": device.enable_decompile, diff --git a/Tools/WebServer/app/routes/fpb.py b/Tools/WebServer/app/routes/fpb.py index 86418be..86d263b 100644 --- a/Tools/WebServer/app/routes/fpb.py +++ b/Tools/WebServer/app/routes/fpb.py @@ -106,10 +106,11 @@ def do_ping(): @bp.route("/fpb/test-serial", methods=["POST"]) def api_fpb_test_serial(): """ - Test serial throughput to find max single-transfer size. + Test serial throughput with 3-phase probing. - Uses x2 stepping to probe device's receive buffer limit. - Returns max working size and recommended chunk size. + Phase 1: TX Fragment probe - detect if fragmentation is needed. + Phase 2: Upload chunk probe - find device shell buffer limit. + Phase 3: Download chunk probe - find max reliable download size. 
""" log_info, log_success, log_error, _, get_fpb_inject, _ = _get_helpers() @@ -120,30 +121,50 @@ def api_fpb_test_serial(): fpb = get_fpb_inject() - log_info("Starting serial throughput test...") + log_info("Starting 3-phase serial throughput test...") def do_test(): return fpb.test_serial_throughput( start_size=start_size, max_size=max_size, timeout=timeout ) - result = _run_serial_op(do_test, timeout=30.0) + result = _run_serial_op(do_test, timeout=60.0) if "error" in result and result.get("error"): return jsonify({"success": False, "error": result["error"]}) if result.get("success"): + # Phase 1 summary + if result.get("fragment_needed"): + log_info("Phase 1: TX fragmentation may be needed") + else: + log_info("Phase 1: TX fragmentation not needed") + + # Phase 2 summary max_working = result.get("max_working_size", 0) failed_at = result.get("failed_size", 0) - recommended = result.get("recommended_chunk_size", 64) + rec_upload = result.get("recommended_upload_chunk_size", 64) if failed_at > 0: - log_info( - f"Max working size: {max_working} bytes, failed at: {failed_at} bytes" - ) + log_info(f"Phase 2: Upload max={max_working}B, failed at {failed_at}B") else: - log_success(f"All tests passed up to {max_working} bytes") - log_info(f"Recommended chunk size: {recommended} bytes") + log_success(f"Phase 2: All upload tests passed up to {max_working}B") + log_info(f"Recommended upload chunk: {rec_upload}B") + + # Phase 3 summary + rec_download = result.get("recommended_download_chunk_size", 1024) + phases = result.get("phases", {}) + dl_phase = phases.get("download", {}) + if dl_phase.get("skipped"): + log_info(f"Phase 3: Skipped ({dl_phase.get('skip_reason', 'unknown')})") + else: + dl_max = dl_phase.get("max_working_size", 0) + dl_fail = dl_phase.get("failed_size", 0) + if dl_fail > 0: + log_info(f"Phase 3: Download max={dl_max}B, failed at {dl_fail}B") + else: + log_success(f"Phase 3: All download tests passed up to {dl_max}B") + log_info(f"Recommended download 
chunk: {rec_download}B") return jsonify(result) diff --git a/Tools/WebServer/app/routes/symbols.py b/Tools/WebServer/app/routes/symbols.py index d8dc148..79271ab 100644 --- a/Tools/WebServer/app/routes/symbols.py +++ b/Tools/WebServer/app/routes/symbols.py @@ -107,7 +107,11 @@ def _dynamic_timeout(size): Assumes ~128 bytes/chunk with ~2s per chunk worst case, plus generous headroom. """ - chunk_size = state.device.chunk_size if state.device.chunk_size > 0 else 128 + chunk_size = ( + state.device.download_chunk_size + if state.device.download_chunk_size > 0 + else 1024 + ) num_chunks = max(1, (size + chunk_size - 1) // chunk_size) return max(10.0, num_chunks * 3.0) diff --git a/Tools/WebServer/app/routes/transfer.py b/Tools/WebServer/app/routes/transfer.py index 642ce34..851430b 100644 --- a/Tools/WebServer/app/routes/transfer.py +++ b/Tools/WebServer/app/routes/transfer.py @@ -73,14 +73,19 @@ def _get_file_transfer(log_callback=None): """Get FileTransfer instance.""" *_, get_fpb_inject = _get_helpers() fpb = get_fpb_inject() - chunk_size = state.device.chunk_size or 256 + chunk_size = state.device.upload_chunk_size or 128 + download_chunk_size = state.device.download_chunk_size or 1024 max_retries = ( state.device.transfer_max_retries if hasattr(state.device, "transfer_max_retries") else 10 ) return FileTransfer( - fpb, chunk_size=chunk_size, max_retries=max_retries, log_callback=log_callback + fpb, + upload_chunk_size=chunk_size, + download_chunk_size=download_chunk_size, + max_retries=max_retries, + log_callback=log_callback, ) @@ -395,7 +400,7 @@ def do_upload(): return uploaded = 0 - chunk_size = ft.chunk_size + chunk_size = ft.upload_chunk_size ft.reset_stats() # Reset stats before transfer while uploaded < total_size: # Check cancel before each chunk @@ -433,8 +438,8 @@ def do_upload(): ft.fclose() return - # Verify CRC if enabled - if state.device.verify_crc and total_size > 0: + # Always verify CRC + if total_size > 0: expected_crc = crc16(file_data) 
success, dev_size, dev_crc = ft.fcrc(total_size) if not success: @@ -646,7 +651,7 @@ def do_download(): return file_data = b"" - chunk_size = ft.chunk_size + chunk_size = ft.download_chunk_size current_offset = 0 ft.reset_stats() # Reset stats before transfer while True: @@ -690,8 +695,8 @@ def do_download(): ft.fclose() return - # Verify CRC if enabled - if state.device.verify_crc and len(file_data) > 0: + # Always verify CRC + if len(file_data) > 0: local_crc = crc16(file_data) success, dev_size, dev_crc = ft.fcrc(len(file_data)) if not success: diff --git a/Tools/WebServer/cli/fpb_cli.py b/Tools/WebServer/cli/fpb_cli.py index 929e510..7e2db7f 100755 --- a/Tools/WebServer/cli/fpb_cli.py +++ b/Tools/WebServer/cli/fpb_cli.py @@ -56,9 +56,10 @@ def __init__(self): self.inject_base = 0x20001000 self.cached_slots = None # Cache for slot state self.slot_update_id = 0 - self.chunk_size = 128 # Default chunk size for upload - self.tx_chunk_size = 0 # 0 = disabled, >0 = chunk size for TX - self.tx_chunk_delay = 0.005 # Delay between TX chunks (seconds) + self.upload_chunk_size = 128 # Default chunk size for upload + self.download_chunk_size = 1024 # Default chunk size for download + self.serial_tx_fragment_size = 0 # 0 = disabled, >0 = fragment size for TX + self.serial_tx_fragment_delay = 0.002 # Delay between TX fragments (seconds) self.transfer_max_retries = 10 # Max retries for file transfer def add_tool_log(self, message): @@ -98,7 +99,7 @@ def __init__( elf_path: Optional[str] = None, compile_commands: Optional[str] = None, tx_chunk_size: int = 0, - tx_chunk_delay: float = 0.005, + tx_chunk_delay: float = 0.002, max_retries: int = 10, ): self.verbose = verbose @@ -107,8 +108,8 @@ def __init__( self._device_state = DeviceState() self._device_state.elf_path = elf_path self._device_state.compile_commands_path = compile_commands - self._device_state.tx_chunk_size = tx_chunk_size - self._device_state.tx_chunk_delay = tx_chunk_delay + 
self._device_state.serial_tx_fragment_size = tx_chunk_size + self._device_state.serial_tx_fragment_delay = tx_chunk_delay self._device_state.transfer_max_retries = max_retries self._fpb = FPBInject(self._device_state) @@ -526,7 +527,11 @@ def file_list(self, path: str = "/") -> None: raise FPBCLIError("No device connected.") from core.file_transfer import FileTransfer - ft = FileTransfer(self._fpb, chunk_size=self._device_state.chunk_size) + ft = FileTransfer( + self._fpb, + upload_chunk_size=self._device_state.upload_chunk_size, + download_chunk_size=self._device_state.download_chunk_size, + ) success, entries = ft.flist(path) if not success: raise FPBCLIError(f"Failed to list directory: {path}") @@ -541,7 +546,11 @@ def file_stat(self, path: str) -> None: raise FPBCLIError("No device connected.") from core.file_transfer import FileTransfer - ft = FileTransfer(self._fpb, chunk_size=self._device_state.chunk_size) + ft = FileTransfer( + self._fpb, + upload_chunk_size=self._device_state.upload_chunk_size, + download_chunk_size=self._device_state.download_chunk_size, + ) success, stat = ft.fstat(path) if not success: raise FPBCLIError(f"Failed to stat: {stat.get('error', 'unknown')}") @@ -559,7 +568,8 @@ def file_download(self, remote_path: str, local_path: str) -> None: ft = FileTransfer( self._fpb, - chunk_size=self._device_state.chunk_size, + upload_chunk_size=self._device_state.upload_chunk_size, + download_chunk_size=self._device_state.download_chunk_size, max_retries=self._device_state.transfer_max_retries, ) success, data, msg = ft.download(remote_path) diff --git a/Tools/WebServer/core/config_schema.py b/Tools/WebServer/core/config_schema.py index bf2c68f..e64d98e 100644 --- a/Tools/WebServer/core/config_schema.py +++ b/Tools/WebServer/core/config_schema.py @@ -224,26 +224,41 @@ def to_dict(self) -> dict: ), # === Transfer Parameters === ConfigItem( - key="chunk_size", - label="Chunk Size", + key="upload_chunk_size", + label="Upload Chunk", 
group=ConfigGroup.TRANSFER, config_type=ConfigType.NUMBER, default=128, - tooltip="Size of each uploaded data block. Smaller values are more stable but slower.", + tooltip="Size of each data block for PC→Device transfers (upload/inject/mem_write). " + "Limited by device shell receive buffer.", min_value=16, - max_value=1024, + max_value=512, step=16, unit="Bytes", order=10, ), ConfigItem( - key="tx_chunk_size", - label="TX Chunk", + key="download_chunk_size", + label="Download Chunk", + group=ConfigGroup.TRANSFER, + config_type=ConfigType.NUMBER, + default=1024, + tooltip="Size of each data block for Device→PC transfers (download/mem_read/mem_dump). " + "Can be much larger than upload chunk since device puts has no buffer limit.", + min_value=128, + max_value=8192, + step=128, + unit="Bytes", + order=15, + ), + ConfigItem( + key="serial_tx_fragment_size", + label="TX Fragment", group=ConfigGroup.TRANSFER, config_type=ConfigType.NUMBER, default=0, - tooltip="TX chunk size for serial commands (bytes). 0 = disabled. " - "Workaround for slow serial drivers.", + tooltip="Serial TX fragment size (bytes). 0 = disabled. " + "Workaround for slow serial drivers that drop data on large writes.", min_value=0, max_value=256, step=8, @@ -251,18 +266,18 @@ def to_dict(self) -> dict: order=20, ), ConfigItem( - key="tx_chunk_delay", - label="TX Delay", + key="serial_tx_fragment_delay", + label="TX Fragment Delay", group=ConfigGroup.TRANSFER, config_type=ConfigType.NUMBER, - default=0.005, - tooltip="Delay between TX chunks. Only used when TX Chunk > 0.", + default=0.002, + tooltip="Delay between TX fragments. 
Only used when TX Fragment > 0.", min_value=0.001, max_value=0.1, step=0.001, unit="ms", ui_multiplier=1000, # Display as milliseconds - order=30, + order=25, ), ConfigItem( key="transfer_max_retries", @@ -290,15 +305,6 @@ def to_dict(self) -> dict: unit="times", order=45, ), - ConfigItem( - key="verify_crc", - label="Verify CRC after Transfer", - group=ConfigGroup.TRANSFER, - config_type=ConfigType.BOOLEAN, - default=True, - tooltip="Verify file integrity with CRC after transfer", - order=50, - ), # === Logging Settings === ConfigItem( key="log_file_path", diff --git a/Tools/WebServer/core/file_transfer.py b/Tools/WebServer/core/file_transfer.py index fc98ee8..6564ab9 100644 --- a/Tools/WebServer/core/file_transfer.py +++ b/Tools/WebServer/core/file_transfer.py @@ -37,13 +37,15 @@ def _format_path_arg(path: str) -> str: class FileTransfer: """File transfer handler for device communication.""" - DEFAULT_CHUNK_SIZE = 256 + DEFAULT_UPLOAD_CHUNK_SIZE = 128 + DEFAULT_DOWNLOAD_CHUNK_SIZE = 1024 DEFAULT_MAX_RETRIES = 10 def __init__( self, fpb_inject, - chunk_size: int = DEFAULT_CHUNK_SIZE, + upload_chunk_size: int = DEFAULT_UPLOAD_CHUNK_SIZE, + download_chunk_size: int = DEFAULT_DOWNLOAD_CHUNK_SIZE, max_retries: int = DEFAULT_MAX_RETRIES, log_callback: Callable[[str], None] = None, ): @@ -52,12 +54,14 @@ def __init__( Args: fpb_inject: FPBInject instance for device communication - chunk_size: Size of data chunks for transfer (default 256) + upload_chunk_size: Size of data chunks for upload (default 128) + download_chunk_size: Size of data chunks for download (default 1024) max_retries: Maximum retry attempts for transfer (default 10) log_callback: Optional callback for logging transfer events to UI """ self.fpb = fpb_inject - self.chunk_size = chunk_size + self.upload_chunk_size = upload_chunk_size + self.download_chunk_size = download_chunk_size self.max_retries = max_retries self.log_callback = log_callback @@ -198,7 +202,7 @@ def fread( Read data from open file on 
device with retry support. Args: - size: Maximum bytes to read (default: chunk_size) + size: Maximum bytes to read (default: download_chunk_size) max_retries: Maximum retry attempts (default: self.max_retries) current_offset: Current file offset for seek on retry (optional) @@ -206,7 +210,7 @@ def fread( Tuple of (success, data_bytes, message) """ if size is None: - size = self.chunk_size + size = self.download_chunk_size if max_retries is None: max_retries = self.max_retries @@ -471,7 +475,6 @@ def upload( local_data: bytes, remote_path: str, progress_cb: Optional[Callable[[int, int], None]] = None, - verify_crc: bool = True, ) -> Tuple[bool, str]: """ Upload data to a file on device. @@ -480,7 +483,6 @@ def upload( local_data: Data bytes to upload remote_path: Destination path on device progress_cb: Optional callback(uploaded_bytes, total_bytes) - verify_crc: If True, verify entire file CRC after upload Returns: Tuple of (success, message) @@ -495,7 +497,7 @@ def upload( try: uploaded = 0 while uploaded < total_size: - chunk = local_data[uploaded : uploaded + self.chunk_size] + chunk = local_data[uploaded : uploaded + self.upload_chunk_size] # Pass current offset for seek on retry success, msg = self.fwrite(chunk, current_offset=uploaded) if not success: @@ -507,7 +509,8 @@ def upload( progress_cb(uploaded, total_size) # Verify entire file CRC before closing - if verify_crc and total_size > 0: + # Always verify entire file CRC before closing + if total_size > 0: expected_crc = crc16(local_data) success, dev_size, dev_crc = self.fcrc(total_size) if not success: @@ -550,7 +553,6 @@ def download( self, remote_path: str, progress_cb: Optional[Callable[[int, int], None]] = None, - verify_crc: bool = True, ) -> Tuple[bool, bytes, str]: """ Download a file from device. 
@@ -558,7 +560,6 @@ def download( Args: remote_path: Source path on device progress_cb: Optional callback(downloaded_bytes, total_bytes) - verify_crc: If True, verify entire file CRC after download Returns: Tuple of (success, data_bytes, message) @@ -583,7 +584,7 @@ def download( while True: # Pass current offset for seek on retry success, chunk, msg = self.fread( - self.chunk_size, current_offset=current_offset + self.download_chunk_size, current_offset=current_offset ) if not success: self.fclose() @@ -598,7 +599,8 @@ def download( progress_cb(len(data), total_size) # Verify entire file CRC before closing - if verify_crc and len(data) > 0: + # Always verify entire file CRC before closing + if len(data) > 0: local_crc = crc16(data) success, dev_size, dev_crc = self.fcrc(len(data)) if not success: diff --git a/Tools/WebServer/core/gdb_manager.py b/Tools/WebServer/core/gdb_manager.py index ca87c66..e67c79f 100644 --- a/Tools/WebServer/core/gdb_manager.py +++ b/Tools/WebServer/core/gdb_manager.py @@ -80,7 +80,7 @@ def write_memory_fn(addr, data): read_memory_fn=read_memory_fn, write_memory_fn=write_memory_fn, listen_port=DEFAULT_RSP_PORT, - cache_line_size=getattr(device, "chunk_size", 128), + cache_line_size=getattr(device, "download_chunk_size", 1024), ) _apply_elf_memory_regions(bridge, elf_path) port = bridge.start() @@ -215,7 +215,7 @@ def start_external_gdb_server(state, read_memory_fn=None, write_memory_fn=None) read_memory_fn=read_memory_fn, write_memory_fn=write_memory_fn, listen_port=port, - cache_line_size=getattr(device, "chunk_size", 128), + cache_line_size=getattr(device, "download_chunk_size", 1024), ) _apply_elf_memory_regions(bridge, device.elf_path) actual_port = bridge.start() diff --git a/Tools/WebServer/core/serial_protocol.py b/Tools/WebServer/core/serial_protocol.py index a2d2d0a..6cf2a04 100644 --- a/Tools/WebServer/core/serial_protocol.py +++ b/Tools/WebServer/core/serial_protocol.py @@ -202,15 +202,15 @@ def send_cmd( 
ser.reset_input_buffer() data_bytes = (full_cmd + "\n").encode() - tx_chunk_size = getattr(self.device, "tx_chunk_size", 0) - tx_chunk_delay = getattr(self.device, "tx_chunk_delay", 0.005) - if tx_chunk_size > 0 and len(data_bytes) > tx_chunk_size: - for i in range(0, len(data_bytes), tx_chunk_size): - chunk = data_bytes[i : i + tx_chunk_size] + tx_fragment_size = getattr(self.device, "serial_tx_fragment_size", 0) + tx_fragment_delay = getattr(self.device, "serial_tx_fragment_delay", 0.002) + if tx_fragment_size > 0 and len(data_bytes) > tx_fragment_size: + for i in range(0, len(data_bytes), tx_fragment_size): + chunk = data_bytes[i : i + tx_fragment_size] ser.write(chunk) ser.flush() - if i + tx_chunk_size < len(data_bytes): - time.sleep(tx_chunk_delay) + if i + tx_fragment_size < len(data_bytes): + time.sleep(tx_fragment_delay) else: ser.write(data_bytes) ser.flush() @@ -489,7 +489,9 @@ def upload( """Upload binary data in chunks using base64 encoding.""" total = len(data) data_offset = 0 - bytes_per_chunk = self.device.chunk_size if self.device.chunk_size > 0 else 128 + bytes_per_chunk = ( + self.device.upload_chunk_size if self.device.upload_chunk_size > 0 else 128 + ) upload_start = time.time() chunk_count = 0 @@ -579,7 +581,11 @@ def read_memory( Returns (data_bytes, message) on success, (None, error_msg) on failure. """ - bytes_per_chunk = self.device.chunk_size if self.device.chunk_size > 0 else 128 + bytes_per_chunk = ( + self.device.download_chunk_size + if self.device.download_chunk_size > 0 + else 1024 + ) buf = bytearray() offset = 0 @@ -622,7 +628,9 @@ def write_memory( Returns (success, message). 
""" - bytes_per_chunk = self.device.chunk_size if self.device.chunk_size > 0 else 128 + bytes_per_chunk = ( + self.device.upload_chunk_size if self.device.upload_chunk_size > 0 else 128 + ) total = len(data) offset = 0 @@ -727,10 +735,263 @@ def enable_patch( except Exception as e: return False, str(e) + def _probe_echo(self, test_size: int, timeout: float = 2.0) -> Dict: + """Send an echo command of given size and verify CRC. + + Returns a test_result dict with keys: size, cmd_len, passed, error, + response_time_ms. + """ + hex_data = "".join(f"{(i % 256):02X}" for i in range(test_size)) + cmd = f"-c echo -d {hex_data}" + + test_result: Dict = { + "size": test_size, + "cmd_len": len(cmd), + "passed": False, + "error": None, + "response_time_ms": 0, + } + + try: + start_time = time.time() + response = self.send_cmd(cmd, timeout=timeout) + elapsed_ms = (time.time() - start_time) * 1000 + test_result["response_time_ms"] = round(elapsed_ms, 2) + + if "[FLOK]" in response: + expected_crc = crc16(hex_data.encode("ascii")) + crc_match = re.search(r"0x([0-9A-Fa-f]{4})", response) + if crc_match: + received_crc = int(crc_match.group(1), 16) + if received_crc == expected_crc: + test_result["passed"] = True + else: + test_result["error"] = ( + f"CRC mismatch: expected 0x{expected_crc:04X}, " + f"got 0x{received_crc:04X}" + ) + else: + test_result["passed"] = True + else: + if "[FLERR]" in response: + test_result["error"] = "Device returned error" + elif not response: + test_result["error"] = "No response (timeout)" + else: + test_result["error"] = "Incomplete/invalid response" + except Exception as e: + test_result["error"] = str(e) + + return test_result + + def _phase_fragment_probe(self, timeout: float = 2.0) -> Dict: + """Phase 1: Detect whether TX fragmentation is needed. + + Sends a medium-length echo command (256 bytes). If it succeeds, + fragmentation is not needed. 
If it fails, fragmentation is required + but the optimal fragment size is left to the user or a future probe. + """ + result: Dict = { + "needed": False, + "test": None, + } + max_retries = 3 + for attempt in range(max_retries): + probe = self._probe_echo(256, timeout=timeout) + if probe["passed"]: + break + result["test"] = probe + if not probe["passed"]: + result["needed"] = True + return result + + def _phase_upload_probe( + self, start_size: int = 16, max_size: int = 512, timeout: float = 2.0 + ) -> Dict: + """Phase 2: Find the device shell receive buffer limit (upload direction). + + Uses increasing echo commands with x1.4 stepping. + """ + result: Dict = { + "max_working_size": 0, + "failed_size": 0, + "recommended_upload_chunk_size": 0, + "tests": [], + } + + test_size = start_size + max_working = 0 + max_retries = 3 + + while test_size <= max_size: + passed = False + probe = None + for attempt in range(max_retries): + probe = self._probe_echo(test_size, timeout=timeout) + if probe["passed"]: + passed = True + break + result["tests"].append(probe) + + if passed: + max_working = test_size + else: + result["failed_size"] = test_size + break + + test_size = max(test_size + 2, int(test_size * 1.4) // 2 * 2) + + result["max_working_size"] = max_working + if max_working > 0: + result["recommended_upload_chunk_size"] = (max_working * 3) // 4 + if result["recommended_upload_chunk_size"] < 16: + result["recommended_upload_chunk_size"] = max_working + return result + + def _probe_echoback(self, test_size: int, timeout: float = 3.0) -> Dict: + """Send an echoback request and verify the response. + + The device fills its send buffer with a known pattern of `test_size` + bytes and returns it as base64 with CRC. This tests the download + direction (device → PC) throughput. + + Protocol: + Request: fl -c echoback --len {N} + Response: [FLOK] ECHOBACK {N} bytes crc=0x{CRC} data={base64} + + Returns a test_result dict with keys: size, passed, error, + response_time_ms. 
+ """ + cmd = f"-c echoback --len {test_size}" + + test_result: Dict = { + "size": test_size, + "passed": False, + "error": None, + "response_time_ms": 0, + } + + try: + start_time = time.time() + response = self.send_cmd(cmd, timeout=timeout) + elapsed_ms = (time.time() - start_time) * 1000 + test_result["response_time_ms"] = round(elapsed_ms, 2) + + if "[FLOK]" in response: + # Parse: [FLOK] ECHOBACK {N} bytes crc=0x{CRC} data={base64} + data_match = re.search(r"data=(\S+)", response) + crc_match = re.search(r"crc=0x([0-9A-Fa-f]{4})", response) + if data_match and crc_match: + raw = base64.b64decode(data_match.group(1)) + received_crc = int(crc_match.group(1), 16) + expected_crc = crc16(raw) + if len(raw) == test_size and received_crc == expected_crc: + test_result["passed"] = True + elif len(raw) != test_size: + test_result["error"] = ( + f"Size mismatch: expected {test_size}, got {len(raw)}" + ) + else: + test_result["error"] = ( + f"CRC mismatch: expected 0x{expected_crc:04X}, " + f"got 0x{received_crc:04X}" + ) + elif not data_match: + test_result["error"] = "No data in response" + else: + test_result["error"] = "No CRC in response" + else: + if "[FLERR]" in response: + test_result["error"] = "Device returned error" + elif not response: + test_result["error"] = "No response (timeout)" + else: + test_result["error"] = "Incomplete/invalid response" + except Exception as e: + test_result["error"] = str(e) + + return test_result + + def _phase_download_probe( + self, + start_size: int = 256, + max_size: int = 8192, + timeout: float = 3.0, + ) -> Dict: + """Phase 3: Find the max reliable download chunk size. + + Uses the `echoback` command to have the device generate and send + increasing amounts of data. The device reuses its send buffer to + fill with a pattern, so no RAM allocation is needed. 
+ """ + result: Dict = { + "max_working_size": 0, + "failed_size": 0, + "recommended_download_chunk_size": 1024, + "tests": [], + "skipped": False, + } + + max_retries = 3 + + # Quick check: does the device support echoback? + probe = self._probe_echoback(start_size, timeout=timeout) + if probe.get("error") and ( + "Device returned error" in (probe["error"] or "") + or "No response" in (probe["error"] or "") + ): + result["skipped"] = True + result["skip_reason"] = ( + f"Device does not support echoback command: {probe['error']}" + ) + return result + + result["tests"].append(probe) + if not probe["passed"]: + result["failed_size"] = start_size + return result + + max_working = start_size + test_size = max(start_size + 64, int(start_size * 1.5) // 64 * 64) + + while test_size <= max_size: + passed = False + probe = None + for attempt in range(max_retries): + probe = self._probe_echoback(test_size, timeout=timeout) + if probe["passed"]: + passed = True + break + result["tests"].append(probe) + + if passed: + max_working = test_size + else: + result["failed_size"] = test_size + break + + test_size = max(test_size + 64, int(test_size * 1.5) // 64 * 64) + + result["max_working_size"] = max_working + if max_working > 0: + # Download direction is more stable, use 85% safety margin + recommended = (max_working * 85) // 100 // 64 * 64 + if recommended < 128: + recommended = max_working + result["recommended_download_chunk_size"] = recommended + return result + def test_serial_throughput( - self, start_size: int = 16, max_size: int = 4096, timeout: float = 2.0 + self, start_size: int = 16, max_size: int = 512, timeout: float = 2.0 ) -> Dict: - """Test serial port throughput by sending increasing data sizes.""" + """Test serial throughput with 3-phase probing. + + Phase 1: TX Fragment probe - detect if PC→device needs fragmentation. + Phase 2: Upload chunk probe - find device shell buffer limit. + Phase 3: Download chunk probe - find max reliable download size. 
+ + Returns a comprehensive result dict with recommended parameters. + """ if self.device is None or self.device.ser is None: return { "success": False, @@ -738,95 +999,59 @@ def test_serial_throughput( "max_working_size": 0, "failed_size": 0, "tests": [], - "recommended_chunk_size": 16, + "recommended_upload_chunk_size": 16, + "recommended_download_chunk_size": 1024, + "fragment_needed": False, } - results = { + results: Dict = { "success": True, "max_working_size": 0, "failed_size": 0, "tests": [], - "recommended_chunk_size": 16, + "recommended_upload_chunk_size": 16, + "recommended_download_chunk_size": 1024, + "fragment_needed": False, + "phases": {}, } try: - test_size = start_size - max_working = 0 - - while test_size <= max_size: - hex_data = "".join(f"{(i % 256):02X}" for i in range(test_size)) - cmd = f"-c echo -d {hex_data}" - - test_result = { - "size": test_size, - "cmd_len": len(cmd), - "passed": False, - "error": None, - "response_time_ms": 0, - } - - try: - start_time = time.time() - response = self.send_cmd(cmd, timeout=timeout) - elapsed_ms = (time.time() - start_time) * 1000 - test_result["response_time_ms"] = round(elapsed_ms, 2) - - if "[FLOK]" in response: - expected_crc = crc16(hex_data.encode("ascii")) - crc_match = re.search(r"0x([0-9A-Fa-f]{4})", response) - if crc_match: - received_crc = int(crc_match.group(1), 16) - if received_crc == expected_crc: - test_result["passed"] = True - max_working = test_size - else: - test_result["passed"] = False - test_result["error"] = ( - f"CRC mismatch: expected 0x{expected_crc:04X}, " - f"got 0x{received_crc:04X}" - ) - results["failed_size"] = test_size - results["tests"].append(test_result) - break - else: - test_result["passed"] = True - max_working = test_size - else: - test_result["passed"] = False - if "[FLERR]" in response: - test_result["error"] = "Device returned error" - elif not response: - test_result["error"] = "No response (timeout)" - else: - test_result["error"] = "Incomplete/invalid 
response" - results["failed_size"] = test_size - results["tests"].append(test_result) - break - - except Exception as e: - test_result["passed"] = False - test_result["error"] = str(e) - results["failed_size"] = test_size - results["tests"].append(test_result) - break - - results["tests"].append(test_result) - test_size = max(test_size + 2, int(test_size * 1.4) // 2 * 2) - - results["max_working_size"] = max_working - if max_working > 0: - # Use 75% of max working size as safe chunk size - # Don't force minimum - respect actual device capability - results["recommended_chunk_size"] = (max_working * 3) // 4 - if results["recommended_chunk_size"] < 16: - results["recommended_chunk_size"] = max_working - else: + # Phase 1: TX Fragment probe + frag = self._phase_fragment_probe(timeout=timeout) + results["phases"]["fragment"] = frag + results["fragment_needed"] = frag["needed"] + + # Phase 2: Upload chunk probe (same as old test_serial_throughput) + upload = self._phase_upload_probe( + start_size=start_size, max_size=max_size, timeout=timeout + ) + results["phases"]["upload"] = upload + results["tests"] = upload["tests"] # backward compat + results["max_working_size"] = upload["max_working_size"] + results["failed_size"] = upload["failed_size"] + results["recommended_upload_chunk_size"] = upload[ + "recommended_upload_chunk_size" + ] + + if upload["max_working_size"] == 0: results["success"] = False results["error"] = ( "Serial communication failed at minimum size " f"({start_size} bytes). Check connection and try again." 
) - results["recommended_chunk_size"] = 0 + results["recommended_upload_chunk_size"] = 0 + return results + + # Phase 3: Download chunk probe (uses echoback command) + download = self._phase_download_probe( + start_size=256, + max_size=8192, + timeout=max(timeout, 3.0), + ) + results["phases"]["download"] = download + results["recommended_download_chunk_size"] = download[ + "recommended_download_chunk_size" + ] except Exception as e: results["success"] = False diff --git a/Tools/WebServer/docs/serial-params-refactor-proposal.md b/Tools/WebServer/docs/serial-params-refactor-proposal.md new file mode 100644 index 0000000..b5105bc --- /dev/null +++ b/Tools/WebServer/docs/serial-params-refactor-proposal.md @@ -0,0 +1,284 @@ +# 串口传输参数重构方案 + +## 1. 现状分析 + +### 1.1 当前参数命名 + +| 参数名 | UI 标签 | 默认值 | 实际含义 | +|--------|---------|--------|----------| +| `chunk_size` | Chunk Size | 128 | 上传/下载共用的数据块大小 | +| `tx_chunk_size` | TX Chunk | 0 | PC→设备命令分片大小(workaround) | +| `tx_chunk_delay` | TX Delay | 5ms | 命令分片间延迟 | +| `transfer_max_retries` | Max Retries | 10 | 传输失败重试次数 | +| `wakeup_shell_cnt` | Wakeup Count | 3 | 进入 fl 模式前发送换行数 | +| `verify_crc` | Verify CRC | True | 传输后 CRC 校验(强制开启,不可配置) | + +### 1.2 核心问题 + +#### 问题 1:命名混淆 + +- `chunk_size` vs `tx_chunk_size` 容易混淆,用户不清楚两者的区别 +- `tx_chunk_size` 实际是一个 workaround(解决慢速串口驱动丢数据),不是常规传输参数 +- `tx_chunk_delay` 依赖 `tx_chunk_size > 0` 才生效,但 UI 上没有明确关联 + +#### 问题 2:上传/下载共用 chunk_size + +当前 `chunk_size` 同时控制两个方向: + +``` +上传 (PC → 设备): + chunk_size=128 → base64 编码后 ~176 字节 → 加命令前缀 ~220 字节 + 受限于: 设备 shell 接收缓冲区大小(通常 256-512 字节) + +下载 (设备 → PC): + chunk_size=128 → 设备 puts 输出 ~176 字节 base64 + 受限于: PC 串口接收能力(通常远大于设备缓冲区) +``` + +**不对称性**:设备 shell 接收缓冲区通常很小(256-512B),但设备 `puts` 输出能力远大于此。PC 端接收能力几乎无限。因此下载方向可以使用远大于上传方向的 chunk_size,显著提升下载速度。 + +#### 问题 3:test_serial 只测单方向 + +当前 `test_serial_throughput` 只测试 PC→设备方向(发送 echo 命令),其 `recommended_chunk_size` 建议值直接用于全局 `chunk_size`,导致下载方向也被限制在较小的值。 + +### 1.3 数据流分析 + +``` 
+┌──────────────────────────────────────────────────────────┐ +│ 上传 (PC → 设备) │ +│ │ +│ PC 发送命令: │ +│ "fl -c upload -a 0x{addr} -d {base64} -r 0x{crc}\n" │ +│ ↑ ↑ │ +│ 命令前缀 ~40B chunk_size 经 base64 膨胀 │ +│ │ +│ 总命令长度 ≈ 40 + chunk_size * 4/3 │ +│ 受限于: 设备 shell 接收缓冲区 (CONFIG_NSH_LINELEN) │ +│ │ +│ serial_tx_fragment 在此层再次分片发送 │ +│ (workaround: 某些串口驱动一次写入太多会丢数据) │ +└──────────────────────────────────────────────────────────┘ + +┌──────────────────────────────────────────────────────────┐ +│ 下载 (设备 → PC) │ +│ │ +│ PC 发送请求: │ +│ "fl -c fread --len {chunk_size}\n" (~30B, 很短) │ +│ │ +│ 设备响应: │ +│ "FREAD {n} bytes crc=0x{crc} data={base64}\n" │ +│ ↑ ↑ │ +│ 响应前缀 ~30B chunk_size 经 base64 │ +│ │ +│ 总响应长度 ≈ 30 + chunk_size * 4/3 │ +│ 受限于: 设备 puts 输出能力 (通常无限制) │ +│ PC 串口接收能力 (通常无限制) │ +└──────────────────────────────────────────────────────────┘ +``` + +--- + +## 2. 重构方案 + +### 2.1 参数重命名 + +| 旧名称 | 新名称 | 新 UI 标签 | 说明 | +|--------|--------|-----------|------| +| `chunk_size` | `upload_chunk_size` | Upload Chunk | 上传方向数据块大小 | +| _(新增)_ | `download_chunk_size` | Download Chunk | 下载方向数据块大小 | +| `tx_chunk_size` | `serial_tx_fragment_size` | TX Fragment | 串口发送分片大小(0=禁用) | +| `tx_chunk_delay` | `serial_tx_fragment_delay` | TX Fragment Delay | 分片间延迟 | +| `transfer_max_retries` | `transfer_max_retries` | Max Retries | 不变 | +| `wakeup_shell_cnt` | `wakeup_shell_cnt` | Wakeup Count | 不变 | +| `verify_crc` | _(删除)_ | — | CRC 校验强制开启,移除配置项 | + +**命名原则**: +- `upload/download` 明确方向,消除歧义 +- `fragment` 区别于 `chunk`:chunk 是应用层数据分块,fragment 是传输层命令分片(workaround) +- 不做旧参数迁移,用户重新执行一次串口连通性测试即可自动获得最优参数 + +### 2.2 参数默认值 + +| 参数 | 默认值 | 范围 | 说明 | +|------|--------|------|------| +| `upload_chunk_size` | 128 | 16-512, step=16 | 受限于设备 shell 缓冲区 | +| `download_chunk_size` | 1024 | 128-8192, step=128 | 设备 puts 通常无限制 | +| `serial_tx_fragment_size` | 0 | 0-256, step=8 | 0=禁用 | +| `serial_tx_fragment_delay` | 2ms | 1-100ms, step=1 | 仅 fragment > 0 时生效 | + +### 2.3 串口连通性测试(test_serial)重构 + 
+将现有的单向测试扩展为三阶段双向探测,集成在现有的 "串口连通性测试" 功能中: + +``` +┌─────────────────────────────────────────────────────┐ +│ 串口连通性测试 (test_serial) 流程 │ +├─────────────────────────────────────────────────────┤ +│ │ +│ Phase 1: TX Fragment 探测 │ +│ ───────────────────── │ +│ 目的: 检测 PC→设备 是否需要分片发送 │ +│ │ +│ 1. 发送一条中等长度命令 (256B echo) │ +│ 2. 如果成功 → fragment 不需要, 设为 0 │ +│ 3. 如果失败 → 启用 fragment, 二分法找最大可靠值 │ +│ - 从 128 开始, 逐步缩小直到稳定 │ +│ - 设置 fragment_delay = 2ms (保守值) │ +│ │ +│ Phase 2: Upload Chunk 探测 │ +│ ───────────────────── │ +│ 目的: 找到设备 shell 接收缓冲区的安全上限 │ +│ (与现有 test_serial_throughput 逻辑一致) │ +│ │ +│ 1. 从 start_size (16B) 开始 │ +│ 2. 生成 test_size 字节数据, 构造 echo 命令 │ +│ 3. 发送并验证 CRC │ +│ 4. 逐步增大 (×1.4), 直到失败 │ +│ 5. upload_chunk_size = last_success × 75% │ +│ │ +│ Phase 3: Download Chunk 探测 (新增) │ +│ ───────────────────────── │ +│ 目的: 找到设备→PC 方向的最大可靠传输块 │ +│ │ +│ 1. 在设备 RAM 中写入测试数据 (使用 Phase 2 的参数) │ +│ 2. 从 start_size (256B) 开始 │ +│ 3. 发送 "fl -c read --addr {addr} --len {size}" │ +│ 4. 验证返回数据的 CRC │ +│ 5. 逐步增大 (×1.5), 直到失败或达到上限 (8192B) │ +│ 6. download_chunk_size = last_success × 85% │ +│ (下载方向更稳定, 安全余量可以更小) │ +│ │ +└─────────────────────────────────────────────────────┘ +``` + +### 2.4 测试结果弹窗展示 + +测试完成后,通过弹窗(Modal Dialog)展示结果,用户确认后自动应用: + +``` +┌─────────────── 串口连通性测试结果 ───────────────┐ +│ │ +│ ✅ 测试完成 │ +│ │ +│ ┌─ 推荐参数 ─────────────────────────────────┐ │ +│ │ Upload Chunk: 128 B │ │ +│ │ Download Chunk: 2048 B │ │ +│ │ TX Fragment: disabled │ │ +│ │ TX Fragment Delay: 2 ms │ │ +│ └────────────────────────────────────────────┘ │ +│ │ +│ ┌─ 测试详情 ─────────────────────────────────┐ │ +│ │ Upload 最大成功: 170 B │ │ +│ │ Upload 首次失败: 238 B │ │ +│ │ Download 最大成功: 2412 B │ │ +│ │ Download 首次失败: 3618 B │ │ +│ │ 测试耗时: 3.2 s │ │ +│ └────────────────────────────────────────────┘ │ +│ │ +│ [应用参数] [取消] │ +│ │ +└─────────────────────────────────────────────────┘ +``` + +点击 "应用参数" 后自动写入配置并持久化。 + +--- + +## 3. 
CLI / MCP 适配

+### 3.1 CLI 参数更新
+
+`fpb_cli.py` 参数直接使用新名称,不保留旧名称:
+
+```bash
+# 连通性测试(自动探测所有参数)
+fpb_cli.py test-serial
+
+# 手动指定参数
+fpb_cli.py inject --upload-chunk-size 128
+fpb_cli.py download --download-chunk-size 2048
+fpb_cli.py inject --serial-tx-fragment-size 64 --serial-tx-fragment-delay 0.002
+```
+
+### 3.2 MCP 工具更新
+
+`test_serial` MCP 工具返回值扩展为包含双向结果(与 `test_serial_throughput()` 的实际返回字段一致):
+
+```json
+{
+  "success": true,
+  "max_working_size": 170,
+  "failed_size": 238,
+  "recommended_upload_chunk_size": 128,
+  "recommended_download_chunk_size": 2048,
+  "fragment_needed": false,
+  "tests": [...],
+  "phases": {
+    "fragment": {...},
+    "upload": {...},
+    "download": {"max_working_size": 2412,
+                 "failed_size": 3618, ...}
+  }
+}
+```
+
+各 MCP 工具(`inject`、`mem_read`、`mem_write`、`mem_dump` 等)不接受单次调用的 chunk 参数覆盖,统一使用全局串口配置。串口参数通过以下方式设置:
+
+1. 连接设备后执行 `test_serial`,自动探测并应用最优参数
+2. 通过 `connect` 工具的参数手动指定全局默认值
+
+| 全局配置参数 | 影响的 MCP 工具 |
+|-------------|----------------|
+| `upload_chunk_size` | `inject`, `mem_write` |
+| `download_chunk_size` | `mem_read`, `mem_dump`, 文件下载 |
+| `serial_tx_fragment_size` | 所有串口通信 |
+| `serial_tx_fragment_delay` | 所有串口通信 |
+
+---
+
+## 4. 实施计划
+
+### Phase 1: 参数重命名 + 拆分
+
+1. `core/config_schema.py`: 重命名参数,新增 `download_chunk_size`
+2. `core/state.py`: 删除旧参数,使用新默认值
+3. `core/serial_protocol.py`: 区分 upload/download chunk_size,重命名 tx_chunk 引用
+4. `core/file_transfer.py`: 构造时接收两个 chunk_size
+5. 前端 UI: 更新配置面板标签
+
+### Phase 2: 串口连通性测试扩展
+
+1. `core/serial_protocol.py`: 扩展 `test_serial_throughput()` 为三阶段
+2. `app/routes/connection.py`: 更新 API 返回值
+3. 前端: 测试完成后弹窗展示结果,点击 "应用参数" 写入配置
+
+### Phase 3: CLI / MCP 适配
+
+1. `cli/fpb_cli.py`: 更新 CLI 参数名
+2. `fpb_mcp_server.py`: 更新 MCP 工具参数和返回值
+3. 
更新所有相关测试 + +### 涉及文件 + +| 文件 | 改动内容 | +|------|---------| +| `core/config_schema.py` | 重命名参数,新增 download_chunk_size | +| `core/state.py` | 删除旧参数 | +| `core/serial_protocol.py` | 拆分 chunk_size,扩展 test_serial,重命名 tx_chunk | +| `core/file_transfer.py` | 接收两个 chunk_size | +| `cli/fpb_cli.py` | 更新 CLI 参数名 | +| `fpb_mcp_server.py` | 更新 MCP 工具参数 | +| `app/routes/connection.py` | 更新 test_serial API | +| `static/js/features/connection.js` | 弹窗展示测试结果 | +| `tests/test_*.py` | 更新相关测试 | + +--- + +## 5. 预期收益 + +| 指标 | 改进前 | 改进后 | +|------|--------|--------| +| 下载速度 | chunk_size=128 → ~10 KB/s | download_chunk=2048 → ~80 KB/s | +| 参数可理解性 | 3 个易混淆参数 | 语义清晰的命名 | +| 配置复杂度 | 手动调参 | 连通性测试自动探测 + 弹窗一键应用 | +| 上传速度 | 不变 | 不变(受限于设备缓冲区) | diff --git a/Tools/WebServer/fpb_mcp_server.py b/Tools/WebServer/fpb_mcp_server.py index 3799745..4b8bb4e 100644 --- a/Tools/WebServer/fpb_mcp_server.py +++ b/Tools/WebServer/fpb_mcp_server.py @@ -294,14 +294,19 @@ def test_serial( start_size: int = 16, max_size: int = 4096, ) -> dict: - """Test serial throughput to find optimal transfer parameters. + """Test serial throughput with 3-phase probing to find optimal parameters. - Probes the device's receive buffer limit using increasing packet sizes. + Phase 1: TX Fragment probe - detect if PC→device needs fragmentation. + Phase 2: Upload chunk probe - find device shell buffer limit. + Phase 3: Download chunk probe - find max reliable download size. + + Returns recommended_upload_chunk_size, recommended_download_chunk_size, + fragment_needed, and per-phase test details. 
Args: port: Serial port (uses existing connection if omitted) - start_size: Starting test size in bytes (default: 16) - max_size: Maximum test size in bytes (default: 4096) + start_size: Starting test size in bytes for upload probe (default: 16) + max_size: Maximum test size in bytes for upload probe (default: 4096) """ cli = _get_cli(port=port) return _capture_cli_output(cli.test_serial, start_size, max_size) diff --git a/Tools/WebServer/static/js/core/config-schema.js b/Tools/WebServer/static/js/core/config-schema.js index a852f68..8df950a 100644 --- a/Tools/WebServer/static/js/core/config-schema.js +++ b/Tools/WebServer/static/js/core/config-schema.js @@ -460,12 +460,6 @@ function onConfigItemChange(key) { return; // Handler will save config } break; - case 'verify_crc': - if (typeof onVerifyCrcChange === 'function') { - onVerifyCrcChange(); - return; // Handler will save config - } - break; case 'log_file_enabled': if (typeof onLogFileEnabledChange === 'function') { onLogFileEnabledChange(); @@ -478,6 +472,12 @@ function onConfigItemChange(key) { return; // Handler will save config } break; + case 'verify_crc': + if (typeof onVerifyCrcChange === 'function') { + onVerifyCrcChange(); + return; // Handler will save config + } + break; case 'enable_decompile': if (typeof onEnableDecompileChange === 'function') { onEnableDecompileChange(); diff --git a/Tools/WebServer/static/js/features/config.js b/Tools/WebServer/static/js/features/config.js index bc303e1..5bfd164 100644 --- a/Tools/WebServer/static/js/features/config.js +++ b/Tools/WebServer/static/js/features/config.js @@ -249,10 +249,14 @@ async function saveConfigLegacy(silent = false) { document.getElementById('compileCommandsPath')?.value || '', toolchain_path: document.getElementById('toolchainPath')?.value || '', patch_mode: document.getElementById('patchMode')?.value || 'trampoline', - chunk_size: parseInt(document.getElementById('chunkSize')?.value) || 128, - tx_chunk_size: 
parseInt(document.getElementById('txChunkSize')?.value) || 0, - tx_chunk_delay: - (parseInt(document.getElementById('txChunkDelay')?.value) || 5) / 1000, + upload_chunk_size: + parseInt(document.getElementById('chunkSize')?.value) || 128, + download_chunk_size: + parseInt(document.getElementById('downloadChunkSize')?.value) || 1024, + serial_tx_fragment_size: + parseInt(document.getElementById('txChunkSize')?.value) || 0, + serial_tx_fragment_delay: + (parseInt(document.getElementById('txChunkDelay')?.value) || 2) / 1000, transfer_max_retries: parseInt(document.getElementById('transferMaxRetries')?.value) || 3, watch_dirs: getWatchDirs(), @@ -260,7 +264,6 @@ async function saveConfigLegacy(silent = false) { enable_decompile: document.getElementById('enableDecompile')?.checked || false, ghidra_path: document.getElementById('ghidraPath')?.value || '', - verify_crc: document.getElementById('verifyCrc')?.checked ?? true, }; try { diff --git a/Tools/WebServer/static/js/features/fpb.js b/Tools/WebServer/static/js/features/fpb.js index ad84176..4f443fa 100644 --- a/Tools/WebServer/static/js/features/fpb.js +++ b/Tools/WebServer/static/js/features/fpb.js @@ -45,7 +45,7 @@ async function fpbTestSerial() { return; } - log.info('Starting serial throughput test (x1.4 stepping)...'); + log.info('Starting 3-phase serial throughput test...'); try { const res = await fetch('/api/fpb/test-serial', { @@ -53,7 +53,7 @@ async function fpbTestSerial() { headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ start_size: 16, - max_size: 4096, + max_size: 512, timeout: 2.0, }), }); @@ -62,8 +62,15 @@ async function fpbTestSerial() { if (data.success) { state.throughputTested = true; writeToOutput('─'.repeat(50), 'info'); - log.info('Serial Throughput Test Results:'); + /* Phase 1: Fragment probe */ + const fragNeeded = data.fragment_needed; + log.info( + `Phase 1 - TX Fragment: ${fragNeeded ? 
'needed' : 'not needed'}`, + ); + + /* Phase 2: Upload probe */ + log.info('Phase 2 - Upload Chunk Probe:'); if (data.tests && data.tests.length > 0) { data.tests.forEach((test) => { const status = test.passed ? '✓' : '✗'; @@ -79,39 +86,59 @@ async function fpbTestSerial() { }); } + /* Phase 3: Download probe */ + const dlPhase = data.phases?.download; + if (dlPhase && !dlPhase.skipped) { + log.info('Phase 3 - Download Chunk Probe:'); + if (dlPhase.tests && dlPhase.tests.length > 0) { + dlPhase.tests.forEach((test) => { + const status = test.passed ? '✓' : '✗'; + const timeStr = test.response_time_ms + ? ` (${test.response_time_ms}ms)` + : ''; + const errStr = test.error ? ` - ${test.error}` : ''; + writeToOutput( + ` ${status} ${test.size} bytes${timeStr}${errStr}`, + test.passed ? 'success' : 'error', + ); + }); + } + } else if (dlPhase?.skipped) { + log.warn(`Phase 3 skipped: ${dlPhase.skip_reason || 'unknown'}`); + } + writeToOutput('─'.repeat(50), 'info'); - log.success(`Max working size: ${data.max_working_size} bytes`); - if (data.failed_size > 0) { - log.warn(`Failed at: ${data.failed_size} bytes`); + + if (data.max_working_size !== undefined) { + log.info(`Max working size: ${data.max_working_size} bytes`); + } + if (data.failed_size) { + log.warn(`Failed at size: ${data.failed_size} bytes`); } - log.success( - `Recommended chunk size: ${data.recommended_chunk_size} bytes`, - ); - /* Ask user if they want to apply recommended chunk size */ - const recommendedSize = data.recommended_chunk_size; - const currentSize = - parseInt(document.getElementById('chunkSize')?.value) || 128; + const recUpload = data.recommended_upload_chunk_size; + const recDownload = data.recommended_download_chunk_size; + log.success(`Recommended upload chunk: ${recUpload} bytes`); + log.success(`Recommended download chunk: ${recDownload} bytes`); const apply = confirm( - `${t('messages.serial_test_complete', 'Serial Throughput Test Complete!')}\n\n` + - 
`${t('messages.current_chunk_size', 'Current chunk size')}: ${currentSize} ${t('device.bytes', 'bytes')}\n` + - `${t('messages.recommended_chunk_size', 'Recommended chunk size')}: ${recommendedSize} ${t('device.bytes', 'bytes')}\n\n` + - t( - 'messages.apply_recommended_size', - 'Do you want to apply the recommended chunk size?', - ), + `✅ ${t('messages.serial_test_complete', 'Test Complete')}\n\n` + + `Upload: ${recUpload}B, Download: ${recDownload}B\n\n` + + t('messages.apply_recommended_size', 'Apply recommended parameters?'), ); if (apply) { - const chunkInput = document.getElementById('chunkSize'); - if (chunkInput) { - chunkInput.value = recommendedSize; - await saveConfig(true); - log.success(`Chunk size updated to ${recommendedSize} bytes`); - } + /* Apply both upload and download chunk sizes */ + const uploadInput = document.getElementById('uploadChunkSize'); + const downloadInput = document.getElementById('downloadChunkSize'); + if (uploadInput) uploadInput.value = recUpload; + if (downloadInput) downloadInput.value = recDownload; + await saveConfig(true); + log.success( + `Parameters applied: upload=${recUpload}B, download=${recDownload}B`, + ); } else { - log.info(`Chunk size unchanged (${currentSize} bytes)`); + log.info('Parameters unchanged'); } } else { log.error(`Test failed: ${data.error || 'Unknown error'}`); diff --git a/Tools/WebServer/static/js/locales/en.js b/Tools/WebServer/static/js/locales/en.js index f1c735d..0f375f5 100644 --- a/Tools/WebServer/static/js/locales/en.js +++ b/Tools/WebServer/static/js/locales/en.js @@ -32,12 +32,12 @@ window.i18nResources['en'] = { patch_mode: 'Inject Mode', auto_compile: 'Auto Inject on Save', watch_dirs: 'Watch Directories', - chunk_size: 'Chunk Size', - tx_chunk_size: 'TX Chunk', - tx_chunk_delay: 'TX Delay', + upload_chunk_size: 'Upload Chunk Size', + download_chunk_size: 'Download Chunk Size', + serial_tx_fragment_size: 'TX Fragment', + serial_tx_fragment_delay: 'TX Fragment Delay', 
transfer_max_retries: 'Max Retries', wakeup_shell_cnt: 'Wakeup Count', - verify_crc: 'Verify CRC after Transfer', log_file_path: 'Log Path', log_file_enabled: 'Record Serial Logs', serial_echo_enabled: 'Serial TX Echo', @@ -254,11 +254,8 @@ window.i18nResources['en'] = { 'Please clear some Slots in DEVICE INFO panel and try again.', auto_generated_patch_preview: 'Auto-generated patch (read-only preview)', // Serial test - serial_test_complete: 'Serial Throughput Test Complete!', - current_chunk_size: 'Current chunk size', - recommended_chunk_size: 'Recommended chunk size', - apply_recommended_size: - 'Do you want to apply the recommended chunk size?', + serial_test_complete: 'Test Complete', + apply_recommended_size: 'Apply recommended parameters?', // ELF watcher elf_file_changed: 'ELF file "{{fileName}}" has changed.', reload_symbols_now: 'Reload symbols now?', @@ -408,16 +405,18 @@ window.i18nResources['en'] = { auto_compile: 'Automatically compile and inject when source files are saved', watch_dirs: 'Directories to watch for file changes', - chunk_size: + upload_chunk_size: 'Size of each uploaded data block. Smaller values are more stable but slower.', - tx_chunk_size: - 'TX chunk size for serial commands (bytes). 0 = disabled. Workaround for slow serial drivers.', - tx_chunk_delay: 'Delay between TX chunks. Only used when TX Chunk > 0.', + download_chunk_size: + 'Size of each downloaded data block. Larger values are faster.', + serial_tx_fragment_size: + 'TX fragment size for serial commands (bytes). 0 = disabled. Workaround for slow serial drivers.', + serial_tx_fragment_delay: + 'Delay between TX fragments. 
Only used when TX Fragment > 0.', transfer_max_retries: 'Maximum retry attempts for file transfer when CRC mismatch occurs.', wakeup_shell_cnt: 'Number of newlines to send before entering fl mode to wake up shell.', - verify_crc: 'Verify file integrity with CRC after transfer', log_file_path: 'Path to save serial logs', log_file_enabled: 'Record serial communication logs to file', serial_echo_enabled: 'Echo TX commands to SERIAL panel (for debugging)', diff --git a/Tools/WebServer/static/js/locales/zh-CN.js b/Tools/WebServer/static/js/locales/zh-CN.js index 472b31b..8be311a 100644 --- a/Tools/WebServer/static/js/locales/zh-CN.js +++ b/Tools/WebServer/static/js/locales/zh-CN.js @@ -32,12 +32,12 @@ window.i18nResources['zh-CN'] = { patch_mode: '注入模式', auto_compile: '保存时自动注入', watch_dirs: '监视目录', - chunk_size: '块大小', - tx_chunk_size: '发送块大小', - tx_chunk_delay: '发送延迟', + upload_chunk_size: '上传块大小', + download_chunk_size: '下载块大小', + serial_tx_fragment_size: '发送分片大小', + serial_tx_fragment_delay: '发送分片延迟', transfer_max_retries: '最大重试次数', wakeup_shell_cnt: '唤醒次数', - verify_crc: '传输后校验 CRC', log_file_path: '日志路径', log_file_enabled: '记录串口日志', serial_echo_enabled: '串口发送回显', @@ -249,10 +249,8 @@ window.i18nResources['zh-CN'] = { clear_slots_hint: '请在设备信息面板中清除一些槽位后重试。', auto_generated_patch_preview: '自动生成的补丁(只读预览)', // 串口测试 - serial_test_complete: '串口吞吐测试完成!', - current_chunk_size: '当前块大小', - recommended_chunk_size: '推荐块大小', - apply_recommended_size: '是否应用推荐的块大小?', + serial_test_complete: '测试完成', + apply_recommended_size: '是否应用推荐参数?', // ELF 监视器 elf_file_changed: 'ELF 文件 "{{fileName}}" 已更改。', reload_symbols_now: '立即重新加载符号?', @@ -398,13 +396,14 @@ window.i18nResources['zh-CN'] = { 'Trampoline: 使用代码跳板(仅 FPB v1)\nDebugMonitor: 使用调试监视器异常(FPB v1/v2)\nDirect: 直接代码替换(仅 FPB v1)\n注意: FPB v2 仅支持 DebugMonitor 模式,会自动切换', auto_compile: '源文件保存时自动编译并注入', watch_dirs: '监视文件变化的目录', - chunk_size: '每个上传数据块的大小。较小的值更稳定但更慢。', - tx_chunk_size: - '串口命令的发送块大小(字节)。0 = 禁用。用于解决慢速串口驱动问题。', - tx_chunk_delay: 
'发送块之间的延迟。仅在发送块大小 > 0 时使用。', + upload_chunk_size: '每个上传数据块的大小。较小的值更稳定但更慢。', + download_chunk_size: '每个下载数据块的大小。较大的值更快。', + serial_tx_fragment_size: + '串口命令的发送分片大小(字节)。0 = 禁用。用于解决慢速串口驱动问题。', + serial_tx_fragment_delay: + '发送分片之间的延迟。仅在发送分片大小 > 0 时使用。', transfer_max_retries: 'CRC 校验失败时的最大重试次数。', wakeup_shell_cnt: '进入 fl 模式前发送换行符的次数,用于唤醒 shell。', - verify_crc: '传输后使用 CRC 校验文件完整性', log_file_path: '串口日志保存路径', log_file_enabled: '将串口通信日志记录到文件', serial_echo_enabled: '在串口面板回显发送的命令(用于调试)', diff --git a/Tools/WebServer/static/js/locales/zh-TW.js b/Tools/WebServer/static/js/locales/zh-TW.js index 17cd542..c13ed6f 100644 --- a/Tools/WebServer/static/js/locales/zh-TW.js +++ b/Tools/WebServer/static/js/locales/zh-TW.js @@ -32,12 +32,12 @@ window.i18nResources['zh-TW'] = { patch_mode: '注入模式', auto_compile: '儲存時自動注入', watch_dirs: '監視目錄', - chunk_size: '區塊大小', - tx_chunk_size: '傳送區塊大小', - tx_chunk_delay: '傳送延遲', + upload_chunk_size: '上傳區塊大小', + download_chunk_size: '下載區塊大小', + serial_tx_fragment_size: '傳送分片大小', + serial_tx_fragment_delay: '傳送分片延遲', transfer_max_retries: '最大重試次數', wakeup_shell_cnt: '喚醒次數', - verify_crc: '傳輸後驗證 CRC', log_file_path: '日誌路徑', log_file_enabled: '記錄串列埠日誌', serial_echo_enabled: '串列埠傳送回顯', @@ -249,10 +249,8 @@ window.i18nResources['zh-TW'] = { clear_slots_hint: '請在裝置資訊面板中清除一些槽位後重試。', auto_generated_patch_preview: '自動產生的補丁(唯讀預覽)', // 序列埠測試 - serial_test_complete: '序列埠吞吐測試完成!', - current_chunk_size: '目前區塊大小', - recommended_chunk_size: '建議區塊大小', - apply_recommended_size: '是否套用建議的區塊大小?', + serial_test_complete: '測試完成', + apply_recommended_size: '是否套用建議參數?', // ELF 監視器 elf_file_changed: 'ELF 檔案 "{{fileName}}" 已變更。', reload_symbols_now: '立即重新載入符號?', @@ -398,13 +396,14 @@ window.i18nResources['zh-TW'] = { 'Trampoline: 使用程式碼跳板(僅 FPB v1)\nDebugMonitor: 使用除錯監視器例外(FPB v1/v2)\nDirect: 直接程式碼替換(僅 FPB v1)\n注意: FPB v2 僅支援 DebugMonitor 模式,會自動切換', auto_compile: '原始檔儲存時自動編譯並注入', watch_dirs: '監視檔案變化的目錄', - chunk_size: '每個上傳資料區塊的大小。較小的值更穩定但更慢。', - tx_chunk_size: - 
'串列埠命令的傳送區塊大小(位元組)。0 = 停用。用於解決慢速串列埠驅動問題。', - tx_chunk_delay: '傳送區塊之間的延遲。僅在傳送區塊大小 > 0 時使用。', + upload_chunk_size: '每個上傳資料區塊的大小。較小的值更穩定但更慢。', + download_chunk_size: '每個下載資料區塊的大小。較大的值更快。', + serial_tx_fragment_size: + '串列埠命令的傳送分片大小(位元組)。0 = 停用。用於解決慢速串列埠驅動問題。', + serial_tx_fragment_delay: + '傳送分片之間的延遲。僅在傳送分片大小 > 0 時使用。', transfer_max_retries: 'CRC 驗證失敗時的最大重試次數。', wakeup_shell_cnt: '進入 fl 模式前傳送換行符的次數,用於喚醒 shell。', - verify_crc: '傳輸後使用 CRC 驗證檔案完整性', log_file_path: '串列埠日誌儲存路徑', log_file_enabled: '將串列埠通訊日誌記錄到檔案', serial_echo_enabled: '在串列埠面板回顯傳送的命令(用於除錯)', diff --git a/Tools/WebServer/tests/js/test_config.js b/Tools/WebServer/tests/js/test_config.js index b7f5672..2d419f0 100644 --- a/Tools/WebServer/tests/js/test_config.js +++ b/Tools/WebServer/tests/js/test_config.js @@ -168,27 +168,27 @@ module.exports = function (w) { w.FPBState.toolTerminal = null; }); - it('sets chunk_size from config', async () => { + it('sets upload_chunk_size from config', async () => { w.FPBState.toolTerminal = new MockTerminal(); - setFetchResponse('/api/config', { chunk_size: 256 }); + setFetchResponse('/api/config', { upload_chunk_size: 256 }); setFetchResponse('/api/status', { connected: false }); await w.loadConfig(); assertEqual(typeof w.loadConfig, 'function'); w.FPBState.toolTerminal = null; }); - it('sets tx_chunk_size from config', async () => { + it('sets serial_tx_fragment_size from config', async () => { w.FPBState.toolTerminal = new MockTerminal(); - setFetchResponse('/api/config', { tx_chunk_size: 64 }); + setFetchResponse('/api/config', { serial_tx_fragment_size: 64 }); setFetchResponse('/api/status', { connected: false }); await w.loadConfig(); assertEqual(typeof w.loadConfig, 'function'); w.FPBState.toolTerminal = null; }); - it('converts tx_chunk_delay to milliseconds', async () => { + it('converts serial_tx_fragment_delay to milliseconds', async () => { w.FPBState.toolTerminal = new MockTerminal(); - setFetchResponse('/api/config', { tx_chunk_delay: 0.01 }); + 
setFetchResponse('/api/config', { serial_tx_fragment_delay: 0.01 }); setFetchResponse('/api/status', { connected: false }); await w.loadConfig(); assertEqual(typeof w.loadConfig, 'function'); @@ -258,7 +258,7 @@ module.exports = function (w) { schema: [ { key: 'elf_path', config_type: 'file_path', default: '' }, { - key: 'chunk_size', + key: 'upload_chunk_size', config_type: 'number', default: 128, ui_multiplier: 1, @@ -337,7 +337,7 @@ module.exports = function (w) { schema: [ { key: 'elf_path', config_type: 'file_path', default: '' }, { - key: 'chunk_size', + key: 'upload_chunk_size', config_type: 'number', default: 128, ui_multiplier: 1, diff --git a/Tools/WebServer/tests/js/test_config_schema.js b/Tools/WebServer/tests/js/test_config_schema.js index 41ade93..e2157da 100644 --- a/Tools/WebServer/tests/js/test_config_schema.js +++ b/Tools/WebServer/tests/js/test_config_schema.js @@ -254,7 +254,7 @@ module.exports = function (w) { setFetchResponse('/api/config/schema', { schema: [ { - key: 'chunk_size', + key: 'upload_chunk_size', config_type: 'number', label: 'Chunk Size', group: 'transfer', @@ -270,7 +270,7 @@ module.exports = function (w) { group_order: ['transfer'], }); await w.renderConfigPanel('configContainer'); - assertContains(container.innerHTML, 'chunkSize'); + assertContains(container.innerHTML, 'uploadChunkSize'); assertContains(container.innerHTML, 'type="number"'); assertContains(container.innerHTML, 'bytes'); }); @@ -482,7 +482,7 @@ module.exports = function (w) { setFetchResponse('/api/config/schema', { schema: [ { - key: 'tx_chunk_delay', + key: 'serial_tx_fragment_delay', config_type: 'number', default: 0.005, ui_multiplier: 1000, @@ -491,8 +491,10 @@ module.exports = function (w) { groups: {}, group_order: [], }); - setFetchResponse('/api/config', { tx_chunk_delay: 0.01 }); - const el = browserGlobals.document.getElementById('txChunkDelay'); + setFetchResponse('/api/config', { serial_tx_fragment_delay: 0.01 }); + const el = 
browserGlobals.document.getElementById( + 'serialTxFragmentDelay', + ); await w.loadConfigValues(); assertEqual(parseInt(el.value), 10); }); @@ -515,7 +517,7 @@ module.exports = function (w) { setFetchResponse('/api/config/schema', { schema: [ { - key: 'chunk_size', + key: 'upload_chunk_size', config_type: 'number', default: 128, ui_multiplier: 1, @@ -524,8 +526,8 @@ module.exports = function (w) { groups: {}, group_order: [], }); - setFetchResponse('/api/config', { chunk_size: null }); - const el = browserGlobals.document.getElementById('chunkSize'); + setFetchResponse('/api/config', { upload_chunk_size: null }); + const el = browserGlobals.document.getElementById('uploadChunkSize'); await w.loadConfigValues(); assertEqual(parseInt(el.value), 128); }); diff --git a/Tools/WebServer/tests/js/test_features.js b/Tools/WebServer/tests/js/test_features.js index 5730f42..4fa34e5 100644 --- a/Tools/WebServer/tests/js/test_features.js +++ b/Tools/WebServer/tests/js/test_features.js @@ -98,7 +98,7 @@ module.exports = function (w) { success: true, tests: [], max_working_size: 1024, - recommended_chunk_size: 128, + recommended_upload_chunk_size: 128, }); await w.fpbTestSerial(); // Check side effect instead of fetch calls @@ -119,7 +119,7 @@ module.exports = function (w) { ], max_working_size: 16, failed_size: 32, - recommended_chunk_size: 16, + recommended_upload_chunk_size: 16, }); await w.fpbTestSerial(); assertTrue( @@ -2517,7 +2517,7 @@ module.exports = function (w) { success: true, tests: [], max_working_size: 1024, - recommended_chunk_size: 128, + recommended_upload_chunk_size: 128, }); await w.fpbTestSerial(); // Check side effect instead of fetch calls @@ -2538,7 +2538,7 @@ module.exports = function (w) { ], max_working_size: 16, failed_size: 32, - recommended_chunk_size: 16, + recommended_upload_chunk_size: 16, }); await w.fpbTestSerial(); assertTrue( @@ -2579,7 +2579,7 @@ module.exports = function (w) { { size: 256, passed: true, response_time_ms: 45, cmd_len: 264 
}, ], max_working_size: 256, - recommended_chunk_size: 256, + recommended_upload_chunk_size: 256, }); await w.fpbTestSerial(); assertTrue( @@ -2604,7 +2604,7 @@ module.exports = function (w) { tests: [{ size: 512, passed: true }], max_working_size: 512, failed_size: 0, - recommended_chunk_size: 512, + recommended_upload_chunk_size: 512, }); await w.fpbTestSerial(); assertTrue( @@ -2618,24 +2618,24 @@ module.exports = function (w) { w.FPBState.isConnected = true; const mockTerm = new MockTerminal(); w.FPBState.toolTerminal = mockTerm; - browserGlobals.document.getElementById('chunkSize').value = '128'; + browserGlobals.document.getElementById('uploadChunkSize').value = '128'; browserGlobals.confirm = () => true; setFetchResponse('/api/fpb/test-serial', { success: true, tests: [], max_working_size: 512, - recommended_chunk_size: 384, + recommended_upload_chunk_size: 384, }); setFetchResponse('/api/config', { success: true }); await w.fpbTestSerial(); // value can be number or string depending on mock implementation assertEqual( - String(browserGlobals.document.getElementById('chunkSize').value), + String(browserGlobals.document.getElementById('uploadChunkSize').value), '384', ); assertTrue( mockTerm._writes.some( - (wr) => wr.msg && wr.msg.includes('Chunk size updated'), + (wr) => wr.msg && wr.msg.includes('Parameters applied'), ), ); w.FPBState.isConnected = false; @@ -2647,7 +2647,7 @@ module.exports = function (w) { w.FPBState.isConnected = true; const mockTerm = new MockTerminal(); w.FPBState.toolTerminal = mockTerm; - browserGlobals.document.getElementById('chunkSize').value = '128'; + browserGlobals.document.getElementById('uploadChunkSize').value = '128'; // Override global confirm to return false const origConfirm = global.confirm; global.confirm = () => false; @@ -2655,11 +2655,11 @@ module.exports = function (w) { success: true, tests: [], max_working_size: 512, - recommended_chunk_size: 384, + recommended_upload_chunk_size: 384, }); await 
w.fpbTestSerial(); assertEqual( - browserGlobals.document.getElementById('chunkSize').value, + browserGlobals.document.getElementById('uploadChunkSize').value, '128', ); assertTrue( diff --git a/Tools/WebServer/tests/test_config_schema.py b/Tools/WebServer/tests/test_config_schema.py index ad946a0..6b91800 100644 --- a/Tools/WebServer/tests/test_config_schema.py +++ b/Tools/WebServer/tests/test_config_schema.py @@ -179,9 +179,8 @@ def test_get_config_defaults(self): self.assertEqual(len(defaults), len(CONFIG_SCHEMA)) # Check some known defaults - self.assertEqual(defaults["chunk_size"], 128) + self.assertEqual(defaults["upload_chunk_size"], 128) self.assertEqual(defaults["patch_mode"], "trampoline") - self.assertEqual(defaults["verify_crc"], True) def test_get_schema_by_key_found(self): """Test get_schema_by_key with existing key.""" @@ -292,15 +291,26 @@ def test_patch_mode_config(self): self.assertEqual(item.default, "trampoline") self.assertEqual(len(item.options), 3) - def test_chunk_size_config(self): - """Test chunk_size configuration.""" - item = get_schema_by_key("chunk_size") + def test_upload_chunk_size_config(self): + """Test upload_chunk_size configuration.""" + item = get_schema_by_key("upload_chunk_size") self.assertIsNotNone(item) self.assertEqual(item.group, ConfigGroup.TRANSFER) self.assertEqual(item.config_type, ConfigType.NUMBER) self.assertEqual(item.default, 128) self.assertEqual(item.min_value, 16) - self.assertEqual(item.max_value, 1024) + self.assertEqual(item.max_value, 512) + self.assertEqual(item.unit, "Bytes") + + def test_download_chunk_size_config(self): + """Test download_chunk_size configuration.""" + item = get_schema_by_key("download_chunk_size") + self.assertIsNotNone(item) + self.assertEqual(item.group, ConfigGroup.TRANSFER) + self.assertEqual(item.config_type, ConfigType.NUMBER) + self.assertEqual(item.default, 1024) + self.assertEqual(item.min_value, 128) + self.assertEqual(item.max_value, 8192) self.assertEqual(item.unit, 
"Bytes") def test_ghidra_path_config(self): @@ -329,9 +339,9 @@ def test_watch_dirs_config(self): # watch_dirs is now independent of auto_compile (no depends_on) self.assertIsNone(item.depends_on) - def test_tx_chunk_delay_ui_multiplier(self): - """Test tx_chunk_delay has UI multiplier for ms display.""" - item = get_schema_by_key("tx_chunk_delay") + def test_serial_tx_fragment_delay_ui_multiplier(self): + """Test serial_tx_fragment_delay has UI multiplier for ms display.""" + item = get_schema_by_key("serial_tx_fragment_delay") self.assertIsNotNone(item) self.assertEqual(item.ui_multiplier, 1000) self.assertEqual(item.unit, "ms") diff --git a/Tools/WebServer/tests/test_file_transfer.py b/Tools/WebServer/tests/test_file_transfer.py index faa2463..addb1ac 100644 --- a/Tools/WebServer/tests/test_file_transfer.py +++ b/Tools/WebServer/tests/test_file_transfer.py @@ -72,14 +72,14 @@ def setUp(self): def test_init(self): """Test FileTransfer initialization.""" - ft = FileTransfer(self.mock_fpb, chunk_size=128) + ft = FileTransfer(self.mock_fpb, upload_chunk_size=128, download_chunk_size=128) self.assertEqual(ft.fpb, self.mock_fpb) - self.assertEqual(ft.chunk_size, 128) + self.assertEqual(ft.upload_chunk_size, 128) def test_init_default_chunk_size(self): """Test default chunk size.""" ft = FileTransfer(self.mock_fpb) - self.assertEqual(ft.chunk_size, FileTransfer.DEFAULT_CHUNK_SIZE) + self.assertEqual(ft.upload_chunk_size, FileTransfer.DEFAULT_UPLOAD_CHUNK_SIZE) class TestFileTransferBasicOps(unittest.TestCase): @@ -89,7 +89,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_fopen_success(self): """Test successful file open.""" @@ -163,7 +165,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" 
self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_fread_success(self): """Test successful file read.""" @@ -241,7 +245,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_fstat_success(self): """Test successful file stat.""" @@ -287,7 +293,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_flist_success(self): """Test successful directory listing.""" @@ -349,7 +357,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_fremove_success(self): """Test successful file removal.""" @@ -403,7 +413,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_fcrc_success(self): """Test successful fcrc.""" @@ -453,7 +465,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, 
"[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_upload_success(self): """Test successful file upload with CRC verification.""" @@ -476,15 +490,17 @@ def progress_cb(uploaded, total): self.assertEqual(len(progress_calls), 2) def test_upload_success_without_crc_verify(self): - """Test successful file upload without CRC verification.""" + """Test successful file upload with CRC verification warning.""" + data = b"x" * 300 + expected_crc = crc16(data) self.mock_fpb.send_fl_cmd.side_effect = [ (True, "[FLOK] FOPEN /test.txt mode=w"), (True, "[FLOK] FWRITE 256 bytes"), (True, "[FLOK] FWRITE 44 bytes"), + (True, f"[FLOK] FCRC size=300 crc=0x{expected_crc:04X}"), (True, "[FLOK] FCLOSE"), ] - data = b"x" * 300 - success, msg = self.ft.upload(data, "/test.txt", verify_crc=False) + success, msg = self.ft.upload(data, "/test.txt") self.assertTrue(success) def test_upload_crc_mismatch(self): @@ -588,12 +604,15 @@ def test_upload_exception_with_close_failure(self): def test_upload_no_progress_callback(self): """Test upload without progress callback.""" + data = b"test" + expected_crc = crc16(data) self.mock_fpb.send_fl_cmd.side_effect = [ (True, "[FLOK] FOPEN /test.txt mode=w"), (True, "[FLOK] FWRITE 4 bytes"), + (True, f"[FLOK] FCRC size=4 crc=0x{expected_crc:04X}"), (True, "[FLOK] FCLOSE"), ] - success, msg = self.ft.upload(b"test", "/test.txt", verify_crc=False) + success, msg = self.ft.upload(data, "/test.txt") self.assertTrue(success) @@ -604,7 +623,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_download_success(self): """Test successful file download with CRC verification.""" @@ -635,7 
+656,7 @@ def progress_cb(downloaded, total): self.assertGreaterEqual(len(progress_calls), 1) def test_download_success_without_crc_verify(self): - """Test successful file download without CRC verification.""" + """Test successful file download with CRC verification.""" test_data = b"hello world" b64_data = base64.b64encode(test_data).decode("ascii") crc = crc16(test_data) @@ -648,9 +669,10 @@ def test_download_success_without_crc_verify(self): f"[FLOK] FREAD {len(test_data)} bytes crc=0x{crc:04X} data={b64_data}", ), (True, "[FLOK] FREAD 0 bytes EOF"), + (True, f"[FLOK] FCRC size={len(test_data)} crc=0x{crc:04X}"), (True, "[FLOK] FCLOSE"), ] - success, data, msg = self.ft.download("/test.txt", verify_crc=False) + success, data, msg = self.ft.download("/test.txt") self.assertTrue(success) self.assertEqual(data, test_data) @@ -782,9 +804,10 @@ def test_download_no_progress_callback(self): f"[FLOK] FREAD {len(test_data)} bytes crc=0x{crc:04X} data={b64_data}", ), (True, "[FLOK] FREAD 0 bytes EOF"), + (True, f"[FLOK] FCRC size={len(test_data)} crc=0x{crc:04X}"), (True, "[FLOK] FCLOSE"), ] - success, data, msg = self.ft.download("/test.txt", verify_crc=False) + success, data, msg = self.ft.download("/test.txt") self.assertTrue(success) self.assertEqual(data, test_data) @@ -795,22 +818,24 @@ class TestFileTransferIntegration(unittest.TestCase): def test_upload_download_roundtrip(self): """Test upload then download returns same data.""" mock_fpb = Mock() - ft = FileTransfer(mock_fpb, chunk_size=256) + ft = FileTransfer(mock_fpb, upload_chunk_size=256, download_chunk_size=256) original_data = b"Test file content for roundtrip" b64_data = base64.b64encode(original_data).decode("ascii") crc = crc16(original_data) - # Upload (without CRC verify for simplicity) + # Upload + upload_crc = crc16(original_data) mock_fpb.send_fl_cmd.side_effect = [ (True, "[FLOK] FOPEN /test.txt mode=w"), (True, f"[FLOK] FWRITE {len(original_data)} bytes"), + (True, f"[FLOK] FCRC 
size={len(original_data)} crc=0x{upload_crc:04X}"), (True, "[FLOK] FCLOSE"), ] - success, _ = ft.upload(original_data, "/test.txt", verify_crc=False) + success, _ = ft.upload(original_data, "/test.txt") self.assertTrue(success) - # Download (without CRC verify for simplicity) + # Download mock_fpb.send_fl_cmd.side_effect = [ ( True, @@ -822,9 +847,10 @@ def test_upload_download_roundtrip(self): f"[FLOK] FREAD {len(original_data)} bytes crc=0x{crc:04X} data={b64_data}", ), (True, "[FLOK] FREAD 0 bytes EOF"), + (True, f"[FLOK] FCRC size={len(original_data)} crc=0x{crc:04X}"), (True, "[FLOK] FCLOSE"), ] - success, downloaded_data, _ = ft.download("/test.txt", verify_crc=False) + success, downloaded_data, _ = ft.download("/test.txt") self.assertTrue(success) self.assertEqual(downloaded_data, original_data) @@ -835,7 +861,9 @@ class TestSendCmd(unittest.TestCase): def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_send_cmd_with_timeout(self): """Test _send_cmd passes timeout correctly.""" @@ -868,7 +896,9 @@ class TestFileTransferRetry(unittest.TestCase): def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) self.ft.max_retries = 3 def test_fwrite_retry_on_crc_mismatch(self): @@ -987,8 +1017,8 @@ def test_fread_max_retries_exceeded(self): self.assertFalse(success) self.assertEqual(self.mock_fpb.send_fl_cmd.call_count, 4) # 1 + 3 retries - def test_fread_uses_chunk_size_as_default(self): - """Test fread uses chunk_size as default read size.""" + def test_fread_uses_download_chunk_size_as_default(self): + """Test fread uses download_chunk_size as default read size.""" test_data = b"hello" b64_data = 
base64.b64encode(test_data).decode("ascii") crc = crc16(test_data) @@ -999,7 +1029,7 @@ def test_fread_uses_chunk_size_as_default(self): ) self.ft.fread() # No size argument call_args = self.mock_fpb.send_fl_cmd.call_args[0][0] - self.assertIn("--len 256", call_args) # chunk_size is 256 + self.assertIn("--len 256", call_args) # download_chunk_size is 256 def test_fwrite_custom_max_retries(self): """Test fwrite with custom max_retries.""" @@ -1026,7 +1056,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] Test")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_initial_stats(self): """Test initial stats are all zero.""" @@ -1184,7 +1216,9 @@ def setUp(self): """Set up mock FPB and FileTransfer.""" self.mock_fpb = Mock() self.mock_fpb.send_fl_cmd = Mock(return_value=(True, "[FLOK] FSEEK")) - self.ft = FileTransfer(self.mock_fpb, chunk_size=256) + self.ft = FileTransfer( + self.mock_fpb, upload_chunk_size=256, download_chunk_size=256 + ) def test_fseek_success(self): """Test successful fseek.""" diff --git a/Tools/WebServer/tests/test_fpb_cli.py b/Tools/WebServer/tests/test_fpb_cli.py index c750420..896b525 100644 --- a/Tools/WebServer/tests/test_fpb_cli.py +++ b/Tools/WebServer/tests/test_fpb_cli.py @@ -35,9 +35,9 @@ def test_init_defaults(self): self.assertEqual(state.inject_base, 0x20001000) self.assertIsNone(state.cached_slots) self.assertEqual(state.slot_update_id, 0) - self.assertEqual(state.chunk_size, 128) - self.assertEqual(state.tx_chunk_size, 0) - self.assertEqual(state.tx_chunk_delay, 0.005) + self.assertEqual(state.upload_chunk_size, 128) + self.assertEqual(state.serial_tx_fragment_size, 0) + self.assertEqual(state.serial_tx_fragment_delay, 0.002) @unittest.skipIf(not HAS_SERIAL, "pyserial not installed") @patch("cli.fpb_cli.serial.Serial") @@ -125,8 
+125,8 @@ def test_init_with_paths(self): def test_init_with_tx_chunk_params(self): """Test initialization with TX chunk parameters""" cli = FPBCLI(tx_chunk_size=16, tx_chunk_delay=0.01) - self.assertEqual(cli._device_state.tx_chunk_size, 16) - self.assertEqual(cli._device_state.tx_chunk_delay, 0.01) + self.assertEqual(cli._device_state.serial_tx_fragment_size, 16) + self.assertEqual(cli._device_state.serial_tx_fragment_delay, 0.01) cli.cleanup() @unittest.skipIf(not HAS_SERIAL, "pyserial not installed") @@ -679,7 +679,10 @@ def test_test_serial_success(self): {"size": 256, "passed": True}, {"size": 512, "passed": False, "error": "timeout"}, ], - "recommended_chunk_size": 192, + "recommended_upload_chunk_size": 192, + "recommended_download_chunk_size": 2048, + "fragment_needed": False, + "phases": {}, } f = io.StringIO() @@ -701,7 +704,10 @@ def test_test_serial_all_pass(self): "max_working_size": 4096, "failed_size": 0, "tests": [{"size": 16, "passed": True}], - "recommended_chunk_size": 3072, + "recommended_upload_chunk_size": 3072, + "recommended_download_chunk_size": 4096, + "fragment_needed": False, + "phases": {}, } f = io.StringIO() @@ -1845,7 +1851,7 @@ def test_init(self): device = DeviceState() self.assertIsNone(device.ser) self.assertFalse(device.connected) - self.assertEqual(device.chunk_size, 128) + self.assertEqual(device.upload_chunk_size, 128) def test_disconnect(self): """Test disconnect""" diff --git a/Tools/WebServer/tests/test_fpb_inject.py b/Tools/WebServer/tests/test_fpb_inject.py index 7c5d4bc..39e1137 100644 --- a/Tools/WebServer/tests/test_fpb_inject.py +++ b/Tools/WebServer/tests/test_fpb_inject.py @@ -578,7 +578,7 @@ def setUp(self): self.device = DeviceState() self.device.ser = Mock() self.device.ser.isOpen.return_value = True - self.device.chunk_size = 48 # Set fixed chunk_size for testing + self.device.upload_chunk_size = 48 # Set fixed upload_chunk_size for testing self.fpb = FPBInject(self.device) def test_send_cmd_write_error(self): 
@@ -2022,7 +2022,9 @@ def test_test_serial_recommended_size(self): # Recommended should be 75% of max working size if result["max_working_size"] > 0: expected_min = 64 # Minimum recommended size - self.assertGreaterEqual(result["recommended_chunk_size"], expected_min) + self.assertGreaterEqual( + result["recommended_upload_chunk_size"], expected_min + ) def test_test_serial_exception_handling(self): """Test serial throughput exception handling""" diff --git a/Tools/WebServer/tests/test_fpb_routes.py b/Tools/WebServer/tests/test_fpb_routes.py index 6fb99b1..2290078 100644 --- a/Tools/WebServer/tests/test_fpb_routes.py +++ b/Tools/WebServer/tests/test_fpb_routes.py @@ -108,7 +108,10 @@ def test_serial_test_success(self, mock_helpers): "success": True, "max_working_size": 256, "failed_size": 512, - "recommended_chunk_size": 192, + "recommended_upload_chunk_size": 192, + "recommended_download_chunk_size": 2048, + "fragment_needed": False, + "phases": {}, } mock_helpers.return_value = make_mock_helpers(mock_fpb) diff --git a/Tools/WebServer/tests/test_i18n.py b/Tools/WebServer/tests/test_i18n.py index bd8bad3..e9b3783 100644 --- a/Tools/WebServer/tests/test_i18n.py +++ b/Tools/WebServer/tests/test_i18n.py @@ -1259,8 +1259,6 @@ class TestDevicePopupMessages(unittest.TestCase): "clear_slots_hint", # Serial test "serial_test_complete", - "current_chunk_size", - "recommended_chunk_size", "apply_recommended_size", # ELF watcher "elf_file_changed", diff --git a/Tools/WebServer/tests/test_routes.py b/Tools/WebServer/tests/test_routes.py index b713570..7e54736 100644 --- a/Tools/WebServer/tests/test_routes.py +++ b/Tools/WebServer/tests/test_routes.py @@ -229,9 +229,10 @@ def test_api_config(self): "port": "/dev/ttyTest", "baudrate": 9600, "patch_mode": "debugmon", - "chunk_size": 128, - "tx_chunk_size": 16, - "tx_chunk_delay": 0.01, + "upload_chunk_size": 128, + "download_chunk_size": 1024, + "serial_tx_fragment_size": 16, + "serial_tx_fragment_delay": 0.01, } response = 
self.client.post("/api/config", json=payload) data = json.loads(response.data) @@ -240,9 +241,9 @@ def test_api_config(self): self.assertEqual(state.device.port, "/dev/ttyTest") self.assertEqual(state.device.baudrate, 9600) self.assertEqual(state.device.patch_mode, "debugmon") - self.assertEqual(state.device.chunk_size, 128) - self.assertEqual(state.device.tx_chunk_size, 16) - self.assertEqual(state.device.tx_chunk_delay, 0.01) + self.assertEqual(state.device.upload_chunk_size, 128) + self.assertEqual(state.device.serial_tx_fragment_size, 16) + self.assertEqual(state.device.serial_tx_fragment_delay, 0.01) def test_patch_template(self): """Test getting patch template""" @@ -267,7 +268,7 @@ def test_get_status_all_fields(self): "compile_commands_path", "watch_dirs", "patch_mode", - "chunk_size", + "upload_chunk_size", "auto_connect", "auto_compile", "inject_active", @@ -354,17 +355,17 @@ def test_update_patch_mode(self): self.assertTrue(data["success"]) self.assertEqual(state.device.patch_mode, "jump") - def test_update_chunk_size(self): - """Test updating chunk size""" + def test_update_upload_chunk_size(self): + """Test updating upload chunk size""" response = self.client.post( "/api/config", - data=json.dumps({"chunk_size": 512}), + data=json.dumps({"upload_chunk_size": 512}), content_type="application/json", ) data = json.loads(response.data) self.assertTrue(data["success"]) - self.assertEqual(state.device.chunk_size, 512) + self.assertEqual(state.device.upload_chunk_size, 512) def test_update_transfer_max_retries(self): """Test updating transfer max retries""" @@ -453,26 +454,26 @@ def test_get_config_includes_ghidra_path(self): self.assertIn("ghidra_path", data) self.assertEqual(data["ghidra_path"], "/home/user/ghidra") - def test_update_verify_crc(self): - """Test updating verify_crc setting""" + def test_update_transfer_max_retries_setting(self): + """Test updating transfer_max_retries setting (replaces old verify_crc test)""" response = self.client.post( 
"/api/config", - data=json.dumps({"verify_crc": False}), + data=json.dumps({"transfer_max_retries": 20}), content_type="application/json", ) data = json.loads(response.data) self.assertTrue(data["success"]) - self.assertFalse(state.device.verify_crc) + self.assertEqual(state.device.transfer_max_retries, 20) - def test_get_config_includes_verify_crc(self): - """Test GET config includes verify_crc""" - state.device.verify_crc = True + def test_get_config_includes_upload_chunk_size(self): + """Test GET config includes upload_chunk_size""" + state.device.upload_chunk_size = 256 response = self.client.get("/api/config") data = json.loads(response.data) - self.assertIn("verify_crc", data) - self.assertTrue(data["verify_crc"]) + self.assertIn("upload_chunk_size", data) + self.assertEqual(data["upload_chunk_size"], 256) def test_update_enable_decompile(self): """Test updating enable_decompile setting""" @@ -544,7 +545,18 @@ def test_serial_success(self, mock_get_fpb): {"size": 256, "passed": True, "response_time_ms": 20.1}, {"size": 512, "passed": False, "error": "No response (timeout)"}, ], - "recommended_chunk_size": 192, + "recommended_upload_chunk_size": 192, + "recommended_download_chunk_size": 2048, + "fragment_needed": False, + "phases": { + "fragment": {"needed": False}, + "upload": {"max_working_size": 256, "failed_size": 512}, + "download": { + "max_working_size": 2412, + "failed_size": 0, + "skipped": False, + }, + }, } mock_fpb.enter_fl_mode = Mock() mock_fpb.exit_fl_mode = Mock() @@ -560,7 +572,8 @@ def test_serial_success(self, mock_get_fpb): self.assertTrue(data["success"]) self.assertEqual(data["max_working_size"], 256) self.assertEqual(data["failed_size"], 512) - self.assertEqual(data["recommended_chunk_size"], 192) + self.assertEqual(data["recommended_upload_chunk_size"], 192) + self.assertIn("recommended_download_chunk_size", data) self.assertEqual(len(data["tests"]), 6) @patch("routes.get_fpb_inject") @@ -575,7 +588,10 @@ def test_serial_all_pass(self, 
mock_get_fpb): {"size": 16, "passed": True}, {"size": 32, "passed": True}, ], - "recommended_chunk_size": 3072, + "recommended_upload_chunk_size": 3072, + "recommended_download_chunk_size": 4096, + "fragment_needed": False, + "phases": {}, } mock_fpb.enter_fl_mode = Mock() mock_fpb.exit_fl_mode = Mock() @@ -601,7 +617,9 @@ def test_serial_not_connected(self, mock_get_fpb): "max_working_size": 0, "failed_size": 0, "tests": [], - "recommended_chunk_size": 64, + "recommended_upload_chunk_size": 64, + "recommended_download_chunk_size": 1024, + "fragment_needed": False, } mock_fpb.enter_fl_mode = Mock() mock_fpb.exit_fl_mode = Mock() diff --git a/Tools/WebServer/tests/test_serial_protocol.py b/Tools/WebServer/tests/test_serial_protocol.py index 5d51697..d2b97d9 100644 --- a/Tools/WebServer/tests/test_serial_protocol.py +++ b/Tools/WebServer/tests/test_serial_protocol.py @@ -247,7 +247,7 @@ class TestReadMemory(unittest.TestCase): def setUp(self): self.device = MagicMock() - self.device.chunk_size = 128 + self.device.download_chunk_size = 128 self.protocol = FPBProtocol(self.device) def test_single_chunk(self): @@ -275,7 +275,7 @@ def test_multi_chunk(self): import struct from utils.crc import crc16_update - self.device.chunk_size = 4 + self.device.download_chunk_size = 4 base_addr = 0x20000000 def mock_send(cmd, timeout=0.5): @@ -343,7 +343,7 @@ class TestWriteMemory(unittest.TestCase): def setUp(self): self.device = MagicMock() - self.device.chunk_size = 128 + self.device.upload_chunk_size = 128 self.protocol = FPBProtocol(self.device) def test_single_chunk(self): @@ -359,7 +359,7 @@ def test_single_chunk(self): def test_multi_chunk(self): """Write data spanning multiple chunks.""" - self.device.chunk_size = 4 + self.device.upload_chunk_size = 4 self.protocol.send_cmd = MagicMock(return_value="[FLOK] WRITE 4 bytes") ok, msg = self.protocol.write_memory(0x20000000, b"\xaa" * 10) @@ -400,7 +400,8 @@ class TestEnhancedCRC(unittest.TestCase): def setUp(self): self.device = 
MagicMock() - self.device.chunk_size = 128 + self.device.upload_chunk_size = 128 + self.device.download_chunk_size = 128 self.protocol = FPBProtocol(self.device) def test_write_crc_includes_addr_and_len(self): diff --git a/Tools/WebServer/tests/test_state.py b/Tools/WebServer/tests/test_state.py index 2bff338..968acba 100644 --- a/Tools/WebServer/tests/test_state.py +++ b/Tools/WebServer/tests/test_state.py @@ -27,7 +27,7 @@ def test_init_defaults(self): self.assertIsNone(device.ser) self.assertEqual(device.baudrate, 115200) self.assertEqual(device.patch_mode, "trampoline") - self.assertEqual(device.chunk_size, 128) + self.assertEqual(device.upload_chunk_size, 128) def test_add_tool_log(self): """Test adding tool log""" @@ -223,14 +223,14 @@ def test_round_trip_all_keys(self): device1.compile_commands_path = "/path/to/compile_commands.json" device1.watch_dirs = ["/dir1", "/dir2"] device1.patch_mode = "debugmon" - device1.chunk_size = 256 - device1.tx_chunk_size = 32 - device1.tx_chunk_delay = 0.01 + device1.upload_chunk_size = 256 + device1.download_chunk_size = 2048 + device1.serial_tx_fragment_size = 32 + device1.serial_tx_fragment_delay = 0.01 device1.auto_connect = True device1.auto_compile = True device1.enable_decompile = True device1.ghidra_path = "/opt/ghidra_11.0" - device1.verify_crc = False device1.transfer_max_retries = 5 device1.log_file_enabled = True device1.log_file_path = "/tmp/test.log" diff --git a/Tools/WebServer/tests/test_symbols_routes.py b/Tools/WebServer/tests/test_symbols_routes.py index 7b716bd..5b55232 100644 --- a/Tools/WebServer/tests/test_symbols_routes.py +++ b/Tools/WebServer/tests/test_symbols_routes.py @@ -1228,14 +1228,14 @@ def test_small_size(self): def test_large_size(self): from app.routes.symbols import _dynamic_timeout - state.device.chunk_size = 128 + state.device.download_chunk_size = 128 # 4096 bytes = 32 chunks * 3s = 96s self.assertEqual(_dynamic_timeout(4096), 96.0) def test_custom_chunk_size(self): from 
app.routes.symbols import _dynamic_timeout - state.device.chunk_size = 256 + state.device.download_chunk_size = 256 # 1024 bytes = 4 chunks * 3s = 12s self.assertEqual(_dynamic_timeout(1024), 12.0) diff --git a/Tools/WebServer/tests/test_transfer_extended.py b/Tools/WebServer/tests/test_transfer_extended.py index 8674b95..9e3b283 100644 --- a/Tools/WebServer/tests/test_transfer_extended.py +++ b/Tools/WebServer/tests/test_transfer_extended.py @@ -34,8 +34,8 @@ def setUp(self): self.mock_fpb.exit_fl_mode = Mock() self.mock_device = Mock() - self.mock_device.chunk_size = 64 - self.mock_device.verify_crc = False + self.mock_device.upload_chunk_size = 64 + self.mock_device.download_chunk_size = 64 self.mock_device.transfer_max_retries = 3 self.state_patcher = patch("app.routes.transfer.state") @@ -85,10 +85,16 @@ class TestUploadRoute(TransferTestBase): def test_upload_success(self, mock_get_ft): """Test successful file upload.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (True, "OK") mock_ft.fwrite.return_value = (True, "OK") mock_ft.fclose.return_value = (True, "OK") + mock_ft.fcrc.return_value = ( + False, + 0, + 0, + ) # CRC check fails gracefully (warning) mock_ft.get_stats.return_value = {"packet_loss_rate": "0.0"} mock_ft.reset_stats = Mock() mock_ft.fpb = self.mock_fpb @@ -114,7 +120,8 @@ def test_upload_success(self, mock_get_ft): def test_upload_fopen_failure(self, mock_get_ft): """Test upload when fopen fails.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (False, "Permission denied") mock_ft.fpb = self.mock_fpb mock_get_ft.return_value = mock_ft @@ -138,7 +145,8 @@ def test_upload_fopen_failure(self, mock_get_ft): def test_upload_fwrite_failure(self, mock_get_ft): """Test upload when fwrite fails.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + 
mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (True, "OK") mock_ft.fwrite.return_value = (False, "Write error") mock_ft.fclose.return_value = (True, "OK") @@ -164,10 +172,10 @@ def test_upload_fwrite_failure(self, mock_get_ft): @patch("app.routes.transfer._get_file_transfer") def test_upload_with_crc_success(self, mock_get_ft): """Test upload with CRC verification success.""" - self.mock_device.verify_crc = True mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (True, "OK") mock_ft.fwrite.return_value = (True, "OK") mock_ft.fclose.return_value = (True, "OK") @@ -200,10 +208,10 @@ def test_upload_with_crc_success(self, mock_get_ft): @patch("app.routes.transfer._get_file_transfer") def test_upload_crc_mismatch(self, mock_get_ft): """Test upload with CRC mismatch.""" - self.mock_device.verify_crc = True mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (True, "OK") mock_ft.fwrite.return_value = (True, "OK") mock_ft.fclose.return_value = (True, "OK") @@ -231,10 +239,10 @@ def test_upload_crc_mismatch(self, mock_get_ft): @patch("app.routes.transfer._get_file_transfer") def test_upload_size_mismatch(self, mock_get_ft): """Test upload with size mismatch.""" - self.mock_device.verify_crc = True mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (True, "OK") mock_ft.fwrite.return_value = (True, "OK") mock_ft.fclose.return_value = (True, "OK") @@ -262,10 +270,10 @@ def test_upload_size_mismatch(self, mock_get_ft): @patch("app.routes.transfer._get_file_transfer") def test_upload_crc_check_failure(self, mock_get_ft): """Test upload when CRC check itself fails.""" - self.mock_device.verify_crc = True mock_ft = Mock() - mock_ft.chunk_size = 64 + 
mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fopen.return_value = (True, "OK") mock_ft.fwrite.return_value = (True, "OK") mock_ft.fclose.return_value = (True, "OK") @@ -294,7 +302,8 @@ def test_upload_crc_check_failure(self, mock_get_ft): def test_upload_cancel(self, mock_get_ft): """Test upload cancellation.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 def fopen_sets_cancel(*args, **kwargs): _transfer_cancelled.set() @@ -350,13 +359,19 @@ class TestDownloadRoute(TransferTestBase): def test_download_success(self, mock_get_ft): """Test successful file download.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fstat.return_value = (True, {"size": 11, "type": "file"}) mock_ft.fopen.return_value = (True, "OK") mock_ft.fread.side_effect = [ (True, b"hello world", ""), (True, b"", "EOF"), ] + mock_ft.fcrc.return_value = ( + False, + 0, + 0, + ) # CRC check fails gracefully (warning) mock_ft.fclose.return_value = (True, "OK") mock_ft.get_stats.return_value = {"packet_loss_rate": "0.0"} mock_ft.reset_stats = Mock() @@ -459,7 +474,8 @@ def test_download_fopen_failure(self, mock_get_ft): def test_download_fread_failure(self, mock_get_ft): """Test download when fread fails.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fstat.return_value = (True, {"size": 100, "type": "file"}) mock_ft.fopen.return_value = (True, "OK") mock_ft.fread.return_value = (False, b"", "Read error") @@ -481,7 +497,6 @@ def test_download_fread_failure(self, mock_get_ft): @patch("app.routes.transfer._get_file_transfer") def test_download_with_crc_success(self, mock_get_ft): """Test download with CRC verification.""" - self.mock_device.verify_crc = True file_content = b"test data here" from utils.crc import crc16 @@ -489,7 +504,8 @@ def 
test_download_with_crc_success(self, mock_get_ft): expected_crc = crc16(file_content) mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fstat.return_value = (True, {"size": len(file_content), "type": "file"}) mock_ft.fopen.return_value = (True, "OK") mock_ft.fread.side_effect = [ @@ -516,10 +532,10 @@ def test_download_with_crc_success(self, mock_get_ft): @patch("app.routes.transfer._get_file_transfer") def test_download_crc_mismatch(self, mock_get_ft): """Test download with CRC mismatch.""" - self.mock_device.verify_crc = True mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fstat.return_value = (True, {"size": 10, "type": "file"}) mock_ft.fopen.return_value = (True, "OK") mock_ft.fread.side_effect = [ @@ -548,7 +564,8 @@ def test_download_crc_mismatch(self, mock_get_ft): def test_download_cancel(self, mock_get_ft): """Test download cancellation.""" mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fstat.return_value = (True, {"size": 1000, "type": "file"}) def fopen_sets_cancel(*args, **kwargs): @@ -591,10 +608,10 @@ def test_download_worker_not_running(self): @patch("app.routes.transfer._get_file_transfer") def test_download_crc_check_failure(self, mock_get_ft): """Test download when CRC check itself fails.""" - self.mock_device.verify_crc = True mock_ft = Mock() - mock_ft.chunk_size = 64 + mock_ft.upload_chunk_size = 64 + mock_ft.download_chunk_size = 64 mock_ft.fstat.return_value = (True, {"size": 5, "type": "file"}) mock_ft.fopen.return_value = (True, "OK") mock_ft.fread.side_effect = [ diff --git a/Tools/WebServer/tests/test_transfer_routes.py b/Tools/WebServer/tests/test_transfer_routes.py index 9cc933f..8ea1a76 100644 --- a/Tools/WebServer/tests/test_transfer_routes.py +++ b/Tools/WebServer/tests/test_transfer_routes.py @@ -40,7 +40,8 @@ def 
setUp(self): # Create mock device self.mock_device = Mock() - self.mock_device.chunk_size = 256 + self.mock_device.upload_chunk_size = 256 + self.mock_device.download_chunk_size = 256 self.mock_device.add_tool_log = Mock() # Create mock log functions @@ -482,13 +483,14 @@ def test_get_file_transfer(self): with patch("app.routes.transfer._get_helpers") as mock_helpers: mock_helpers.return_value = (Mock(), lambda: mock_fpb) with patch("app.routes.transfer.state") as mock_state: - mock_state.device.chunk_size = 512 + mock_state.device.upload_chunk_size = 512 + mock_state.device.download_chunk_size = 512 mock_state.device.transfer_max_retries = 5 from app.routes.transfer import _get_file_transfer ft = _get_file_transfer() self.assertEqual(ft.fpb, mock_fpb) - self.assertEqual(ft.chunk_size, 512) + self.assertEqual(ft.upload_chunk_size, 512) self.assertEqual(ft.max_retries, 5) def test_get_file_transfer_default_chunk_size(self): @@ -497,12 +499,13 @@ def test_get_file_transfer_default_chunk_size(self): with patch("app.routes.transfer._get_helpers") as mock_helpers: mock_helpers.return_value = (Mock(), lambda: mock_fpb) with patch("app.routes.transfer.state") as mock_state: - mock_state.device.chunk_size = None + mock_state.device.upload_chunk_size = None + mock_state.device.download_chunk_size = None mock_state.device.transfer_max_retries = 10 from app.routes.transfer import _get_file_transfer ft = _get_file_transfer() - self.assertEqual(ft.chunk_size, 256) + self.assertEqual(ft.upload_chunk_size, 128) def test_get_file_transfer_default_max_retries(self): """Test _get_file_transfer with default max_retries when not set.""" @@ -510,7 +513,8 @@ def test_get_file_transfer_default_max_retries(self): with patch("app.routes.transfer._get_helpers") as mock_helpers: mock_helpers.return_value = (Mock(), lambda: mock_fpb) with patch("app.routes.transfer.state") as mock_state: - mock_state.device.chunk_size = 256 + mock_state.device.upload_chunk_size = 256 + 
mock_state.device.download_chunk_size = 256 # Simulate missing transfer_max_retries attribute del mock_state.device.transfer_max_retries from app.routes.transfer import _get_file_transfer