diff --git a/applications/debug/unit_tests/resources/unit_tests/compress/hsstream.in.bin b/applications/debug/unit_tests/resources/unit_tests/compress/hsstream.in.bin new file mode 100644 index 000000000..c3bcf5fda Binary files /dev/null and b/applications/debug/unit_tests/resources/unit_tests/compress/hsstream.in.bin differ diff --git a/applications/debug/unit_tests/resources/unit_tests/compress/test.ths b/applications/debug/unit_tests/resources/unit_tests/compress/test.ths new file mode 100644 index 000000000..ab7d34af8 Binary files /dev/null and b/applications/debug/unit_tests/resources/unit_tests/compress/test.ths differ diff --git a/applications/debug/unit_tests/tests/compress/compress_test.c b/applications/debug/unit_tests/tests/compress/compress_test.c index 15984083d..0f2bd7a03 100644 --- a/applications/debug/unit_tests/tests/compress/compress_test.c +++ b/applications/debug/unit_tests/tests/compress/compress_test.c @@ -1,6 +1,9 @@ #include "../test.h" // IWYU pragma: keep #include +#include +#include +#include #include #include @@ -56,7 +59,7 @@ static void compress_test_reference_comp_decomp() { furi_record_close(RECORD_STORAGE); uint8_t* temp_buffer = malloc(1024); - Compress* comp = compress_alloc(1024); + Compress* comp = compress_alloc(CompressTypeHeatshrink, &compress_config_heatshrink_default); size_t encoded_size = 0; mu_assert( @@ -98,7 +101,7 @@ static void compress_test_random_comp_decomp() { // We only fill half of the buffer with random data, so if anything goes wrong, there's no overflow static const size_t src_data_size = src_buffer_size / 2; - Compress* comp = compress_alloc(src_buffer_size); + Compress* comp = compress_alloc(CompressTypeHeatshrink, &compress_config_heatshrink_default); uint8_t* src_buff = malloc(src_buffer_size); uint8_t* encoded_buff = malloc(encoded_buffer_size); uint8_t* decoded_buff = malloc(src_buffer_size); @@ -146,9 +149,200 @@ static void compress_test_random_comp_decomp() { compress_free(comp); } +static int32_t hs_unpacker_file_read(void* context, uint8_t* buffer, size_t size) { + File* file = (File*)context; + return storage_file_read(file, buffer, size); +} + +static int32_t hs_unpacker_file_write(void* context, uint8_t* buffer, size_t size) { + File* file = (File*)context; + return storage_file_write(file, buffer, size); +} +/* +Source file was generated with: +```python3 +import random, string +random.seed(1337) +with open("hsstream.out.bin", "wb") as f: + for c in random.choices(string.printable, k=1024): + for _ in range(random.randint(1, 10)): + f.write(c.encode()) +``` + +It was compressed with heatshrink using the following command: +`python3 -m heatshrink2 compress -w 9 -l 4 hsstream.out.bin hsstream.in.bin` +*/ + +#define HSSTREAM_IN COMPRESS_UNIT_TESTS_PATH("hsstream.in.bin") +#define HSSTREAM_OUT COMPRESS_UNIT_TESTS_PATH("hsstream.out.bin") + +static void compress_test_heatshrink_stream() { + Storage* api = furi_record_open(RECORD_STORAGE); + File* comp_file = storage_file_alloc(api); + File* dest_file = storage_file_alloc(api); + + CompressConfigHeatshrink config = { + .window_sz2 = 9, + .lookahead_sz2 = 4, + .input_buffer_sz = 128, + }; + Compress* compress = compress_alloc(CompressTypeHeatshrink, &config); + + do { + storage_simply_remove(api, HSSTREAM_OUT); + + mu_assert( + storage_file_open(comp_file, HSSTREAM_IN, FSAM_READ, FSOM_OPEN_EXISTING), + "Failed to open compressed file"); + + mu_assert( + storage_file_open(dest_file, HSSTREAM_OUT, FSAM_WRITE, FSOM_OPEN_ALWAYS), + "Failed to open decompressed file"); + + 
mu_assert( + compress_decode_streamed( + compress, hs_unpacker_file_read, comp_file, hs_unpacker_file_write, dest_file), + "Decompression failed"); + + storage_file_close(dest_file); + + unsigned char md5[16]; + FS_Error file_error; + mu_assert( + md5_calc_file(dest_file, HSSTREAM_OUT, md5, &file_error), "Failed to calculate md5"); + + const unsigned char expected_md5[16] = { + 0xa3, + 0x70, + 0xe8, + 0x8b, + 0xa9, + 0x42, + 0x74, + 0xf4, + 0xaa, + 0x12, + 0x8d, + 0x41, + 0xd2, + 0xb6, + 0x71, + 0xc9}; + mu_assert(memcmp(md5, expected_md5, sizeof(md5)) == 0, "MD5 mismatch after decompression"); + + storage_simply_remove(api, HSSTREAM_OUT); + } while(false); + + compress_free(compress); + storage_file_free(comp_file); + storage_file_free(dest_file); + furi_record_close(RECORD_STORAGE); +} + +#define HS_TAR_PATH COMPRESS_UNIT_TESTS_PATH("test.ths") +#define HS_TAR_EXTRACT_PATH COMPRESS_UNIT_TESTS_PATH("tar_out") + +static bool file_counter(const char* name, bool is_dir, void* context) { + UNUSED(name); + UNUSED(is_dir); + int32_t* n_entries = (int32_t*)context; + (*n_entries)++; + return true; +} + +/* +Heatshrink tar file contents and MD5 sums: +file1.txt: 64295676ceed5cce2d0dcac402e4bda4 +file2.txt: 188f67f297eedd7bf3d6a4d3c2fc31c4 +dir/file3.txt: 34d98ad8135ffe502dba374690136d16 +dir/big_file.txt: ee169c1e1791a4d319dbfaefaa850e98 +dir/nested_dir/file4.txt: e099fcb2aaa0672375eaedc549247ee6 +dir/nested_dir/empty_file.txt: d41d8cd98f00b204e9800998ecf8427e + +XOR of all MD5 sums: 92ed5729786d0e1176d047e35f52d376 +*/ + +static void compress_test_heatshrink_tar() { + Storage* api = furi_record_open(RECORD_STORAGE); + + TarArchive* archive = tar_archive_alloc(api); + FuriString* path = furi_string_alloc(); + FileInfo fileinfo; + File* file = storage_file_alloc(api); + + do { + storage_simply_remove_recursive(api, HS_TAR_EXTRACT_PATH); + + mu_assert(storage_simply_mkdir(api, HS_TAR_EXTRACT_PATH), "Failed to create extract dir"); + + mu_assert( + tar_archive_get_mode_for_path(HS_TAR_PATH) == TarOpenModeReadHeatshrink, + "Invalid mode for heatshrink tar"); + + mu_assert( + tar_archive_open(archive, HS_TAR_PATH, TarOpenModeReadHeatshrink), + "Failed to open heatshrink tar"); + + int32_t n_entries = 0; + tar_archive_set_file_callback(archive, file_counter, &n_entries); + + mu_assert( + tar_archive_unpack_to(archive, HS_TAR_EXTRACT_PATH, NULL), + "Failed to unpack heatshrink tar"); + + mu_assert(n_entries == 9, "Invalid number of entries in heatshrink tar"); + + uint8_t md5_total[16] = {0}, md5_file[16]; + + DirWalk* dir_walk = dir_walk_alloc(api); + mu_assert(dir_walk_open(dir_walk, HS_TAR_EXTRACT_PATH), "Failed to open dirwalk"); + while(dir_walk_read(dir_walk, path, &fileinfo) == DirWalkOK) { + if(file_info_is_dir(&fileinfo)) { + continue; + } + mu_assert( + md5_calc_file(file, furi_string_get_cstr(path), md5_file, NULL), + "Failed to calc md5"); + + for(size_t i = 0; i < 16; i++) { + md5_total[i] ^= md5_file[i]; + } + } + dir_walk_free(dir_walk); + + static const unsigned char expected_md5[16] = { + 0x92, + 0xed, + 0x57, + 0x29, + 0x78, + 0x6d, + 0x0e, + 0x11, + 0x76, + 0xd0, + 0x47, + 0xe3, + 0x5f, + 0x52, + 0xd3, + 0x76}; + mu_assert(memcmp(md5_total, expected_md5, sizeof(md5_total)) == 0, "MD5 mismatch"); + + storage_simply_remove_recursive(api, HS_TAR_EXTRACT_PATH); + } while(false); + + storage_file_free(file); + furi_string_free(path); + tar_archive_free(archive); + furi_record_close(RECORD_STORAGE); +} + MU_TEST_SUITE(test_compress) { MU_RUN_TEST(compress_test_random_comp_decomp); 
MU_RUN_TEST(compress_test_reference_comp_decomp); + MU_RUN_TEST(compress_test_heatshrink_stream); + MU_RUN_TEST(compress_test_heatshrink_tar); } int run_minunit_test_compress(void) { diff --git a/applications/services/rpc/rpc_storage.c b/applications/services/rpc/rpc_storage.c index 306b25777..89991aa86 100644 --- a/applications/services/rpc/rpc_storage.c +++ b/applications/services/rpc/rpc_storage.c @@ -700,21 +700,21 @@ static void rpc_system_storage_tar_extract_process(const PB_Main* request, void* TarArchive* archive = tar_archive_alloc(rpc_storage->api); do { - if(!path_contains_only_ascii(request->content.storage_tar_extract_request.out_path)) { + const char *tar_path = request->content.storage_tar_extract_request.tar_path, + *out_path = request->content.storage_tar_extract_request.out_path; + if(!path_contains_only_ascii(out_path)) { status = PB_CommandStatus_ERROR_STORAGE_INVALID_NAME; break; } - if(!tar_archive_open( - archive, - request->content.storage_tar_extract_request.tar_path, - TAR_OPEN_MODE_READ)) { + TarOpenMode tar_mode = tar_archive_get_mode_for_path(tar_path); + + if(!tar_archive_open(archive, tar_path, tar_mode)) { status = PB_CommandStatus_ERROR_STORAGE_INVALID_PARAMETER; break; } - if(!tar_archive_unpack_to( - archive, request->content.storage_tar_extract_request.out_path, NULL)) { + if(!tar_archive_unpack_to(archive, out_path, NULL)) { status = PB_CommandStatus_ERROR_STORAGE_INTERNAL; break; } diff --git a/applications/services/storage/storage_cli.c b/applications/services/storage/storage_cli.c index 67a7e288b..6e8a937ea 100644 --- a/applications/services/storage/storage_cli.c +++ b/applications/services/storage/storage_cli.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ static void storage_cli_print_usage(void) { printf("\tmd5\t - md5 hash of the file\r\n"); printf("\tstat\t - info about file or dir\r\n"); printf("\ttimestamp\t - last modification timestamp\r\n"); + printf("\textract\t - extract tar archive to destination\r\n"); }; static void storage_cli_print_error(FS_Error error) { @@ -496,6 +498,47 @@ static void storage_cli_md5(Cli* cli, FuriString* path) { furi_record_close(RECORD_STORAGE); } +static bool tar_extract_file_callback(const char* name, bool is_directory, void* context) { + UNUSED(context); + printf("\t%s %s\r\n", is_directory ? "D" : "F", name); + return true; +} + +static void storage_cli_extract(Cli* cli, FuriString* old_path, FuriString* args) { + UNUSED(cli); + FuriString* new_path = furi_string_alloc(); + + if(!args_read_probably_quoted_string_and_trim(args, new_path)) { + storage_cli_print_usage(); + furi_string_free(new_path); + return; + } + + Storage* api = furi_record_open(RECORD_STORAGE); + + TarArchive* archive = tar_archive_alloc(api); + TarOpenMode tar_mode = tar_archive_get_mode_for_path(furi_string_get_cstr(old_path)); + do { + if(!tar_archive_open(archive, furi_string_get_cstr(old_path), tar_mode)) { + printf("Failed to open archive\r\n"); + break; + } + uint32_t start_tick = furi_get_tick(); + tar_archive_set_file_callback(archive, tar_extract_file_callback, NULL); + printf("Unpacking to %s\r\n", furi_string_get_cstr(new_path)); + bool success = tar_archive_unpack_to(archive, furi_string_get_cstr(new_path), NULL); + uint32_t end_tick = furi_get_tick(); + printf( + "Decompression %s in %lu ticks \r\n", + success ? 
"success" : "failed", + end_tick - start_tick); + } while(false); + + tar_archive_free(archive); + furi_string_free(new_path); + furi_record_close(RECORD_STORAGE); +} + void storage_cli(Cli* cli, FuriString* args, void* context) { UNUSED(context); FuriString* cmd; @@ -589,6 +632,11 @@ void storage_cli(Cli* cli, FuriString* args, void* context) { break; } + if(furi_string_cmp_str(cmd, "extract") == 0) { + storage_cli_extract(cli, path, args); + break; + } + storage_cli_print_usage(); } while(false); diff --git a/applications/services/storage/storage_internal_api.c b/applications/services/storage/storage_internal_api.c index 37ea4a2a5..4cbce7546 100644 --- a/applications/services/storage/storage_internal_api.c +++ b/applications/services/storage/storage_internal_api.c @@ -6,7 +6,7 @@ FS_Error storage_int_backup(Storage* storage, const char* dstname) { furi_check(storage); TarArchive* archive = tar_archive_alloc(storage); - bool success = tar_archive_open(archive, dstname, TAR_OPEN_MODE_WRITE) && + bool success = tar_archive_open(archive, dstname, TarOpenModeWrite) && tar_archive_add_dir(archive, STORAGE_INT_PATH_PREFIX, "") && tar_archive_finalize(archive); tar_archive_free(archive); @@ -18,7 +18,7 @@ FS_Error furi_check(storage); TarArchive* archive = tar_archive_alloc(storage); - bool success = tar_archive_open(archive, srcname, TAR_OPEN_MODE_READ) && + bool success = tar_archive_open(archive, srcname, TarOpenModeRead) && tar_archive_unpack_to(archive, STORAGE_INT_PATH_PREFIX, converter); tar_archive_free(archive); return success ? FSE_OK : FSE_INTERNAL; diff --git a/applications/system/updater/util/update_task.c b/applications/system/updater/util/update_task.c index c0ea6421c..17f1a2680 100644 --- a/applications/system/updater/util/update_task.c +++ b/applications/system/updater/util/update_task.c @@ -9,6 +9,8 @@ #include #include +#define TAG "UpdWorker" + static const char* update_task_stage_descr[] = { [UpdateTaskStageProgress] = "...", [UpdateTaskStageReadManifest] = "Loading update manifest", @@ -23,7 +25,9 @@ static const char* update_task_stage_descr[] = { [UpdateTaskStageOBValidation] = "Validating opt. 
bytes", [UpdateTaskStageLfsBackup] = "Backing up LFS", [UpdateTaskStageLfsRestore] = "Restoring LFS", - [UpdateTaskStageResourcesUpdate] = "Updating resources", + [UpdateTaskStageResourcesFileCleanup] = "Cleaning up files", + [UpdateTaskStageResourcesDirCleanup] = "Cleaning up directories", + [UpdateTaskStageResourcesFileUnpack] = "Extracting resources", [UpdateTaskStageSplashscreenInstall] = "Installing splashscreen", [UpdateTaskStageCompleted] = "Restarting...", [UpdateTaskStageError] = "Error", @@ -196,7 +200,19 @@ static const struct { .descr = "LFS I/O error", }, { - .stage = UpdateTaskStageResourcesUpdate, + .stage = UpdateTaskStageResourcesFileCleanup, + .percent_min = 0, + .percent_max = 100, + .descr = "SD card I/O error", + }, + { + .stage = UpdateTaskStageResourcesDirCleanup, + .percent_min = 0, + .percent_max = 100, + .descr = "SD card I/O error", + }, + { + .stage = UpdateTaskStageResourcesFileUnpack, .percent_min = 0, .percent_max = 100, .descr = "SD card I/O error", @@ -230,20 +246,22 @@ static const UpdateTaskStageGroupMap update_task_stage_progress[] = { [UpdateTaskStageLfsBackup] = STAGE_DEF(UpdateTaskStageGroupPreUpdate, 5), [UpdateTaskStageRadioImageValidate] = STAGE_DEF(UpdateTaskStageGroupRadio, 15), - [UpdateTaskStageRadioErase] = STAGE_DEF(UpdateTaskStageGroupRadio, 35), - [UpdateTaskStageRadioWrite] = STAGE_DEF(UpdateTaskStageGroupRadio, 60), + [UpdateTaskStageRadioErase] = STAGE_DEF(UpdateTaskStageGroupRadio, 25), + [UpdateTaskStageRadioWrite] = STAGE_DEF(UpdateTaskStageGroupRadio, 40), [UpdateTaskStageRadioInstall] = STAGE_DEF(UpdateTaskStageGroupRadio, 30), [UpdateTaskStageRadioBusy] = STAGE_DEF(UpdateTaskStageGroupRadio, 5), [UpdateTaskStageOBValidation] = STAGE_DEF(UpdateTaskStageGroupOptionBytes, 2), - [UpdateTaskStageValidateDFUImage] = STAGE_DEF(UpdateTaskStageGroupFirmware, 30), - [UpdateTaskStageFlashWrite] = STAGE_DEF(UpdateTaskStageGroupFirmware, 150), - [UpdateTaskStageFlashValidate] = STAGE_DEF(UpdateTaskStageGroupFirmware, 15), + [UpdateTaskStageValidateDFUImage] = STAGE_DEF(UpdateTaskStageGroupFirmware, 33), + [UpdateTaskStageFlashWrite] = STAGE_DEF(UpdateTaskStageGroupFirmware, 100), + [UpdateTaskStageFlashValidate] = STAGE_DEF(UpdateTaskStageGroupFirmware, 20), [UpdateTaskStageLfsRestore] = STAGE_DEF(UpdateTaskStageGroupPostUpdate, 5), - [UpdateTaskStageResourcesUpdate] = STAGE_DEF(UpdateTaskStageGroupResources, 255), + [UpdateTaskStageResourcesFileCleanup] = STAGE_DEF(UpdateTaskStageGroupResources, 100), + [UpdateTaskStageResourcesDirCleanup] = STAGE_DEF(UpdateTaskStageGroupResources, 50), + [UpdateTaskStageResourcesFileUnpack] = STAGE_DEF(UpdateTaskStageGroupResources, 255), [UpdateTaskStageSplashscreenInstall] = STAGE_DEF(UpdateTaskStageGroupSplashscreen, 5), [UpdateTaskStageCompleted] = STAGE_DEF(UpdateTaskStageGroupMisc, 1), @@ -288,6 +306,7 @@ static void update_task_calc_completed_stages(UpdateTask* update_task) { void update_task_set_progress(UpdateTask* update_task, UpdateTaskStage stage, uint8_t progress) { if(stage != UpdateTaskStageProgress) { + FURI_LOG_I(TAG, "Stage %d, progress %d", stage, progress); /* do not override more specific error states */ if((stage >= UpdateTaskStageError) && (update_task->state.stage >= UpdateTaskStageError)) { return; diff --git a/applications/system/updater/util/update_task.h b/applications/system/updater/util/update_task.h index a3c47429e..82e310fbd 100644 --- a/applications/system/updater/util/update_task.h +++ b/applications/system/updater/util/update_task.h @@ -31,7 +31,9 @@ typedef enum { 
UpdateTaskStageFlashValidate, UpdateTaskStageLfsRestore, - UpdateTaskStageResourcesUpdate, + UpdateTaskStageResourcesFileCleanup, + UpdateTaskStageResourcesDirCleanup, + UpdateTaskStageResourcesFileUnpack, UpdateTaskStageSplashscreenInstall, UpdateTaskStageCompleted, diff --git a/applications/system/updater/util/update_task_worker_backup.c b/applications/system/updater/util/update_task_worker_backup.c index ef4276fac..5fcae6ef3 100644 --- a/applications/system/updater/util/update_task_worker_backup.c +++ b/applications/system/updater/util/update_task_worker_backup.c @@ -35,36 +35,23 @@ static bool update_task_pre_update(UpdateTask* update_task) { furi_string_free(backup_file_path); return success; } - -typedef enum { - UpdateTaskResourcesWeightsFileCleanup = 20, - UpdateTaskResourcesWeightsDirCleanup = 20, - UpdateTaskResourcesWeightsFileUnpack = 60, -} UpdateTaskResourcesWeights; - -#define UPDATE_TASK_RESOURCES_FILE_TO_TOTAL_PERCENT 90 - typedef struct { UpdateTask* update_task; - int32_t total_files, processed_files; + TarArchive* archive; } TarUnpackProgress; static bool update_task_resource_unpack_cb(const char* name, bool is_directory, void* context) { UNUSED(name); UNUSED(is_directory); TarUnpackProgress* unpack_progress = context; - unpack_progress->processed_files++; + int32_t progress = 0, total = 0; + tar_archive_get_read_progress(unpack_progress->archive, &progress, &total); update_task_set_progress( - unpack_progress->update_task, - UpdateTaskStageProgress, - /* For this stage, last progress segment = extraction */ - (UpdateTaskResourcesWeightsFileCleanup + UpdateTaskResourcesWeightsDirCleanup) + - (unpack_progress->processed_files * UpdateTaskResourcesWeightsFileUnpack) / - (unpack_progress->total_files + 1)); + unpack_progress->update_task, UpdateTaskStageProgress, (progress * 100) / (total + 1)); return true; } -static void update_task_cleanup_resources(UpdateTask* update_task, const uint32_t n_tar_entries) { +static void update_task_cleanup_resources(UpdateTask* update_task) { ResourceManifestReader* manifest_reader = resource_manifest_reader_alloc(update_task->storage); do { FURI_LOG_D(TAG, "Cleaning up old manifest"); @@ -73,20 +60,26 @@ static void update_task_cleanup_resources(UpdateTask* update_task, const uint32_ break; } - const uint32_t n_approx_file_entries = - n_tar_entries * UPDATE_TASK_RESOURCES_FILE_TO_TOTAL_PERCENT / 100 + 1; - uint32_t n_dir_entries = 1; - ResourceManifestEntry* entry_ptr = NULL; - uint32_t n_processed_entries = 0; + /* Iterate over manifest and calculate entries count */ + uint32_t n_file_entries = 1, n_dir_entries = 1; + while((entry_ptr = resource_manifest_reader_next(manifest_reader))) { + if(entry_ptr->type == ResourceManifestEntryTypeFile) { + n_file_entries++; + } else if(entry_ptr->type == ResourceManifestEntryTypeDirectory) { + n_dir_entries++; + } + } + resource_manifest_rewind(manifest_reader); + + update_task_set_progress(update_task, UpdateTaskStageResourcesFileCleanup, 0); + uint32_t n_processed_file_entries = 0; while((entry_ptr = resource_manifest_reader_next(manifest_reader))) { if(entry_ptr->type == ResourceManifestEntryTypeFile) { update_task_set_progress( update_task, UpdateTaskStageProgress, - /* For this stage, first pass = old manifest's file cleanup */ - (n_processed_entries++ * UpdateTaskResourcesWeightsFileCleanup) / - n_approx_file_entries); + (n_processed_file_entries++ * 100) / n_file_entries); FuriString* file_path = furi_string_alloc(); path_concat( @@ -108,16 +101,14 @@ static void 
update_task_cleanup_resources(UpdateTask* update_task, const uint32_ } } - n_processed_entries = 0; + update_task_set_progress(update_task, UpdateTaskStageResourcesDirCleanup, 0); + uint32_t n_processed_dir_entries = 0; while((entry_ptr = resource_manifest_reader_previous(manifest_reader))) { if(entry_ptr->type == ResourceManifestEntryTypeDirectory) { update_task_set_progress( update_task, UpdateTaskStageProgress, - /* For this stage, second 10% of progress = cleanup directories */ - UpdateTaskResourcesWeightsFileCleanup + - (n_processed_entries++ * UpdateTaskResourcesWeightsDirCleanup) / - n_dir_entries); + (n_processed_dir_entries++ * 100) / n_dir_entries); FuriString* folder_path = furi_string_alloc(); @@ -166,26 +157,22 @@ static bool update_task_post_update(UpdateTask* update_task) { if(update_task->state.groups & UpdateTaskStageGroupResources) { TarUnpackProgress progress = { .update_task = update_task, - .total_files = 0, - .processed_files = 0, + .archive = archive, }; - update_task_set_progress(update_task, UpdateTaskStageResourcesUpdate, 0); path_concat( furi_string_get_cstr(update_task->update_path), furi_string_get_cstr(update_task->manifest->resource_bundle), file_path); + CHECK_RESULT(tar_archive_open( + archive, furi_string_get_cstr(file_path), TarOpenModeReadHeatshrink)); + + update_task_cleanup_resources(update_task); + + update_task_set_progress(update_task, UpdateTaskStageResourcesFileUnpack, 0); tar_archive_set_file_callback(archive, update_task_resource_unpack_cb, &progress); - CHECK_RESULT( - tar_archive_open(archive, furi_string_get_cstr(file_path), TAR_OPEN_MODE_READ)); - - progress.total_files = tar_archive_get_entries_count(archive); - if(progress.total_files > 0) { - update_task_cleanup_resources(update_task, progress.total_files); - - CHECK_RESULT(tar_archive_unpack_to(archive, STORAGE_EXT_PATH_PREFIX, NULL)); - } + CHECK_RESULT(tar_archive_unpack_to(archive, STORAGE_EXT_PATH_PREFIX, NULL)); } if(update_task->state.groups & UpdateTaskStageGroupSplashscreen) { diff --git a/documentation/OTA.md b/documentation/OTA.md index 9028eff71..0456eab1f 100644 --- a/documentation/OTA.md +++ b/documentation/OTA.md @@ -102,7 +102,7 @@ Even if something goes wrong, updater allows you to retry failed operations and | Writing flash | **10** | **0-100** | Block read/write error | | Validating flash | **11** | **0-100** | Block read/write error | | Restoring LFS | **12** | **0-100** | FS read/write error | -| Updating resources | **13** | **0-100** | SD card read/write error | +| Updating resources | **13-15** | **0-100** | SD card read/write error | ## Building update packages diff --git a/documentation/file_formats/TarHeatshrinkFormat.md b/documentation/file_formats/TarHeatshrinkFormat.md new file mode 100644 index 000000000..86c27a698 --- /dev/null +++ b/documentation/file_formats/TarHeatshrinkFormat.md @@ -0,0 +1,19 @@ +# Heatshrink-compressed Tarball Format + +Flipper supports the use of Heatshrink compression library for .tar archives. This allows for smaller file sizes and faster OTA updates. + +Heatshrink specification does not define a container format for storing compression parameters. This document describes the format used by Flipper to store Heatshrink-compressed data streams. + +## Header + +Header begins with a magic value, followed by a version number and compression parameters - window size and lookahead size. + +Magic value consists of 4 bytes: `0x48 0x53 0x44 0x53` (ASCII "HSDS", HeatShrink DataStream). 
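+
+For illustration, a minimal Python sketch of the full 7-byte header described below (the helper name is illustrative, not a firmware API; field layout and magic value are taken from this document and the packer script added in this change):
+
+```python
+import struct
+
+HSDS_MAGIC = 0x53445348  # packs little-endian to bytes 48 53 44 53, i.e. "HSDS"
+
+
+def pack_hsds_header(window_sz2: int, lookahead_sz2: int) -> bytes:
+    # uint32 magic (little-endian), uint8 version, uint8 window size, uint8 lookahead size
+    return struct.pack("<IBBB", HSDS_MAGIC, 1, window_sz2, lookahead_sz2)
+```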
+ +Version number is a single byte, currently set to `0x01`. + +Window size is a single byte, representing the size of the sliding window used by the compressor. It corresponds to `-w` parameter in Heatshrink CLI. + +Lookahead size is a single byte, representing the size of the lookahead buffer used by the compressor. It corresponds to `-l` parameter in Heatshrink CLI. + +Total header size is 7 bytes. Header is followed by compressed data. diff --git a/lib/toolbox/compress.c b/lib/toolbox/compress.c index 780bea27a..5e794891f 100644 --- a/lib/toolbox/compress.c +++ b/lib/toolbox/compress.c @@ -13,9 +13,15 @@ /** Defines encoder and decoder lookahead buffer size */ #define COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG (4u) -/** Buffer size for input data */ #define COMPRESS_ICON_ENCODED_BUFF_SIZE (256u) +const CompressConfigHeatshrink compress_config_heatshrink_default = { + .window_sz2 = COMPRESS_EXP_BUFF_SIZE_LOG, + .lookahead_sz2 = COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG, + .input_buffer_sz = COMPRESS_ICON_ENCODED_BUFF_SIZE, +}; + +/** Buffer size for input data */ static bool compress_decode_internal( heatshrink_decoder* decoder, const uint8_t* data_in, @@ -83,16 +89,19 @@ void compress_icon_decode(CompressIcon* instance, const uint8_t* icon_data, uint } struct Compress { + const void* config; heatshrink_encoder* encoder; heatshrink_decoder* decoder; }; -Compress* compress_alloc(uint16_t compress_buff_size) { +Compress* compress_alloc(CompressType type, const void* config) { + furi_check(type == CompressTypeHeatshrink); + furi_check(config); + Compress* compress = malloc(sizeof(Compress)); - compress->encoder = - heatshrink_encoder_alloc(COMPRESS_EXP_BUFF_SIZE_LOG, COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG); - compress->decoder = heatshrink_decoder_alloc( - compress_buff_size, COMPRESS_EXP_BUFF_SIZE_LOG, COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG); + compress->config = config; + compress->encoder = NULL; + compress->decoder = NULL; return compress; } @@ -100,8 +109,12 @@ Compress* compress_alloc(uint16_t compress_buff_size) { void compress_free(Compress* compress) { furi_check(compress); - heatshrink_encoder_free(compress->encoder); - heatshrink_decoder_free(compress->decoder); + if(compress->encoder) { + heatshrink_encoder_free(compress->encoder); + } + if(compress->decoder) { + heatshrink_decoder_free(compress->decoder); + } free(compress); } @@ -125,6 +138,7 @@ static bool compress_encode_internal( size_t sunk = 0; size_t res_buff_size = sizeof(CompressHeader); + heatshrink_encoder_reset(encoder); /* Sink data to encoding buffer */ while((sunk < data_in_size) && !encode_failed) { sink_res = @@ -179,10 +193,116 @@ static bool compress_encode_internal( *data_res_size = 0; result = false; } - heatshrink_encoder_reset(encoder); return result; } +static inline bool compress_decoder_poll( + heatshrink_decoder* decoder, + uint8_t* decompressed_chunk, + size_t decomp_buffer_size, + CompressIoCallback write_cb, + void* write_context) { + HSD_poll_res poll_res; + size_t poll_size; + + do { + poll_res = + heatshrink_decoder_poll(decoder, decompressed_chunk, decomp_buffer_size, &poll_size); + if(poll_res < 0) { + return false; + } + + size_t write_size = write_cb(write_context, decompressed_chunk, poll_size); + if(write_size != poll_size) { + return false; + } + } while(poll_res == HSDR_POLL_MORE); + + return true; +} + +static bool compress_decode_stream_internal( + heatshrink_decoder* decoder, + const size_t work_buffer_size, + CompressIoCallback read_cb, + void* read_context, + CompressIoCallback write_cb, + void* write_context) { 
+ bool decode_failed = false; + HSD_sink_res sink_res; + HSD_finish_res finish_res; + size_t read_size = 0; + size_t sink_size = 0; + + uint8_t* compressed_chunk = malloc(work_buffer_size); + uint8_t* decompressed_chunk = malloc(work_buffer_size); + + /* Sink data to decoding buffer */ + do { + read_size = read_cb(read_context, compressed_chunk, work_buffer_size); + + size_t sunk = 0; + while(sunk < read_size && !decode_failed) { + sink_res = heatshrink_decoder_sink( + decoder, &compressed_chunk[sunk], read_size - sunk, &sink_size); + if(sink_res < 0) { + decode_failed = true; + break; + } + sunk += sink_size; + + if(!compress_decoder_poll( + decoder, decompressed_chunk, work_buffer_size, write_cb, write_context)) { + decode_failed = true; + break; + } + } + } while(!decode_failed && read_size); + + /* Notify sinking complete and poll decoded data */ + if(!decode_failed) { + while((finish_res = heatshrink_decoder_finish(decoder)) != HSDR_FINISH_DONE) { + if(finish_res < 0) { + decode_failed = true; + break; + } + + if(!compress_decoder_poll( + decoder, decompressed_chunk, work_buffer_size, write_cb, write_context)) { + decode_failed = true; + break; + } + } + } + + free(compressed_chunk); + free(decompressed_chunk); + + return !decode_failed; +} + +typedef struct { + uint8_t* data_ptr; + size_t data_size; + bool is_source; +} MemoryStreamState; + +static int32_t memory_stream_io_callback(void* context, uint8_t* ptr, size_t size) { + MemoryStreamState* state = (MemoryStreamState*)context; + + if(size > state->data_size) { + size = state->data_size; + } + if(state->is_source) { + memcpy(ptr, state->data_ptr, size); + } else { + memcpy(state->data_ptr, ptr, size); + } + state->data_ptr += size; + state->data_size -= size; + return size; +} + static bool compress_decode_internal( heatshrink_decoder* decoder, const uint8_t* data_in, @@ -196,59 +316,29 @@ static bool compress_decode_internal( furi_check(data_res_size); bool result = false; - bool decode_failed = false; - HSD_sink_res sink_res; - HSD_poll_res poll_res; - HSD_finish_res finish_res; - size_t sink_size = 0; - size_t res_buff_size = 0; - size_t poll_size = 0; CompressHeader* header = (CompressHeader*)data_in; if(header->is_compressed) { - /* Sink data to decoding buffer */ - size_t compressed_size = header->compressed_buff_size; - size_t sunk = 0; - while(sunk < compressed_size && !decode_failed) { - sink_res = heatshrink_decoder_sink( + MemoryStreamState compressed_context = { + .data_ptr = (uint8_t*)&data_in[sizeof(CompressHeader)], + .data_size = header->compressed_buff_size, + .is_source = true, + }; + MemoryStreamState decompressed_context = { + .data_ptr = data_out, + .data_size = data_out_size, + .is_source = false, + }; + heatshrink_decoder_reset(decoder); + if((result = compress_decode_stream_internal( decoder, - (uint8_t*)&data_in[sizeof(CompressHeader) + sunk], - compressed_size - sunk, - &sink_size); - if(sink_res < 0) { - decode_failed = true; - break; - } - sunk += sink_size; - do { - poll_res = heatshrink_decoder_poll( - decoder, &data_out[res_buff_size], data_out_size - res_buff_size, &poll_size); - if((poll_res < 0) || ((data_out_size - res_buff_size) == 0)) { - decode_failed = true; - break; - } - res_buff_size += poll_size; - } while(poll_res == HSDR_POLL_MORE); + COMPRESS_ICON_ENCODED_BUFF_SIZE, + memory_stream_io_callback, + &compressed_context, + memory_stream_io_callback, + &decompressed_context))) { + *data_res_size = data_out_size - decompressed_context.data_size; } - /* Notify sinking complete and poll 
decoded data */ - if(!decode_failed) { - finish_res = heatshrink_decoder_finish(decoder); - if(finish_res < 0) { - decode_failed = true; - } else { - do { - poll_res = heatshrink_decoder_poll( - decoder, - &data_out[res_buff_size], - data_out_size - res_buff_size, - &poll_size); - res_buff_size += poll_size; - finish_res = heatshrink_decoder_finish(decoder); - } while(finish_res != HSDR_FINISH_DONE); - } - } - *data_res_size = res_buff_size; - result = !decode_failed; } else if(data_out_size >= data_in_size - 1) { memcpy(data_out, &data_in[1], data_in_size); *data_res_size = data_in_size - 1; @@ -257,7 +347,6 @@ static bool compress_decode_internal( /* Not enough space in output buffer */ result = false; } - heatshrink_decoder_reset(decoder); return result; } @@ -268,6 +357,11 @@ bool compress_encode( uint8_t* data_out, size_t data_out_size, size_t* data_res_size) { + if(!compress->encoder) { + CompressConfigHeatshrink* hs_config = (CompressConfigHeatshrink*)compress->config; + compress->encoder = + heatshrink_encoder_alloc(hs_config->window_sz2, hs_config->lookahead_sz2); + } return compress_encode_internal( compress->encoder, data_in, data_in_size, data_out, data_out_size, data_res_size); } @@ -279,6 +373,201 @@ bool compress_decode( uint8_t* data_out, size_t data_out_size, size_t* data_res_size) { + if(!compress->decoder) { + CompressConfigHeatshrink* hs_config = (CompressConfigHeatshrink*)compress->config; + compress->decoder = heatshrink_decoder_alloc( + hs_config->input_buffer_sz, hs_config->window_sz2, hs_config->lookahead_sz2); + } return compress_decode_internal( compress->decoder, data_in, data_in_size, data_out, data_out_size, data_res_size); } + +bool compress_decode_streamed( + Compress* compress, + CompressIoCallback read_cb, + void* read_context, + CompressIoCallback write_cb, + void* write_context) { + CompressConfigHeatshrink* hs_config = (CompressConfigHeatshrink*)compress->config; + if(!compress->decoder) { + compress->decoder = heatshrink_decoder_alloc( + hs_config->input_buffer_sz, hs_config->window_sz2, hs_config->lookahead_sz2); + } + + heatshrink_decoder_reset(compress->decoder); + return compress_decode_stream_internal( + compress->decoder, + hs_config->input_buffer_sz, + read_cb, + read_context, + write_cb, + write_context); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +struct CompressStreamDecoder { + heatshrink_decoder* decoder; + size_t stream_position; + size_t decode_buffer_size; + size_t decode_buffer_position; + uint8_t* decode_buffer; + CompressIoCallback read_cb; + void* read_context; +}; + +CompressStreamDecoder* compress_stream_decoder_alloc( + CompressType type, + const void* config, + CompressIoCallback read_cb, + void* read_context) { + furi_check(type == CompressTypeHeatshrink); + furi_check(config); + + const CompressConfigHeatshrink* hs_config = (const CompressConfigHeatshrink*)config; + CompressStreamDecoder* instance = malloc(sizeof(CompressStreamDecoder)); + instance->decoder = heatshrink_decoder_alloc( + hs_config->input_buffer_sz, hs_config->window_sz2, hs_config->lookahead_sz2); + instance->stream_position = 0; + instance->decode_buffer_size = hs_config->input_buffer_sz; + instance->decode_buffer_position = 0; + instance->decode_buffer = malloc(hs_config->input_buffer_sz); + instance->read_cb = read_cb; + instance->read_context = read_context; + + return instance; +} + +void compress_stream_decoder_free(CompressStreamDecoder* instance) { + 
furi_check(instance); + heatshrink_decoder_free(instance->decoder); + free(instance->decode_buffer); + free(instance); +} + +static bool compress_decode_stream_chunk( + CompressStreamDecoder* sd, + CompressIoCallback read_cb, + void* read_context, + uint8_t* decompressed_chunk, + size_t decomp_chunk_size) { + HSD_sink_res sink_res; + HSD_poll_res poll_res; + + /* + First, try to output data from decoder to the output buffer. + If the we could fill the output buffer, return + If the output buffer is not full, keep polling the decoder + until it has no more data to output. + Then, read more data from the input and sink it to the decoder. + Repeat until the input is exhausted or output buffer is full. + */ + + bool failed = false; + bool can_sink_more = true; + bool can_read_more = true; + + do { + do { + size_t poll_size = 0; + poll_res = heatshrink_decoder_poll( + sd->decoder, decompressed_chunk, decomp_chunk_size, &poll_size); + if(poll_res < 0) { + return false; + } + + decomp_chunk_size -= poll_size; + decompressed_chunk += poll_size; + } while((poll_res == HSDR_POLL_MORE) && decomp_chunk_size); + + if(!decomp_chunk_size) { + break; + } + + if(can_read_more && (sd->decode_buffer_position < sd->decode_buffer_size)) { + size_t read_size = read_cb( + read_context, + &sd->decode_buffer[sd->decode_buffer_position], + sd->decode_buffer_size - sd->decode_buffer_position); + sd->decode_buffer_position += read_size; + can_read_more = read_size > 0; + } + + while(sd->decode_buffer_position && can_sink_more) { + size_t sink_size = 0; + sink_res = heatshrink_decoder_sink( + sd->decoder, sd->decode_buffer, sd->decode_buffer_position, &sink_size); + can_sink_more = sink_res == HSDR_SINK_OK; + if(sink_res < 0) { + failed = true; + break; + } + sd->decode_buffer_position -= sink_size; + + /* If some data was left in the buffer, move it to the beginning */ + if(sink_size && sd->decode_buffer_position) { + memmove( + sd->decode_buffer, &sd->decode_buffer[sink_size], sd->decode_buffer_position); + } + } + } while(!failed); + + return decomp_chunk_size == 0; +} + +bool compress_stream_decoder_read( + CompressStreamDecoder* instance, + uint8_t* data_out, + size_t data_out_size) { + furi_check(instance); + furi_check(data_out); + + if(compress_decode_stream_chunk( + instance, instance->read_cb, instance->read_context, data_out, data_out_size)) { + instance->stream_position += data_out_size; + return true; + } + return false; +} + +bool compress_stream_decoder_seek(CompressStreamDecoder* instance, size_t position) { + furi_check(instance); + + /* Check if requested position is ahead of current position + we can't rewind the input stream */ + furi_check(position >= instance->stream_position); + + /* Read and discard data up to requested position */ + uint8_t* dummy_buffer = malloc(instance->decode_buffer_size); + bool success = true; + + while(instance->stream_position < position) { + size_t bytes_to_read = position - instance->stream_position; + if(bytes_to_read > instance->decode_buffer_size) { + bytes_to_read = instance->decode_buffer_size; + } + if(!compress_stream_decoder_read(instance, dummy_buffer, bytes_to_read)) { + success = false; + break; + } + } + + free(dummy_buffer); + return success; +} + +size_t compress_stream_decoder_tell(CompressStreamDecoder* instance) { + furi_check(instance); + return instance->stream_position; +} + +bool compress_stream_decoder_rewind(CompressStreamDecoder* instance) { + furi_check(instance); + + /* Reset decoder and read buffer */ + 
heatshrink_decoder_reset(instance->decoder); + instance->stream_position = 0; + instance->decode_buffer_position = 0; + + return true; +} diff --git a/lib/toolbox/compress.h b/lib/toolbox/compress.h index f08e17584..f5862222d 100644 --- a/lib/toolbox/compress.h +++ b/lib/toolbox/compress.h @@ -44,17 +44,34 @@ void compress_icon_free(CompressIcon* instance); */ void compress_icon_decode(CompressIcon* instance, const uint8_t* icon_data, uint8_t** output); +////////////////////////////////////////////////////////////////////////// + /** Compress control structure */ typedef struct Compress Compress; +/** Supported compression types */ +typedef enum { + CompressTypeHeatshrink = 0, +} CompressType; + +/** Configuration for heatshrink compression */ +typedef struct { + uint16_t window_sz2; + uint16_t lookahead_sz2; + uint16_t input_buffer_sz; +} CompressConfigHeatshrink; + +/** Default configuration for heatshrink compression. Used for image assets. */ +extern const CompressConfigHeatshrink compress_config_heatshrink_default; + /** Allocate encoder and decoder * - * @param compress_buff_size size of decoder and encoder buffer to - * allocate + * @param type Compression type + * @param[in] config Configuration for compression, specific to type * * @return Compress instance */ -Compress* compress_alloc(uint16_t compress_buff_size); +Compress* compress_alloc(CompressType type, const void* config); /** Free encoder and decoder * @@ -71,6 +88,8 @@ void compress_free(Compress* compress); * @param[in] data_out_size The data out size * @param data_res_size pointer to result output data size * + * @note Prepends compressed stream with a header. If data is not compressible, + * it will be stored as is after the header. * @return true on success */ bool compress_encode( @@ -90,6 +109,7 @@ bool compress_encode( * @param[in] data_out_size The data out size * @param data_res_size pointer to result output data size * + * @note Expects compressed stream with a header, as produced by `compress_encode`. * @return true on success */ bool compress_decode( @@ -100,6 +120,100 @@ bool compress_decode( size_t data_out_size, size_t* data_res_size); +/** I/O callback for streamed compression/decompression + * + * @param context user context + * @param buffer buffer to read/write + * @param size size of buffer + * + * @return number of bytes read/written, 0 on end of stream, negative on error + */ +typedef int32_t (*CompressIoCallback)(void* context, uint8_t* buffer, size_t size); + +/** Decompress streamed data + * + * @param compress Compress instance + * @param read_cb read callback + * @param read_context read callback context + * @param write_cb write callback + * @param write_context write callback context + * + * @note Does not expect a header, just compressed data stream. 
+ * @return true on success + */ +bool compress_decode_streamed( + Compress* compress, + CompressIoCallback read_cb, + void* read_context, + CompressIoCallback write_cb, + void* write_context); + +////////////////////////////////////////////////////////////////////////// + +/** CompressStreamDecoder control structure */ +typedef struct CompressStreamDecoder CompressStreamDecoder; + +/** Allocate stream decoder + * + * @param type Compression type + * @param[in] config Configuration for compression, specific to type + * @param read_cb The read callback for input (compressed) data + * @param read_context The read context + * + * @return CompressStreamDecoder instance + */ +CompressStreamDecoder* compress_stream_decoder_alloc( + CompressType type, + const void* config, + CompressIoCallback read_cb, + void* read_context); + +/** Free stream decoder + * + * @param instance The CompressStreamDecoder instance + */ +void compress_stream_decoder_free(CompressStreamDecoder* instance); + +/** Read uncompressed data chunk from stream decoder + * + * @param instance The CompressStreamDecoder instance + * @param data_out The data out + * @param[in] data_out_size The data out size + * + * @return true on success + */ +bool compress_stream_decoder_read( + CompressStreamDecoder* instance, + uint8_t* data_out, + size_t data_out_size); + +/** Seek to position in uncompressed data stream + * + * @param instance The CompressStreamDecoder instance + * @param[in] position The position + * + * @return true on success + * @warning Backward seeking is not supported + */ +bool compress_stream_decoder_seek(CompressStreamDecoder* instance, size_t position); + +/** Get current position in uncompressed data stream + * + * @param instance The CompressStreamDecoder instance + * + * @return current position + */ +size_t compress_stream_decoder_tell(CompressStreamDecoder* instance); + +/** Reset stream decoder to the beginning + * @warning Read callback must be repositioned by caller separately + * + * @param instance The CompressStreamDecoder instance + * + * @return true on success + */ +bool compress_stream_decoder_rewind(CompressStreamDecoder* instance); + #ifdef __cplusplus } #endif diff --git a/lib/toolbox/path.c b/lib/toolbox/path.c index 37bacd65a..a3fe68473 100644 --- a/lib/toolbox/path.c +++ b/lib/toolbox/path.c @@ -43,6 +43,7 @@ void path_extract_filename(FuriString* path, FuriString* name, bool trim_ext) { void path_extract_extension(FuriString* path, char* ext, size_t ext_len_max) { furi_check(path); furi_check(ext); + furi_check(ext_len_max > 0); size_t dot = furi_string_search_rchar(path, '.'); size_t filename_start = furi_string_search_rchar(path, '/'); diff --git a/lib/toolbox/tar/tar_archive.c b/lib/toolbox/tar/tar_archive.c index 25084aaa0..e7400fe98 100644 --- a/lib/toolbox/tar/tar_archive.c +++ b/lib/toolbox/tar/tar_archive.c @@ -4,6 +4,7 @@ #include #include #include +#include #define TAG "TarArch" #define MAX_NAME_LEN 255 @@ -12,14 +13,29 @@ #define FILE_OPEN_NTRIES 10 #define FILE_OPEN_RETRY_DELAY 25 +TarOpenMode tar_archive_get_mode_for_path(const char* path) { + char ext[8]; + + FuriString* path_str = furi_string_alloc_set_str(path); + path_extract_extension(path_str, ext, sizeof(ext)); + furi_string_free(path_str); + + if(strcmp(ext, ".ths") == 0) { + return TarOpenModeReadHeatshrink; + } else { + return TarOpenModeRead; + } +} + typedef struct TarArchive { Storage* storage; + File* stream; mtar_t tar; tar_unpack_file_cb unpack_cb; void* unpack_cb_context; } TarArchive; -/* API WRAPPER */ +/* 
Plain file backend - uncompressed, supports read and write */ static int mtar_storage_file_write(void* stream, const void* data, unsigned size) { uint16_t bytes_written = storage_file_write(stream, data, size); return (bytes_written == size) ? bytes_written : MTAR_EWRITEFAIL; @@ -38,7 +54,6 @@ static int mtar_storage_file_seek(void* stream, unsigned offset) { static int mtar_storage_file_close(void* stream) { if(stream) { storage_file_close(stream); - storage_file_free(stream); } return MTAR_ESUCCESS; } @@ -50,41 +65,133 @@ const struct mtar_ops filesystem_ops = { .close = mtar_storage_file_close, }; +/* Heatshrink stream backend - compressed, read-only */ + +typedef struct { + CompressConfigHeatshrink heatshrink_config; + File* stream; + CompressStreamDecoder* decoder; +} HeatshrinkStream; + +/* HSDS 'heatshrink data stream' header magic */ +static const uint32_t HEATSHRINK_MAGIC = 0x53445348; + +typedef struct { + uint32_t magic; + uint8_t version; + uint8_t window_sz2; + uint8_t lookahead_sz2; +} FURI_PACKED HeatshrinkStreamHeader; +_Static_assert(sizeof(HeatshrinkStreamHeader) == 7, "Invalid HeatshrinkStreamHeader size"); + +static int mtar_heatshrink_file_close(void* stream) { + HeatshrinkStream* hs_stream = stream; + if(hs_stream) { + if(hs_stream->decoder) { + compress_stream_decoder_free(hs_stream->decoder); + } + storage_file_close(hs_stream->stream); + storage_file_free(hs_stream->stream); + free(hs_stream); + } + return MTAR_ESUCCESS; +} + +static int mtar_heatshrink_file_read(void* stream, void* data, unsigned size) { + HeatshrinkStream* hs_stream = stream; + bool read_success = compress_stream_decoder_read(hs_stream->decoder, data, size); + return read_success ? (int)size : MTAR_EREADFAIL; +} + +static int mtar_heatshrink_file_seek(void* stream, unsigned offset) { + HeatshrinkStream* hs_stream = stream; + bool success = false; + if(offset == 0) { + success = storage_file_seek(hs_stream->stream, sizeof(HeatshrinkStreamHeader), true) && + compress_stream_decoder_rewind(hs_stream->decoder); + } else { + success = compress_stream_decoder_seek(hs_stream->decoder, offset); + } + return success ? 
MTAR_ESUCCESS : MTAR_ESEEKFAIL; +} + +const struct mtar_ops heatshrink_ops = { + .read = mtar_heatshrink_file_read, + .write = NULL, // not supported + .seek = mtar_heatshrink_file_seek, + .close = mtar_heatshrink_file_close, +}; + +////////////////////////////////////////////////////////////////////////// + TarArchive* tar_archive_alloc(Storage* storage) { furi_check(storage); TarArchive* archive = malloc(sizeof(TarArchive)); archive->storage = storage; + archive->stream = storage_file_alloc(archive->storage); archive->unpack_cb = NULL; return archive; } +static int32_t file_read_cb(void* context, uint8_t* buffer, size_t buffer_size) { + File* file = context; + return storage_file_read(file, buffer, buffer_size); +} + bool tar_archive_open(TarArchive* archive, const char* path, TarOpenMode mode) { furi_check(archive); FS_AccessMode access_mode; FS_OpenMode open_mode; + bool compressed = false; int mtar_access = 0; switch(mode) { - case TAR_OPEN_MODE_READ: + case TarOpenModeRead: mtar_access = MTAR_READ; access_mode = FSAM_READ; open_mode = FSOM_OPEN_EXISTING; break; - case TAR_OPEN_MODE_WRITE: + case TarOpenModeWrite: mtar_access = MTAR_WRITE; access_mode = FSAM_WRITE; open_mode = FSOM_CREATE_ALWAYS; break; + case TarOpenModeReadHeatshrink: + mtar_access = MTAR_READ; + access_mode = FSAM_READ; + open_mode = FSOM_OPEN_EXISTING; + compressed = true; + break; default: return false; } - File* stream = storage_file_alloc(archive->storage); + File* stream = archive->stream; if(!storage_file_open(stream, path, access_mode, open_mode)) { - storage_file_free(stream); return false; } - mtar_init(&archive->tar, mtar_access, &filesystem_ops, stream); + + if(compressed) { + /* Read and validate stream header */ + HeatshrinkStreamHeader header; + if(storage_file_read(stream, &header, sizeof(HeatshrinkStreamHeader)) != + sizeof(HeatshrinkStreamHeader) || + header.magic != HEATSHRINK_MAGIC) { + storage_file_close(stream); + return false; + } + + HeatshrinkStream* hs_stream = malloc(sizeof(HeatshrinkStream)); + hs_stream->stream = stream; + hs_stream->heatshrink_config.window_sz2 = header.window_sz2; + hs_stream->heatshrink_config.lookahead_sz2 = header.lookahead_sz2; + hs_stream->heatshrink_config.input_buffer_sz = FILE_BLOCK_SIZE; + hs_stream->decoder = compress_stream_decoder_alloc( + CompressTypeHeatshrink, &hs_stream->heatshrink_config, file_read_cb, stream); + mtar_init(&archive->tar, mtar_access, &heatshrink_ops, hs_stream); + } else { + mtar_init(&archive->tar, mtar_access, &filesystem_ops, stream); + } return true; } @@ -94,6 +201,7 @@ void tar_archive_free(TarArchive* archive) { if(mtar_is_open(&archive->tar)) { mtar_close(&archive->tar); } + storage_file_free(archive->stream); free(archive); } @@ -121,6 +229,21 @@ int32_t tar_archive_get_entries_count(TarArchive* archive) { return counter; } +bool tar_archive_get_read_progress(TarArchive* archive, int32_t* processed, int32_t* total) { + furi_check(archive); + if(mtar_access_mode(&archive->tar) != MTAR_READ) { + return false; + } + + if(processed) { + *processed = storage_file_tell(archive->stream); + } + if(total) { + *total = storage_file_size(archive->stream); + } + return true; +} + bool tar_archive_dir_add_element(TarArchive* archive, const char* dirpath) { furi_check(archive); return (mtar_write_dir_header(&archive->tar, dirpath) == MTAR_ESUCCESS); @@ -258,7 +381,7 @@ static int archive_extract_foreach_cb(mtar_t* tar, const mtar_header_t* header, furi_string_free(converted_fname); furi_string_free(full_extracted_fname); - return success ? 
0 : -1; + return success ? 0 : MTAR_EFAILURE; } bool tar_archive_unpack_to( diff --git a/lib/toolbox/tar/tar_archive.h b/lib/toolbox/tar/tar_archive.h index ba2f7749f..3eb97391e 100644 --- a/lib/toolbox/tar/tar_archive.h +++ b/lib/toolbox/tar/tar_archive.h @@ -12,62 +12,197 @@ typedef struct TarArchive TarArchive; typedef struct Storage Storage; +/** Tar archive open mode + */ typedef enum { - TAR_OPEN_MODE_READ = 'r', - TAR_OPEN_MODE_WRITE = 'w', - TAR_OPEN_MODE_STDOUT = 's' /* to be implemented */ + TarOpenModeRead = 'r', + TarOpenModeWrite = 'w', + /* read-only heatshrink compressed tar */ + TarOpenModeReadHeatshrink = 'h', } TarOpenMode; +/** Get expected open mode for archive at the path. + * Used for automatic mode detection based on the file extension. + * + * @param[in] path Path to the archive + * + * @return open mode from TarOpenMode enum + */ +TarOpenMode tar_archive_get_mode_for_path(const char* path); + +/** Tar archive constructor + * + * @param storage Storage API pointer + * + * @return allocated object + */ TarArchive* tar_archive_alloc(Storage* storage); +/** Open tar archive + * + * @param archive Tar archive object + * @param[in] path Path to the tar archive + * @param mode Open mode + * + * @return true if successful + */ bool tar_archive_open(TarArchive* archive, const char* path, TarOpenMode mode); +/** Tar archive destructor + * + * @param archive Tar archive object + */ void tar_archive_free(TarArchive* archive); /* High-level API - assumes archive is open */ + +/** Unpack tar archive to destination + * + * @param archive Tar archive object. Must be opened in read mode + * @param[in] destination Destination path + * @param converter Storage name converter + * + * @return true if successful + */ bool tar_archive_unpack_to( TarArchive* archive, const char* destination, Storage_name_converter converter); +/** Add file to tar archive + * + * @param archive Tar archive object. Must be opened in write mode + * @param[in] fs_file_path Path to the file on the filesystem + * @param[in] archive_fname Name of the file in the archive + * @param file_size Size of the file + * + * @return true if successful + */ bool tar_archive_add_file( TarArchive* archive, const char* fs_file_path, const char* archive_fname, const int32_t file_size); +/** Add directory to tar archive + * + * @param archive Tar archive object. Must be opened in write mode + * @param fs_full_path Path to the directory on the filesystem + * @param path_prefix Prefix to add to the directory name in the archive + * + * @return true if successful + */ bool tar_archive_add_dir(TarArchive* archive, const char* fs_full_path, const char* path_prefix); +/** Get number of entries in the archive + * + * @param archive Tar archive object + * + * @return number of entries. -1 on error + */ int32_t tar_archive_get_entries_count(TarArchive* archive); +/** Get read progress + * + * @param archive Tar archive object. Must be opened in read mode + * @param[in] processed Number of processed entries + * @param[in] total Total number of entries + * + * @return true if successful + */ +bool tar_archive_get_read_progress(TarArchive* archive, int32_t* processed, int32_t* total); + +/** Unpack single file from tar archive + * + * @param archive Tar archive object. 
Must be opened in read mode + * @param[in] archive_fname Name of the file in the archive + * @param[in] destination Destination path + * + * @return true if successful + */ bool tar_archive_unpack_file( TarArchive* archive, const char* archive_fname, const char* destination); -/* Optional per-entry callback on unpacking - return false to skip entry */ +/** Optional per-entry callback on unpacking + * @param name Name of the file or directory + * @param is_directory True if the entry is a directory + * @param[in] context User context + * @return true to process the entry, false to skip + */ typedef bool (*tar_unpack_file_cb)(const char* name, bool is_directory, void* context); +/** Set per-entry callback on unpacking + * @param archive Tar archive object + * @param callback Callback function + * @param[in] context User context + */ void tar_archive_set_file_callback(TarArchive* archive, tar_unpack_file_cb callback, void* context); /* Low-level API */ + +/** Add tar archive directory header + * + * @param archive Tar archive object. Must be opened in write mode + * @param[in] dirpath Path to the directory + * + * @return true if successful + */ bool tar_archive_dir_add_element(TarArchive* archive, const char* dirpath); +/** Add tar archive file header + * + * @param archive Tar archive object. Must be opened in write mode + * @param[in] path Path to the file + * @param data_len Size of the file + * + * @return true if successful + */ bool tar_archive_file_add_header(TarArchive* archive, const char* path, const int32_t data_len); +/** Add tar archive file data block + * + * @param archive Tar archive object. Must be opened in write mode + * @param[in] data_block Data block + * @param block_len Size of the data block + * + * @return true if successful + */ bool tar_archive_file_add_data_block( TarArchive* archive, const uint8_t* data_block, const int32_t block_len); +/** Finalize tar archive file + * + * @param archive Tar archive object. Must be opened in write mode + * + * @return true if successful + */ bool tar_archive_file_finalize(TarArchive* archive); +/** Store data in tar archive + * + * @param archive Tar archive object. Must be opened in write mode + * @param[in] path Path to the file + * @param[in] data Data to store + * @param data_len Size of the data + * + * @return true if successful + */ bool tar_archive_store_data( TarArchive* archive, const char* path, const uint8_t* data, const int32_t data_len); +/** Finalize tar archive + * + * @param archive Tar archive object. 
Must be opened in write mode + * + * @return true if successful + */ bool tar_archive_finalize(TarArchive* archive); #ifdef __cplusplus diff --git a/lib/update_util/resources/manifest.c b/lib/update_util/resources/manifest.c index 5a818a0a4..580a76d45 100644 --- a/lib/update_util/resources/manifest.c +++ b/lib/update_util/resources/manifest.c @@ -161,3 +161,9 @@ ResourceManifestEntry* return NULL; } } + +bool resource_manifest_rewind(ResourceManifestReader* resource_manifest) { + furi_assert(resource_manifest); + + return stream_seek(resource_manifest->stream, 0, StreamOffsetFromStart); +} diff --git a/lib/update_util/resources/manifest.h b/lib/update_util/resources/manifest.h index ddceb5ffa..decba02cd 100644 --- a/lib/update_util/resources/manifest.h +++ b/lib/update_util/resources/manifest.h @@ -47,6 +47,13 @@ void resource_manifest_reader_free(ResourceManifestReader* resource_manifest); */ bool resource_manifest_reader_open(ResourceManifestReader* resource_manifest, const char* filename); +/** + * @brief Rewind manifest to the beginning + * @param resource_manifest allocated object + * @return true if successful + */ +bool resource_manifest_rewind(ResourceManifestReader* resource_manifest); + /** * @brief Read next file/dir entry from manifest * @param resource_manifest allocated object diff --git a/scripts/flipper/assets/heatshrink_stream.py b/scripts/flipper/assets/heatshrink_stream.py new file mode 100644 index 000000000..bae84d3fe --- /dev/null +++ b/scripts/flipper/assets/heatshrink_stream.py @@ -0,0 +1,26 @@ +import struct + + +class HeatshrinkDataStreamHeader: + MAGIC = 0x53445348 + VERSION = 1 + + def __init__(self, window_size, lookahead_size): + self.window_size = window_size + self.lookahead_size = lookahead_size + + def pack(self): + return struct.pack( + "
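+            # 7-byte HSDS header per TarHeatshrinkFormat.md:
+            # uint32 magic (little-endian), uint8 version, uint8 window_sz2, uint8 lookahead_sz2
+            "<IBBB",
+            self.MAGIC,
+            self.VERSION,
+            self.window_size,
+            self.lookahead_size,
+        )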