mirror of
https://github.com/flipperdevices/flipperzero-firmware.git
synced 2025-12-12 20:59:50 +04:00
Updater: resource compression (#3716)
* toolbox: compress: moved decompressor implementation to separate func
* toolbox: compress: callback-based api; cli: storage unpack command
* toolbox: compress: separate r/w contexts for stream api
* targets: f18: sync API
* compress: naming fixes & cleanup
* toolbox: compress: using hs buffer size for stream buffers
* toolbox: tar: heatshrink stream mode
* toolbox: compress: docs & small cleanup
* toolbox: tar: header support for .hs; updater: now uses .hs for resources; .hs.tar: now rewindable
* toolbox: compress: fixed hs stream tail handling
* updater: reworked progress for resources cleanup; rebalanced stage weights
* updater: single-pass decompression; scripts: print resources compression ratio
* updater: fixed warnings
* toolbox: tar: doxygen
* docs: update
* docs: info on tarhs format; scripts: added standalone compression/decompression tool for heatshrink-formatted streams
* scripts: tarhs: fixed parameter handling
* cli: storage extract command; toolbox: tar: guess type based on extension
* unit_tests: added test for streamed raw hs decompressor `compress_decode_streamed`
* unit_tests: compress: added extraction test for .tar.hs
* rpc: autodetect compressed archives
* scripts: minor cleanup of common parts
* scripts: update: now using in-memory intermediate tar stream
* scripts: added hs.py wrapper for heatshrink-related ops (single object and directory-as-tar compression)
* scripts: naming fixes
* Toolbox: export compress_config_heatshrink_default as const symbol
* Toolbox: fix various types naming
* Toolbox: more of types naming fixes
* Toolbox: use size_t in compress io callbacks and structures
* UnitTests: update to match new compress API
* Toolbox: proper path_extract_extension usage

Co-authored-by: あく <alleteam@gmail.com>
Binary file not shown.
Binary file not shown.
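For orientation before the diffs: the commit replaces the fixed-buffer `compress_alloc(buff_size)` API with a typed, config-driven one and adds a callback-based streaming decoder. The sketch below shows how those pieces fit together for decompressing a raw heatshrink stream from one file into another. It mirrors the unit test added in this commit; the helper names and file paths are placeholders, not firmware code.

```c
#include <storage/storage.h>
#include <toolbox/compress.h>

/* I/O adapters matching the CompressIoCallback signature */
static int32_t file_read_cb(void* context, uint8_t* buffer, size_t size) {
    return storage_file_read((File*)context, buffer, size);
}

static int32_t file_write_cb(void* context, uint8_t* buffer, size_t size) {
    return storage_file_write((File*)context, buffer, size);
}

/* Decompress a raw heatshrink stream (no header) from src_path into dst_path.
 * Paths are placeholders; error handling is reduced for brevity. */
static bool decompress_file(Storage* storage, const char* src_path, const char* dst_path) {
    File* src = storage_file_alloc(storage);
    File* dst = storage_file_alloc(storage);
    bool ok = false;

    /* New-style allocation: compression type + per-type config instead of a buffer size */
    Compress* compress =
        compress_alloc(CompressTypeHeatshrink, &compress_config_heatshrink_default);

    if(storage_file_open(src, src_path, FSAM_READ, FSOM_OPEN_EXISTING) &&
       storage_file_open(dst, dst_path, FSAM_WRITE, FSOM_OPEN_ALWAYS)) {
        /* Pulls compressed data via file_read_cb and pushes decoded data via file_write_cb */
        ok = compress_decode_streamed(compress, file_read_cb, src, file_write_cb, dst);
    }

    storage_file_close(src);
    storage_file_close(dst);
    compress_free(compress);
    storage_file_free(src);
    storage_file_free(dst);
    return ok;
}
```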
@@ -1,6 +1,9 @@
 #include "../test.h" // IWYU pragma: keep
 
 #include <toolbox/compress.h>
+#include <toolbox/md5_calc.h>
+#include <toolbox/tar/tar_archive.h>
+#include <toolbox/dir_walk.h>
 
 #include <furi.h>
 #include <furi_hal.h>
@@ -56,7 +59,7 @@ static void compress_test_reference_comp_decomp() {
     furi_record_close(RECORD_STORAGE);
 
     uint8_t* temp_buffer = malloc(1024);
-    Compress* comp = compress_alloc(1024);
+    Compress* comp = compress_alloc(CompressTypeHeatshrink, &compress_config_heatshrink_default);
 
     size_t encoded_size = 0;
     mu_assert(
@@ -98,7 +101,7 @@ static void compress_test_random_comp_decomp() {
     // We only fill half of the buffer with random data, so if anything goes wrong, there's no overflow
     static const size_t src_data_size = src_buffer_size / 2;
 
-    Compress* comp = compress_alloc(src_buffer_size);
+    Compress* comp = compress_alloc(CompressTypeHeatshrink, &compress_config_heatshrink_default);
     uint8_t* src_buff = malloc(src_buffer_size);
     uint8_t* encoded_buff = malloc(encoded_buffer_size);
     uint8_t* decoded_buff = malloc(src_buffer_size);
@@ -146,9 +149,200 @@ static void compress_test_random_comp_decomp() {
     compress_free(comp);
 }
 
+static int32_t hs_unpacker_file_read(void* context, uint8_t* buffer, size_t size) {
+    File* file = (File*)context;
+    return storage_file_read(file, buffer, size);
+}
+
+static int32_t hs_unpacker_file_write(void* context, uint8_t* buffer, size_t size) {
+    File* file = (File*)context;
+    return storage_file_write(file, buffer, size);
+}
+/*
+Source file was generated with:
+```python3
+import random, string
+random.seed(1337)
+with open("hsstream.out.bin", "wb") as f:
+    for c in random.choices(string.printable, k=1024):
+        for _ in range(random.randint(1, 10)):
+            f.write(c.encode())
+```
+
+It was compressed with heatshrink using the following command:
+`python3 -m heatshrink2 compress -w 9 -l 4 hsstream.out.bin hsstream.in.bin`
+*/
+
+#define HSSTREAM_IN  COMPRESS_UNIT_TESTS_PATH("hsstream.in.bin")
+#define HSSTREAM_OUT COMPRESS_UNIT_TESTS_PATH("hsstream.out.bin")
+
+static void compress_test_heatshrink_stream() {
+    Storage* api = furi_record_open(RECORD_STORAGE);
+    File* comp_file = storage_file_alloc(api);
+    File* dest_file = storage_file_alloc(api);
+
+    CompressConfigHeatshrink config = {
+        .window_sz2 = 9,
+        .lookahead_sz2 = 4,
+        .input_buffer_sz = 128,
+    };
+    Compress* compress = compress_alloc(CompressTypeHeatshrink, &config);
+
+    do {
+        storage_simply_remove(api, HSSTREAM_OUT);
+
+        mu_assert(
+            storage_file_open(comp_file, HSSTREAM_IN, FSAM_READ, FSOM_OPEN_EXISTING),
+            "Failed to open compressed file");
+
+        mu_assert(
+            storage_file_open(dest_file, HSSTREAM_OUT, FSAM_WRITE, FSOM_OPEN_ALWAYS),
+            "Failed to open decompressed file");
+
+        mu_assert(
+            compress_decode_streamed(
+                compress, hs_unpacker_file_read, comp_file, hs_unpacker_file_write, dest_file),
+            "Decompression failed");
+
+        storage_file_close(dest_file);
+
+        unsigned char md5[16];
+        FS_Error file_error;
+        mu_assert(
+            md5_calc_file(dest_file, HSSTREAM_OUT, md5, &file_error), "Failed to calculate md5");
+
+        const unsigned char expected_md5[16] = {
+            0xa3, 0x70, 0xe8, 0x8b, 0xa9, 0x42, 0x74, 0xf4,
+            0xaa, 0x12, 0x8d, 0x41, 0xd2, 0xb6, 0x71, 0xc9};
+        mu_assert(memcmp(md5, expected_md5, sizeof(md5)) == 0, "MD5 mismatch after decompression");
+
+        storage_simply_remove(api, HSSTREAM_OUT);
+    } while(false);
+
+    compress_free(compress);
+    storage_file_free(comp_file);
+    storage_file_free(dest_file);
+    furi_record_close(RECORD_STORAGE);
+}
+
+#define HS_TAR_PATH         COMPRESS_UNIT_TESTS_PATH("test.ths")
+#define HS_TAR_EXTRACT_PATH COMPRESS_UNIT_TESTS_PATH("tar_out")
+
+static bool file_counter(const char* name, bool is_dir, void* context) {
+    UNUSED(name);
+    UNUSED(is_dir);
+    int32_t* n_entries = (int32_t*)context;
+    (*n_entries)++;
+    return true;
+}
+
+/*
+Heatshrink tar file contents and MD5 sums:
+file1.txt: 64295676ceed5cce2d0dcac402e4bda4
+file2.txt: 188f67f297eedd7bf3d6a4d3c2fc31c4
+dir/file3.txt: 34d98ad8135ffe502dba374690136d16
+dir/big_file.txt: ee169c1e1791a4d319dbfaefaa850e98
+dir/nested_dir/file4.txt: e099fcb2aaa0672375eaedc549247ee6
+dir/nested_dir/empty_file.txt: d41d8cd98f00b204e9800998ecf8427e
+
+XOR of all MD5 sums: 92ed5729786d0e1176d047e35f52d376
+*/
+
+static void compress_test_heatshrink_tar() {
+    Storage* api = furi_record_open(RECORD_STORAGE);
+
+    TarArchive* archive = tar_archive_alloc(api);
+    FuriString* path = furi_string_alloc();
+    FileInfo fileinfo;
+    File* file = storage_file_alloc(api);
+
+    do {
+        storage_simply_remove_recursive(api, HS_TAR_EXTRACT_PATH);
+
+        mu_assert(storage_simply_mkdir(api, HS_TAR_EXTRACT_PATH), "Failed to create extract dir");
+
+        mu_assert(
+            tar_archive_get_mode_for_path(HS_TAR_PATH) == TarOpenModeReadHeatshrink,
+            "Invalid mode for heatshrink tar");
+
+        mu_assert(
+            tar_archive_open(archive, HS_TAR_PATH, TarOpenModeReadHeatshrink),
+            "Failed to open heatshrink tar");
+
+        int32_t n_entries = 0;
+        tar_archive_set_file_callback(archive, file_counter, &n_entries);
+
+        mu_assert(
+            tar_archive_unpack_to(archive, HS_TAR_EXTRACT_PATH, NULL),
+            "Failed to unpack heatshrink tar");
+
+        mu_assert(n_entries == 9, "Invalid number of entries in heatshrink tar");
+
+        uint8_t md5_total[16] = {0}, md5_file[16];
+
+        DirWalk* dir_walk = dir_walk_alloc(api);
+        mu_assert(dir_walk_open(dir_walk, HS_TAR_EXTRACT_PATH), "Failed to open dirwalk");
+        while(dir_walk_read(dir_walk, path, &fileinfo) == DirWalkOK) {
+            if(file_info_is_dir(&fileinfo)) {
+                continue;
+            }
+            mu_assert(
+                md5_calc_file(file, furi_string_get_cstr(path), md5_file, NULL),
+                "Failed to calc md5");
+
+            for(size_t i = 0; i < 16; i++) {
+                md5_total[i] ^= md5_file[i];
+            }
+        }
+        dir_walk_free(dir_walk);
+
+        static const unsigned char expected_md5[16] = {
+            0x92, 0xed, 0x57, 0x29, 0x78, 0x6d, 0x0e, 0x11,
+            0x76, 0xd0, 0x47, 0xe3, 0x5f, 0x52, 0xd3, 0x76};
+        mu_assert(memcmp(md5_total, expected_md5, sizeof(md5_total)) == 0, "MD5 mismatch");
+
+        storage_simply_remove_recursive(api, HS_TAR_EXTRACT_PATH);
+    } while(false);
+
+    storage_file_free(file);
+    furi_string_free(path);
+    tar_archive_free(archive);
+    furi_record_close(RECORD_STORAGE);
+}
+
 MU_TEST_SUITE(test_compress) {
     MU_RUN_TEST(compress_test_random_comp_decomp);
     MU_RUN_TEST(compress_test_reference_comp_decomp);
+    MU_RUN_TEST(compress_test_heatshrink_stream);
+    MU_RUN_TEST(compress_test_heatshrink_tar);
 }
 
 int run_minunit_test_compress(void) {
@@ -700,21 +700,21 @@ static void rpc_system_storage_tar_extract_process(const PB_Main* request, void*
     TarArchive* archive = tar_archive_alloc(rpc_storage->api);
 
     do {
-        if(!path_contains_only_ascii(request->content.storage_tar_extract_request.out_path)) {
+        const char *tar_path = request->content.storage_tar_extract_request.tar_path,
+                   *out_path = request->content.storage_tar_extract_request.out_path;
+        if(!path_contains_only_ascii(out_path)) {
            status = PB_CommandStatus_ERROR_STORAGE_INVALID_NAME;
            break;
        }
 
-        if(!tar_archive_open(
-               archive,
-               request->content.storage_tar_extract_request.tar_path,
-               TAR_OPEN_MODE_READ)) {
+        TarOpenMode tar_mode = tar_archive_get_mode_for_path(tar_path);
+        if(!tar_archive_open(archive, tar_path, tar_mode)) {
            status = PB_CommandStatus_ERROR_STORAGE_INVALID_PARAMETER;
            break;
        }
 
-        if(!tar_archive_unpack_to(
-               archive, request->content.storage_tar_extract_request.out_path, NULL)) {
+        if(!tar_archive_unpack_to(archive, out_path, NULL)) {
            status = PB_CommandStatus_ERROR_STORAGE_INTERNAL;
            break;
        }
@@ -5,6 +5,7 @@
 #include <lib/toolbox/args.h>
 #include <lib/toolbox/md5_calc.h>
 #include <lib/toolbox/dir_walk.h>
+#include <lib/toolbox/tar/tar_archive.h>
 #include <storage/storage.h>
 #include <storage/storage_sd_api.h>
 #include <power/power_service/power.h>
@@ -33,6 +34,7 @@ static void storage_cli_print_usage(void) {
     printf("\tmd5\t - md5 hash of the file\r\n");
     printf("\tstat\t - info about file or dir\r\n");
     printf("\ttimestamp\t - last modification timestamp\r\n");
+    printf("\textract\t - extract tar archive to destination\r\n");
 };
 
 static void storage_cli_print_error(FS_Error error) {
@@ -496,6 +498,47 @@ static void storage_cli_md5(Cli* cli, FuriString* path) {
     furi_record_close(RECORD_STORAGE);
 }
 
+static bool tar_extract_file_callback(const char* name, bool is_directory, void* context) {
+    UNUSED(context);
+    printf("\t%s %s\r\n", is_directory ? "D" : "F", name);
+    return true;
+}
+
+static void storage_cli_extract(Cli* cli, FuriString* old_path, FuriString* args) {
+    UNUSED(cli);
+    FuriString* new_path = furi_string_alloc();
+
+    if(!args_read_probably_quoted_string_and_trim(args, new_path)) {
+        storage_cli_print_usage();
+        furi_string_free(new_path);
+        return;
+    }
+
+    Storage* api = furi_record_open(RECORD_STORAGE);
+
+    TarArchive* archive = tar_archive_alloc(api);
+    TarOpenMode tar_mode = tar_archive_get_mode_for_path(furi_string_get_cstr(old_path));
+    do {
+        if(!tar_archive_open(archive, furi_string_get_cstr(old_path), tar_mode)) {
+            printf("Failed to open archive\r\n");
+            break;
+        }
+        uint32_t start_tick = furi_get_tick();
+        tar_archive_set_file_callback(archive, tar_extract_file_callback, NULL);
+        printf("Unpacking to %s\r\n", furi_string_get_cstr(new_path));
+        bool success = tar_archive_unpack_to(archive, furi_string_get_cstr(new_path), NULL);
+        uint32_t end_tick = furi_get_tick();
+        printf(
+            "Decompression %s in %lu ticks\r\n",
+            success ? "success" : "failed",
+            end_tick - start_tick);
+    } while(false);
+
+    tar_archive_free(archive);
+    furi_string_free(new_path);
+    furi_record_close(RECORD_STORAGE);
+}
+
 void storage_cli(Cli* cli, FuriString* args, void* context) {
     UNUSED(context);
     FuriString* cmd;
@@ -589,6 +632,11 @@ void storage_cli(Cli* cli, FuriString* args, void* context) {
             break;
         }
 
+        if(furi_string_cmp_str(cmd, "extract") == 0) {
+            storage_cli_extract(cli, path, args);
+            break;
+        }
+
         storage_cli_print_usage();
     } while(false);
 
@@ -6,7 +6,7 @@ FS_Error storage_int_backup(Storage* storage, const char* dstname) {
     furi_check(storage);
 
     TarArchive* archive = tar_archive_alloc(storage);
-    bool success = tar_archive_open(archive, dstname, TAR_OPEN_MODE_WRITE) &&
+    bool success = tar_archive_open(archive, dstname, TarOpenModeWrite) &&
                    tar_archive_add_dir(archive, STORAGE_INT_PATH_PREFIX, "") &&
                    tar_archive_finalize(archive);
     tar_archive_free(archive);
@@ -18,7 +18,7 @@ FS_Error
     furi_check(storage);
 
     TarArchive* archive = tar_archive_alloc(storage);
-    bool success = tar_archive_open(archive, srcname, TAR_OPEN_MODE_READ) &&
+    bool success = tar_archive_open(archive, srcname, TarOpenModeRead) &&
                    tar_archive_unpack_to(archive, STORAGE_INT_PATH_PREFIX, converter);
     tar_archive_free(archive);
     return success ? FSE_OK : FSE_INTERNAL;
@@ -9,6 +9,8 @@
 #include <update_util/lfs_backup.h>
 #include <update_util/update_operation.h>
 
+#define TAG "UpdWorker"
+
 static const char* update_task_stage_descr[] = {
     [UpdateTaskStageProgress] = "...",
     [UpdateTaskStageReadManifest] = "Loading update manifest",
@@ -23,7 +25,9 @@ static const char* update_task_stage_descr[] = {
     [UpdateTaskStageOBValidation] = "Validating opt. bytes",
     [UpdateTaskStageLfsBackup] = "Backing up LFS",
     [UpdateTaskStageLfsRestore] = "Restoring LFS",
-    [UpdateTaskStageResourcesUpdate] = "Updating resources",
+    [UpdateTaskStageResourcesFileCleanup] = "Cleaning up files",
+    [UpdateTaskStageResourcesDirCleanup] = "Cleaning up directories",
+    [UpdateTaskStageResourcesFileUnpack] = "Extracting resources",
     [UpdateTaskStageSplashscreenInstall] = "Installing splashscreen",
     [UpdateTaskStageCompleted] = "Restarting...",
     [UpdateTaskStageError] = "Error",
@@ -196,7 +200,19 @@ static const struct {
         .descr = "LFS I/O error",
     },
     {
-        .stage = UpdateTaskStageResourcesUpdate,
+        .stage = UpdateTaskStageResourcesFileCleanup,
+        .percent_min = 0,
+        .percent_max = 100,
+        .descr = "SD card I/O error",
+    },
+    {
+        .stage = UpdateTaskStageResourcesDirCleanup,
+        .percent_min = 0,
+        .percent_max = 100,
+        .descr = "SD card I/O error",
+    },
+    {
+        .stage = UpdateTaskStageResourcesFileUnpack,
         .percent_min = 0,
         .percent_max = 100,
         .descr = "SD card I/O error",
@@ -230,20 +246,22 @@ static const UpdateTaskStageGroupMap update_task_stage_progress[] = {
     [UpdateTaskStageLfsBackup] = STAGE_DEF(UpdateTaskStageGroupPreUpdate, 5),
 
     [UpdateTaskStageRadioImageValidate] = STAGE_DEF(UpdateTaskStageGroupRadio, 15),
-    [UpdateTaskStageRadioErase] = STAGE_DEF(UpdateTaskStageGroupRadio, 35),
-    [UpdateTaskStageRadioWrite] = STAGE_DEF(UpdateTaskStageGroupRadio, 60),
+    [UpdateTaskStageRadioErase] = STAGE_DEF(UpdateTaskStageGroupRadio, 25),
+    [UpdateTaskStageRadioWrite] = STAGE_DEF(UpdateTaskStageGroupRadio, 40),
     [UpdateTaskStageRadioInstall] = STAGE_DEF(UpdateTaskStageGroupRadio, 30),
     [UpdateTaskStageRadioBusy] = STAGE_DEF(UpdateTaskStageGroupRadio, 5),
 
     [UpdateTaskStageOBValidation] = STAGE_DEF(UpdateTaskStageGroupOptionBytes, 2),
 
-    [UpdateTaskStageValidateDFUImage] = STAGE_DEF(UpdateTaskStageGroupFirmware, 30),
-    [UpdateTaskStageFlashWrite] = STAGE_DEF(UpdateTaskStageGroupFirmware, 150),
-    [UpdateTaskStageFlashValidate] = STAGE_DEF(UpdateTaskStageGroupFirmware, 15),
+    [UpdateTaskStageValidateDFUImage] = STAGE_DEF(UpdateTaskStageGroupFirmware, 33),
+    [UpdateTaskStageFlashWrite] = STAGE_DEF(UpdateTaskStageGroupFirmware, 100),
+    [UpdateTaskStageFlashValidate] = STAGE_DEF(UpdateTaskStageGroupFirmware, 20),
 
     [UpdateTaskStageLfsRestore] = STAGE_DEF(UpdateTaskStageGroupPostUpdate, 5),
 
-    [UpdateTaskStageResourcesUpdate] = STAGE_DEF(UpdateTaskStageGroupResources, 255),
+    [UpdateTaskStageResourcesFileCleanup] = STAGE_DEF(UpdateTaskStageGroupResources, 100),
+    [UpdateTaskStageResourcesDirCleanup] = STAGE_DEF(UpdateTaskStageGroupResources, 50),
+    [UpdateTaskStageResourcesFileUnpack] = STAGE_DEF(UpdateTaskStageGroupResources, 255),
     [UpdateTaskStageSplashscreenInstall] = STAGE_DEF(UpdateTaskStageGroupSplashscreen, 5),
 
     [UpdateTaskStageCompleted] = STAGE_DEF(UpdateTaskStageGroupMisc, 1),
@@ -288,6 +306,7 @@ static void update_task_calc_completed_stages(UpdateTask* update_task) {
 
 void update_task_set_progress(UpdateTask* update_task, UpdateTaskStage stage, uint8_t progress) {
     if(stage != UpdateTaskStageProgress) {
+        FURI_LOG_I(TAG, "Stage %d, progress %d", stage, progress);
         /* do not override more specific error states */
         if((stage >= UpdateTaskStageError) && (update_task->state.stage >= UpdateTaskStageError)) {
             return;
@@ -31,7 +31,9 @@ typedef enum {
     UpdateTaskStageFlashValidate,
 
     UpdateTaskStageLfsRestore,
-    UpdateTaskStageResourcesUpdate,
+    UpdateTaskStageResourcesFileCleanup,
+    UpdateTaskStageResourcesDirCleanup,
+    UpdateTaskStageResourcesFileUnpack,
     UpdateTaskStageSplashscreenInstall,
 
     UpdateTaskStageCompleted,
@@ -35,36 +35,23 @@ static bool update_task_pre_update(UpdateTask* update_task) {
     furi_string_free(backup_file_path);
     return success;
 }
 
-typedef enum {
-    UpdateTaskResourcesWeightsFileCleanup = 20,
-    UpdateTaskResourcesWeightsDirCleanup = 20,
-    UpdateTaskResourcesWeightsFileUnpack = 60,
-} UpdateTaskResourcesWeights;
-
-#define UPDATE_TASK_RESOURCES_FILE_TO_TOTAL_PERCENT 90
-
 typedef struct {
     UpdateTask* update_task;
-    int32_t total_files, processed_files;
+    TarArchive* archive;
 } TarUnpackProgress;
 
 static bool update_task_resource_unpack_cb(const char* name, bool is_directory, void* context) {
     UNUSED(name);
     UNUSED(is_directory);
     TarUnpackProgress* unpack_progress = context;
-    unpack_progress->processed_files++;
+    int32_t progress = 0, total = 0;
+    tar_archive_get_read_progress(unpack_progress->archive, &progress, &total);
     update_task_set_progress(
-        unpack_progress->update_task,
-        UpdateTaskStageProgress,
-        /* For this stage, last progress segment = extraction */
-        (UpdateTaskResourcesWeightsFileCleanup + UpdateTaskResourcesWeightsDirCleanup) +
-            (unpack_progress->processed_files * UpdateTaskResourcesWeightsFileUnpack) /
-                (unpack_progress->total_files + 1));
+        unpack_progress->update_task, UpdateTaskStageProgress, (progress * 100) / (total + 1));
     return true;
 }
 
-static void update_task_cleanup_resources(UpdateTask* update_task, const uint32_t n_tar_entries) {
+static void update_task_cleanup_resources(UpdateTask* update_task) {
     ResourceManifestReader* manifest_reader = resource_manifest_reader_alloc(update_task->storage);
     do {
         FURI_LOG_D(TAG, "Cleaning up old manifest");
@@ -73,20 +60,26 @@ static void update_task_cleanup_resources(UpdateTask* update_task, const uint32_
             break;
         }
 
-        const uint32_t n_approx_file_entries =
-            n_tar_entries * UPDATE_TASK_RESOURCES_FILE_TO_TOTAL_PERCENT / 100 + 1;
-        uint32_t n_dir_entries = 1;
-
         ResourceManifestEntry* entry_ptr = NULL;
-        uint32_t n_processed_entries = 0;
+        /* Iterate over manifest and calculate entries count */
+        uint32_t n_file_entries = 1, n_dir_entries = 1;
+        while((entry_ptr = resource_manifest_reader_next(manifest_reader))) {
+            if(entry_ptr->type == ResourceManifestEntryTypeFile) {
+                n_file_entries++;
+            } else if(entry_ptr->type == ResourceManifestEntryTypeDirectory) {
+                n_dir_entries++;
+            }
+        }
+        resource_manifest_rewind(manifest_reader);
+
+        update_task_set_progress(update_task, UpdateTaskStageResourcesFileCleanup, 0);
+        uint32_t n_processed_file_entries = 0;
         while((entry_ptr = resource_manifest_reader_next(manifest_reader))) {
             if(entry_ptr->type == ResourceManifestEntryTypeFile) {
                 update_task_set_progress(
                     update_task,
                     UpdateTaskStageProgress,
-                    /* For this stage, first pass = old manifest's file cleanup */
-                    (n_processed_entries++ * UpdateTaskResourcesWeightsFileCleanup) /
-                        n_approx_file_entries);
+                    (n_processed_file_entries++ * 100) / n_file_entries);
 
                 FuriString* file_path = furi_string_alloc();
                 path_concat(
@@ -108,16 +101,14 @@ static void update_task_cleanup_resources(UpdateTask* update_task, const uint32_
             }
         }
 
-        n_processed_entries = 0;
+        update_task_set_progress(update_task, UpdateTaskStageResourcesDirCleanup, 0);
+        uint32_t n_processed_dir_entries = 0;
         while((entry_ptr = resource_manifest_reader_previous(manifest_reader))) {
             if(entry_ptr->type == ResourceManifestEntryTypeDirectory) {
                 update_task_set_progress(
                     update_task,
                     UpdateTaskStageProgress,
-                    /* For this stage, second 10% of progress = cleanup directories */
-                    UpdateTaskResourcesWeightsFileCleanup +
-                        (n_processed_entries++ * UpdateTaskResourcesWeightsDirCleanup) /
-                            n_dir_entries);
+                    (n_processed_dir_entries++ * 100) / n_dir_entries);
 
                 FuriString* folder_path = furi_string_alloc();
 
@@ -166,26 +157,22 @@ static bool update_task_post_update(UpdateTask* update_task) {
     if(update_task->state.groups & UpdateTaskStageGroupResources) {
         TarUnpackProgress progress = {
             .update_task = update_task,
-            .total_files = 0,
-            .processed_files = 0,
+            .archive = archive,
         };
-        update_task_set_progress(update_task, UpdateTaskStageResourcesUpdate, 0);
 
         path_concat(
             furi_string_get_cstr(update_task->update_path),
             furi_string_get_cstr(update_task->manifest->resource_bundle),
             file_path);
 
+        CHECK_RESULT(tar_archive_open(
+            archive, furi_string_get_cstr(file_path), TarOpenModeReadHeatshrink));
+
+        update_task_cleanup_resources(update_task);
+
+        update_task_set_progress(update_task, UpdateTaskStageResourcesFileUnpack, 0);
         tar_archive_set_file_callback(archive, update_task_resource_unpack_cb, &progress);
-        CHECK_RESULT(
-            tar_archive_open(archive, furi_string_get_cstr(file_path), TAR_OPEN_MODE_READ));
-
-        progress.total_files = tar_archive_get_entries_count(archive);
-        if(progress.total_files > 0) {
-            update_task_cleanup_resources(update_task, progress.total_files);
-
-            CHECK_RESULT(tar_archive_unpack_to(archive, STORAGE_EXT_PATH_PREFIX, NULL));
-        }
+        CHECK_RESULT(tar_archive_unpack_to(archive, STORAGE_EXT_PATH_PREFIX, NULL));
     }
 
     if(update_task->state.groups & UpdateTaskStageGroupSplashscreen) {
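Taken together, the updater-side changes boil down to: open the archive once in heatshrink mode, derive progress from how much of the compressed stream has been consumed instead of a pre-counted file total, and unpack in a single pass. A condensed, hypothetical version of that flow is sketched below; the `tar_archive_*` calls are the ones introduced or used in this commit, while the helper names and the percentage handling are illustrative only.

```c
#include <furi.h>
#include <storage/storage.h>
#include <toolbox/tar/tar_archive.h>

static bool unpack_progress_cb(const char* name, bool is_directory, void* context) {
    UNUSED(name);
    UNUSED(is_directory);
    TarArchive* archive = context;
    int32_t progress = 0, total = 0;
    /* Progress now comes from the compressed-stream read position */
    tar_archive_get_read_progress(archive, &progress, &total);
    int32_t percent = (progress * 100) / (total + 1);
    (void)percent; /* the updater feeds this into update_task_set_progress() */
    return true; /* returning false would abort extraction */
}

static bool extract_resources(Storage* storage, const char* archive_path, const char* out_dir) {
    TarArchive* archive = tar_archive_alloc(storage);
    /* Open mode is guessed from the extension (e.g. ".ths" => heatshrink-compressed tar) */
    TarOpenMode mode = tar_archive_get_mode_for_path(archive_path);
    bool success = false;
    if(tar_archive_open(archive, archive_path, mode)) {
        tar_archive_set_file_callback(archive, unpack_progress_cb, archive);
        success = tar_archive_unpack_to(archive, out_dir, NULL);
    }
    tar_archive_free(archive);
    return success;
}
```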
@@ -102,7 +102,7 @@ Even if something goes wrong, updater allows you to retry failed operations and
 | Writing flash | **10** | **0-100** | Block read/write error |
 | Validating flash | **11** | **0-100** | Block read/write error |
 | Restoring LFS | **12** | **0-100** | FS read/write error |
-| Updating resources | **13** | **0-100** | SD card read/write error |
+| Updating resources | **13-15** | **0-100** | SD card read/write error |
 
 ## Building update packages
 
documentation/file_formats/TarHeatshrinkFormat.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+# Heatshrink-compressed Tarball Format
+
+Flipper supports the use of the Heatshrink compression library for .tar archives. This allows for smaller file sizes and faster OTA updates.
+
+The Heatshrink specification does not define a container format for storing compression parameters. This document describes the format used by Flipper to store Heatshrink-compressed data streams.
+
+## Header
+
+The header begins with a magic value, followed by a version number and the compression parameters: window size and lookahead size.
+
+The magic value consists of 4 bytes: `0x48 0x53 0x44 0x53` (ASCII "HSDS", HeatShrink DataStream).
+
+The version number is a single byte, currently set to `0x01`.
+
+The window size is a single byte, representing the size of the sliding window used by the compressor. It corresponds to the `-w` parameter of the Heatshrink CLI.
+
+The lookahead size is a single byte, representing the size of the lookahead buffer used by the compressor. It corresponds to the `-l` parameter of the Heatshrink CLI.
+
+The total header size is 7 bytes. The header is followed by the compressed data.
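The header layout described above is simple enough to capture in a packed struct. The sketch below is illustrative only (the struct and function names are not from the firmware source); it shows the 7-byte layout and a minimal validity check.

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical in-memory view of the 7-byte HSDS stream header */
typedef struct __attribute__((packed)) {
    uint8_t magic[4]; /* 0x48 0x53 0x44 0x53, ASCII "HSDS" */
    uint8_t version; /* currently 0x01 */
    uint8_t window_sz2; /* heatshrink -w parameter */
    uint8_t lookahead_sz2; /* heatshrink -l parameter */
} HeatshrinkStreamHeader;

_Static_assert(sizeof(HeatshrinkStreamHeader) == 7, "HSDS header must be 7 bytes");

/* The compressed data starts immediately after these 7 bytes */
static bool hs_stream_header_valid(const HeatshrinkStreamHeader* hdr) {
    return memcmp(hdr->magic, "HSDS", 4) == 0 && hdr->version == 0x01;
}
```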
@@ -13,9 +13,15 @@
 /** Defines encoder and decoder lookahead buffer size */
 #define COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG (4u)
 
-/** Buffer size for input data */
 #define COMPRESS_ICON_ENCODED_BUFF_SIZE (256u)
 
+const CompressConfigHeatshrink compress_config_heatshrink_default = {
+    .window_sz2 = COMPRESS_EXP_BUFF_SIZE_LOG,
+    .lookahead_sz2 = COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG,
+    .input_buffer_sz = COMPRESS_ICON_ENCODED_BUFF_SIZE,
+};
+
+/** Buffer size for input data */
 static bool compress_decode_internal(
     heatshrink_decoder* decoder,
     const uint8_t* data_in,
@@ -83,16 +89,19 @@ void compress_icon_decode(CompressIcon* instance, const uint8_t* icon_data, uint
 }
 
 struct Compress {
+    const void* config;
     heatshrink_encoder* encoder;
     heatshrink_decoder* decoder;
 };
 
-Compress* compress_alloc(uint16_t compress_buff_size) {
+Compress* compress_alloc(CompressType type, const void* config) {
+    furi_check(type == CompressTypeHeatshrink);
+    furi_check(config);
+
     Compress* compress = malloc(sizeof(Compress));
-    compress->encoder =
-        heatshrink_encoder_alloc(COMPRESS_EXP_BUFF_SIZE_LOG, COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG);
-    compress->decoder = heatshrink_decoder_alloc(
-        compress_buff_size, COMPRESS_EXP_BUFF_SIZE_LOG, COMPRESS_LOOKAHEAD_BUFF_SIZE_LOG);
+    compress->config = config;
+    compress->encoder = NULL;
+    compress->decoder = NULL;
 
     return compress;
 }
@@ -100,8 +109,12 @@ Compress* compress_alloc(uint16_t compress_buff_size) {
 void compress_free(Compress* compress) {
     furi_check(compress);
 
-    heatshrink_encoder_free(compress->encoder);
-    heatshrink_decoder_free(compress->decoder);
+    if(compress->encoder) {
+        heatshrink_encoder_free(compress->encoder);
+    }
+    if(compress->decoder) {
+        heatshrink_decoder_free(compress->decoder);
+    }
     free(compress);
 }
 
@@ -125,6 +138,7 @@ static bool compress_encode_internal(
     size_t sunk = 0;
     size_t res_buff_size = sizeof(CompressHeader);
 
+    heatshrink_encoder_reset(encoder);
     /* Sink data to encoding buffer */
     while((sunk < data_in_size) && !encode_failed) {
         sink_res =
@@ -179,10 +193,116 @@ static bool compress_encode_internal(
         *data_res_size = 0;
         result = false;
     }
-    heatshrink_encoder_reset(encoder);
     return result;
 }
 
+static inline bool compress_decoder_poll(
+    heatshrink_decoder* decoder,
+    uint8_t* decompressed_chunk,
+    size_t decomp_buffer_size,
+    CompressIoCallback write_cb,
+    void* write_context) {
+    HSD_poll_res poll_res;
+    size_t poll_size;
+
+    do {
+        poll_res =
+            heatshrink_decoder_poll(decoder, decompressed_chunk, decomp_buffer_size, &poll_size);
+        if(poll_res < 0) {
+            return false;
+        }
+
+        size_t write_size = write_cb(write_context, decompressed_chunk, poll_size);
+        if(write_size != poll_size) {
+            return false;
+        }
+    } while(poll_res == HSDR_POLL_MORE);
+
+    return true;
+}
+
+static bool compress_decode_stream_internal(
+    heatshrink_decoder* decoder,
+    const size_t work_buffer_size,
+    CompressIoCallback read_cb,
+    void* read_context,
+    CompressIoCallback write_cb,
+    void* write_context) {
+    bool decode_failed = false;
+    HSD_sink_res sink_res;
+    HSD_finish_res finish_res;
+    size_t read_size = 0;
+    size_t sink_size = 0;
+
+    uint8_t* compressed_chunk = malloc(work_buffer_size);
+    uint8_t* decompressed_chunk = malloc(work_buffer_size);
+
+    /* Sink data to decoding buffer */
+    do {
+        read_size = read_cb(read_context, compressed_chunk, work_buffer_size);
+
+        size_t sunk = 0;
+        while(sunk < read_size && !decode_failed) {
+            sink_res = heatshrink_decoder_sink(
+                decoder, &compressed_chunk[sunk], read_size - sunk, &sink_size);
+            if(sink_res < 0) {
+                decode_failed = true;
+                break;
+            }
+            sunk += sink_size;
+
+            if(!compress_decoder_poll(
+                   decoder, decompressed_chunk, work_buffer_size, write_cb, write_context)) {
+                decode_failed = true;
+                break;
+            }
+        }
+    } while(!decode_failed && read_size);
+
+    /* Notify sinking complete and poll decoded data */
+    if(!decode_failed) {
+        while((finish_res = heatshrink_decoder_finish(decoder)) != HSDR_FINISH_DONE) {
+            if(finish_res < 0) {
+                decode_failed = true;
+                break;
+            }
+
+            if(!compress_decoder_poll(
+                   decoder, decompressed_chunk, work_buffer_size, write_cb, write_context)) {
+                decode_failed = true;
+                break;
+            }
+        }
+    }
+
+    free(compressed_chunk);
+    free(decompressed_chunk);
+
+    return !decode_failed;
+}
+
+typedef struct {
+    uint8_t* data_ptr;
+    size_t data_size;
+    bool is_source;
+} MemoryStreamState;
+
+static int32_t memory_stream_io_callback(void* context, uint8_t* ptr, size_t size) {
+    MemoryStreamState* state = (MemoryStreamState*)context;
+
+    if(size > state->data_size) {
+        size = state->data_size;
+    }
+    if(state->is_source) {
+        memcpy(ptr, state->data_ptr, size);
+    } else {
+        memcpy(state->data_ptr, ptr, size);
+    }
+    state->data_ptr += size;
+    state->data_size -= size;
+    return size;
+}
+
 static bool compress_decode_internal(
     heatshrink_decoder* decoder,
     const uint8_t* data_in,
@@ -196,59 +316,29 @@ static bool compress_decode_internal(
     furi_check(data_res_size);
 
     bool result = false;
-    bool decode_failed = false;
-    HSD_sink_res sink_res;
-    HSD_poll_res poll_res;
-    HSD_finish_res finish_res;
-    size_t sink_size = 0;
-    size_t res_buff_size = 0;
-    size_t poll_size = 0;
-
     CompressHeader* header = (CompressHeader*)data_in;
     if(header->is_compressed) {
-        /* Sink data to decoding buffer */
-        size_t compressed_size = header->compressed_buff_size;
-        size_t sunk = 0;
-        while(sunk < compressed_size && !decode_failed) {
-            sink_res = heatshrink_decoder_sink(
-                decoder,
-                (uint8_t*)&data_in[sizeof(CompressHeader) + sunk],
-                compressed_size - sunk,
-                &sink_size);
-            if(sink_res < 0) {
-                decode_failed = true;
-                break;
-            }
-            sunk += sink_size;
-            do {
-                poll_res = heatshrink_decoder_poll(
-                    decoder, &data_out[res_buff_size], data_out_size - res_buff_size, &poll_size);
-                if((poll_res < 0) || ((data_out_size - res_buff_size) == 0)) {
-                    decode_failed = true;
-                    break;
-                }
-                res_buff_size += poll_size;
-            } while(poll_res == HSDR_POLL_MORE);
-        }
-        /* Notify sinking complete and poll decoded data */
-        if(!decode_failed) {
-            finish_res = heatshrink_decoder_finish(decoder);
-            if(finish_res < 0) {
-                decode_failed = true;
-            } else {
-                do {
-                    poll_res = heatshrink_decoder_poll(
-                        decoder,
-                        &data_out[res_buff_size],
-                        data_out_size - res_buff_size,
-                        &poll_size);
-                    res_buff_size += poll_size;
-                    finish_res = heatshrink_decoder_finish(decoder);
-                } while(finish_res != HSDR_FINISH_DONE);
-            }
-        }
-        *data_res_size = res_buff_size;
-        result = !decode_failed;
+        MemoryStreamState compressed_context = {
+            .data_ptr = (uint8_t*)&data_in[sizeof(CompressHeader)],
+            .data_size = header->compressed_buff_size,
+            .is_source = true,
+        };
+        MemoryStreamState decompressed_context = {
+            .data_ptr = data_out,
+            .data_size = data_out_size,
+            .is_source = false,
+        };
+        heatshrink_decoder_reset(decoder);
+        if((result = compress_decode_stream_internal(
+               decoder,
+               COMPRESS_ICON_ENCODED_BUFF_SIZE,
+               memory_stream_io_callback,
+               &compressed_context,
+               memory_stream_io_callback,
+               &decompressed_context))) {
+            *data_res_size = data_out_size - decompressed_context.data_size;
+        }
     } else if(data_out_size >= data_in_size - 1) {
         memcpy(data_out, &data_in[1], data_in_size);
         *data_res_size = data_in_size - 1;
@@ -257,7 +347,6 @@ static bool compress_decode_internal(
         /* Not enough space in output buffer */
         result = false;
     }
-    heatshrink_decoder_reset(decoder);
     return result;
 }
 
@@ -268,6 +357,11 @@ bool compress_encode(
     uint8_t* data_out,
     size_t data_out_size,
     size_t* data_res_size) {
+    if(!compress->encoder) {
+        CompressConfigHeatshrink* hs_config = (CompressConfigHeatshrink*)compress->config;
+        compress->encoder =
+            heatshrink_encoder_alloc(hs_config->window_sz2, hs_config->lookahead_sz2);
+    }
     return compress_encode_internal(
         compress->encoder, data_in, data_in_size, data_out, data_out_size, data_res_size);
 }
@@ -279,6 +373,201 @@ bool compress_decode(
     uint8_t* data_out,
     size_t data_out_size,
     size_t* data_res_size) {
+    if(!compress->decoder) {
+        CompressConfigHeatshrink* hs_config = (CompressConfigHeatshrink*)compress->config;
+        compress->decoder = heatshrink_decoder_alloc(
+            hs_config->input_buffer_sz, hs_config->window_sz2, hs_config->lookahead_sz2);
+    }
     return compress_decode_internal(
         compress->decoder, data_in, data_in_size, data_out, data_out_size, data_res_size);
 }
+
+bool compress_decode_streamed(
+    Compress* compress,
+    CompressIoCallback read_cb,
+    void* read_context,
+    CompressIoCallback write_cb,
+    void* write_context) {
+    CompressConfigHeatshrink* hs_config = (CompressConfigHeatshrink*)compress->config;
+    if(!compress->decoder) {
+        compress->decoder = heatshrink_decoder_alloc(
+            hs_config->input_buffer_sz, hs_config->window_sz2, hs_config->lookahead_sz2);
+    }
+
+    heatshrink_decoder_reset(compress->decoder);
+    return compress_decode_stream_internal(
+        compress->decoder,
+        hs_config->input_buffer_sz,
+        read_cb,
+        read_context,
+        write_cb,
+        write_context);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+struct CompressStreamDecoder {
+    heatshrink_decoder* decoder;
+    size_t stream_position;
+    size_t decode_buffer_size;
+    size_t decode_buffer_position;
+    uint8_t* decode_buffer;
+    CompressIoCallback read_cb;
+    void* read_context;
+};
+
+CompressStreamDecoder* compress_stream_decoder_alloc(
+    CompressType type,
+    const void* config,
+    CompressIoCallback read_cb,
+    void* read_context) {
+    furi_check(type == CompressTypeHeatshrink);
+    furi_check(config);
+
+    const CompressConfigHeatshrink* hs_config = (const CompressConfigHeatshrink*)config;
+    CompressStreamDecoder* instance = malloc(sizeof(CompressStreamDecoder));
+    instance->decoder = heatshrink_decoder_alloc(
+        hs_config->input_buffer_sz, hs_config->window_sz2, hs_config->lookahead_sz2);
+    instance->stream_position = 0;
+    instance->decode_buffer_size = hs_config->input_buffer_sz;
+    instance->decode_buffer_position = 0;
+    instance->decode_buffer = malloc(hs_config->input_buffer_sz);
+    instance->read_cb = read_cb;
+    instance->read_context = read_context;
+
+    return instance;
+}
+
+void compress_stream_decoder_free(CompressStreamDecoder* instance) {
+    furi_check(instance);
+    heatshrink_decoder_free(instance->decoder);
+    free(instance->decode_buffer);
+    free(instance);
+}
+
+static bool compress_decode_stream_chunk(
+    CompressStreamDecoder* sd,
+    CompressIoCallback read_cb,
+    void* read_context,
+    uint8_t* decompressed_chunk,
+    size_t decomp_chunk_size) {
+    HSD_sink_res sink_res;
+    HSD_poll_res poll_res;
+
+    /*
+    First, try to output data from the decoder to the output buffer.
+    If we could fill the output buffer, return.
+    If the output buffer is not full, keep polling the decoder
+    until it has no more data to output.
+    Then, read more data from the input and sink it to the decoder.
+    Repeat until the input is exhausted or the output buffer is full.
+    */
+
+    bool failed = false;
+    bool can_sink_more = true;
+    bool can_read_more = true;
+
+    do {
+        do {
+            size_t poll_size = 0;
+            poll_res = heatshrink_decoder_poll(
+                sd->decoder, decompressed_chunk, decomp_chunk_size, &poll_size);
+            if(poll_res < 0) {
+                return false;
+            }
+
+            decomp_chunk_size -= poll_size;
+            decompressed_chunk += poll_size;
+        } while((poll_res == HSDR_POLL_MORE) && decomp_chunk_size);
+
+        if(!decomp_chunk_size) {
+            break;
+        }
+
+        if(can_read_more && (sd->decode_buffer_position < sd->decode_buffer_size)) {
+            size_t read_size = read_cb(
+                read_context,
+                &sd->decode_buffer[sd->decode_buffer_position],
+                sd->decode_buffer_size - sd->decode_buffer_position);
+            sd->decode_buffer_position += read_size;
+            can_read_more = read_size > 0;
+        }
+
+        while(sd->decode_buffer_position && can_sink_more) {
+            size_t sink_size = 0;
+            sink_res = heatshrink_decoder_sink(
+                sd->decoder, sd->decode_buffer, sd->decode_buffer_position, &sink_size);
+            can_sink_more = sink_res == HSDR_SINK_OK;
+            if(sink_res < 0) {
+                failed = true;
+                break;
+            }
+            sd->decode_buffer_position -= sink_size;
+
+            /* If some data was left in the buffer, move it to the beginning */
+            if(sink_size && sd->decode_buffer_position) {
+                memmove(
+                    sd->decode_buffer, &sd->decode_buffer[sink_size], sd->decode_buffer_position);
+            }
+        }
+    } while(!failed);
+
+    return decomp_chunk_size == 0;
+}
+
+bool compress_stream_decoder_read(
+    CompressStreamDecoder* instance,
+    uint8_t* data_out,
+    size_t data_out_size) {
+    furi_check(instance);
+    furi_check(data_out);
+
+    if(compress_decode_stream_chunk(
+           instance, instance->read_cb, instance->read_context, data_out, data_out_size)) {
+        instance->stream_position += data_out_size;
+        return true;
+    }
+    return false;
+}
+
+bool compress_stream_decoder_seek(CompressStreamDecoder* instance, size_t position) {
+    furi_check(instance);
+
+    /* Check that the requested position is ahead of the current position:
+       we can't rewind the input stream */
+    furi_check(position >= instance->stream_position);
+
+    /* Read and discard data up to the requested position */
+    uint8_t* dummy_buffer = malloc(instance->decode_buffer_size);
+    bool success = true;
+
+    while(instance->stream_position < position) {
+        size_t bytes_to_read = position - instance->stream_position;
+        if(bytes_to_read > instance->decode_buffer_size) {
+            bytes_to_read = instance->decode_buffer_size;
+        }
+        if(!compress_stream_decoder_read(instance, dummy_buffer, bytes_to_read)) {
+            success = false;
+            break;
+        }
+    }
+
+    free(dummy_buffer);
+    return success;
+}
+
+size_t compress_stream_decoder_tell(CompressStreamDecoder* instance) {
+    furi_check(instance);
+    return instance->stream_position;
+}
+
+bool compress_stream_decoder_rewind(CompressStreamDecoder* instance) {
+    furi_check(instance);
+
+    /* Reset decoder and read buffer */
+    heatshrink_decoder_reset(instance->decoder);
+    instance->stream_position = 0;
+    instance->decode_buffer_position = 0;
+
+    return true;
+}
@@ -44,17 +44,34 @@ void compress_icon_free(CompressIcon* instance);
  */
 void compress_icon_decode(CompressIcon* instance, const uint8_t* icon_data, uint8_t** output);
 
+//////////////////////////////////////////////////////////////////////////
+
 /** Compress control structure */
 typedef struct Compress Compress;
 
+/** Supported compression types */
+typedef enum {
+    CompressTypeHeatshrink = 0,
+} CompressType;
+
+/** Configuration for heatshrink compression */
+typedef struct {
+    uint16_t window_sz2;
+    uint16_t lookahead_sz2;
+    uint16_t input_buffer_sz;
+} CompressConfigHeatshrink;
+
+/** Default configuration for heatshrink compression. Used for image assets. */
+extern const CompressConfigHeatshrink compress_config_heatshrink_default;
+
 /** Allocate encoder and decoder
  *
- * @param compress_buff_size  size of decoder and encoder buffer to
- *                            allocate
+ * @param      type    Compression type
+ * @param[in]  config  Configuration for compression, specific to type
  *
  * @return Compress instance
  */
-Compress* compress_alloc(uint16_t compress_buff_size);
+Compress* compress_alloc(CompressType type, const void* config);
 
 /** Free encoder and decoder
  *
@@ -71,6 +88,8 @@ void compress_free(Compress* compress);
  * @param[in]  data_out_size  The data out size
  * @param      data_res_size  pointer to result output data size
  *
+ * @note Prepends compressed stream with a header. If data is not compressible,
+ *       it will be stored as is after the header.
  * @return true on success
  */
 bool compress_encode(
@@ -90,6 +109,7 @@ bool compress_encode(
  * @param[in]  data_out_size  The data out size
  * @param      data_res_size  pointer to result output data size
  *
+ * @note Expects compressed stream with a header, as produced by `compress_encode`.
  * @return true on success
  */
 bool compress_decode(
@@ -100,6 +120,100 @@ bool compress_decode(
    size_t data_out_size,
    size_t* data_res_size);

+/** I/O callback for streamed compression/decompression
+ *
+ * @param context user context
+ * @param buffer buffer to read/write
+ * @param size size of buffer
+ *
+ * @return number of bytes read/written, 0 on end of stream, negative on error
+ */
+typedef int32_t (*CompressIoCallback)(void* context, uint8_t* buffer, size_t size);
+
+/** Decompress streamed data
+ *
+ * @param compress Compress instance
+ * @param read_cb read callback
+ * @param read_context read callback context
+ * @param write_cb write callback
+ * @param write_context write callback context
+ *
+ * @note Does not expect a header, just compressed data stream.
+ * @return true on success
+ */
+bool compress_decode_streamed(
+    Compress* compress,
+    CompressIoCallback read_cb,
+    void* read_context,
+    CompressIoCallback write_cb,
+    void* write_context);
+
+//////////////////////////////////////////////////////////////////////////
+
+/** CompressStreamDecoder control structure */
+typedef struct CompressStreamDecoder CompressStreamDecoder;
+
+/** Allocate stream decoder
+ *
+ * @param type Compression type
+ * @param[in] config Configuration for compression, specific to type
+ * @param read_cb The read callback for input (compressed) data
+ * @param read_context The read context
+ *
+ * @return CompressStreamDecoder instance
+ */
+CompressStreamDecoder* compress_stream_decoder_alloc(
+    CompressType type,
+    const void* config,
+    CompressIoCallback read_cb,
+    void* read_context);
+
+/** Free stream decoder
+ *
+ * @param instance The CompressStreamDecoder instance
+ */
+void compress_stream_decoder_free(CompressStreamDecoder* instance);
+
+/** Read uncompressed data chunk from stream decoder
+ *
+ * @param instance The CompressStreamDecoder instance
+ * @param data_out The data out
+ * @param[in] data_out_size The data out size
+ *
+ * @return true on success
+ */
+bool compress_stream_decoder_read(
+    CompressStreamDecoder* instance,
+    uint8_t* data_out,
+    size_t data_out_size);
+
+/** Seek to position in uncompressed data stream
+ *
+ * @param instance The CompressStreamDecoder instance
+ * @param[in] position The position
+ *
+ * @return true on success
+ * @warning Backward seeking is not supported
+ */
+bool compress_stream_decoder_seek(CompressStreamDecoder* instance, size_t position);
+
+/** Get current position in uncompressed data stream
+ *
+ * @param instance The CompressStreamDecoder instance
+ *
+ * @return current position
+ */
+size_t compress_stream_decoder_tell(CompressStreamDecoder* instance);
+
+/** Reset stream decoder to the beginning
+ * @warning Read callback must be repositioned by caller separately
+ *
+ * @param instance The CompressStreamDecoder instance
+ *
+ * @return true on success
+ */
+bool compress_stream_decoder_rewind(CompressStreamDecoder* instance);
+
#ifdef __cplusplus
}
#endif
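The stream decoder declared above is callback-driven: it pulls compressed bytes on demand and hands back uncompressed chunks, which is what allows archive entries to be read without buffering the whole file. A hedged sketch of the pattern, assuming an already-open Storage file positioned at the start of the raw heatshrink stream (function names are illustrative):

#include <furi.h>
#include <storage/storage.h>
#include <toolbox/compress.h>

/* Read callback: feed the decoder from a Storage file (illustrative). */
static int32_t example_read_cb(void* context, uint8_t* buffer, size_t size) {
    File* file = context;
    return storage_file_read(file, buffer, size);
}

/* Decode `length` uncompressed bytes starting at `offset` into `out`.
 * Seeking is forward-only: the decoder decompresses and discards data up to `offset`. */
static bool example_stream_read(File* file, size_t offset, uint8_t* out, size_t length) {
    CompressStreamDecoder* decoder = compress_stream_decoder_alloc(
        CompressTypeHeatshrink, &compress_config_heatshrink_default, example_read_cb, file);

    bool ok = (offset == 0 || compress_stream_decoder_seek(decoder, offset)) &&
              compress_stream_decoder_read(decoder, out, length);

    compress_stream_decoder_free(decoder);
    return ok;
}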
@@ -43,6 +43,7 @@ void path_extract_filename(FuriString* path, FuriString* name, bool trim_ext) {
void path_extract_extension(FuriString* path, char* ext, size_t ext_len_max) {
    furi_check(path);
    furi_check(ext);
+    furi_check(ext_len_max > 0);

    size_t dot = furi_string_search_rchar(path, '.');
    size_t filename_start = furi_string_search_rchar(path, '/');
@@ -4,6 +4,7 @@
#include <storage/storage.h>
#include <furi.h>
#include <toolbox/path.h>
+#include <toolbox/compress.h>

#define TAG "TarArch"
#define MAX_NAME_LEN 255
@@ -12,14 +13,29 @@
#define FILE_OPEN_NTRIES 10
#define FILE_OPEN_RETRY_DELAY 25

+TarOpenMode tar_archive_get_mode_for_path(const char* path) {
+    char ext[8];
+
+    FuriString* path_str = furi_string_alloc_set_str(path);
+    path_extract_extension(path_str, ext, sizeof(ext));
+    furi_string_free(path_str);
+
+    if(strcmp(ext, ".ths") == 0) {
+        return TarOpenModeReadHeatshrink;
+    } else {
+        return TarOpenModeRead;
+    }
+}
+
typedef struct TarArchive {
    Storage* storage;
+    File* stream;
    mtar_t tar;
    tar_unpack_file_cb unpack_cb;
    void* unpack_cb_context;
} TarArchive;

-/* API WRAPPER */
+/* Plain file backend - uncompressed, supports read and write */
static int mtar_storage_file_write(void* stream, const void* data, unsigned size) {
    uint16_t bytes_written = storage_file_write(stream, data, size);
    return (bytes_written == size) ? bytes_written : MTAR_EWRITEFAIL;
@@ -38,7 +54,6 @@ static int mtar_storage_file_seek(void* stream, unsigned offset) {
static int mtar_storage_file_close(void* stream) {
    if(stream) {
        storage_file_close(stream);
-        storage_file_free(stream);
    }
    return MTAR_ESUCCESS;
}
@@ -50,41 +65,133 @@ const struct mtar_ops filesystem_ops = {
    .close = mtar_storage_file_close,
};

+/* Heatshrink stream backend - compressed, read-only */
+
+typedef struct {
+    CompressConfigHeatshrink heatshrink_config;
+    File* stream;
+    CompressStreamDecoder* decoder;
+} HeatshrinkStream;
+
+/* HSDS 'heatshrink data stream' header magic */
+static const uint32_t HEATSHRINK_MAGIC = 0x53445348;
+
+typedef struct {
+    uint32_t magic;
+    uint8_t version;
+    uint8_t window_sz2;
+    uint8_t lookahead_sz2;
+} FURI_PACKED HeatshrinkStreamHeader;
+_Static_assert(sizeof(HeatshrinkStreamHeader) == 7, "Invalid HeatshrinkStreamHeader size");
+
+static int mtar_heatshrink_file_close(void* stream) {
+    HeatshrinkStream* hs_stream = stream;
+    if(hs_stream) {
+        if(hs_stream->decoder) {
+            compress_stream_decoder_free(hs_stream->decoder);
+        }
+        storage_file_close(hs_stream->stream);
+        storage_file_free(hs_stream->stream);
+        free(hs_stream);
+    }
+    return MTAR_ESUCCESS;
+}
+
+static int mtar_heatshrink_file_read(void* stream, void* data, unsigned size) {
+    HeatshrinkStream* hs_stream = stream;
+    bool read_success = compress_stream_decoder_read(hs_stream->decoder, data, size);
+    return read_success ? (int)size : MTAR_EREADFAIL;
+}
+
+static int mtar_heatshrink_file_seek(void* stream, unsigned offset) {
+    HeatshrinkStream* hs_stream = stream;
+    bool success = false;
+    if(offset == 0) {
+        success = storage_file_seek(hs_stream->stream, sizeof(HeatshrinkStreamHeader), true) &&
+                  compress_stream_decoder_rewind(hs_stream->decoder);
+    } else {
+        success = compress_stream_decoder_seek(hs_stream->decoder, offset);
+    }
+    return success ? MTAR_ESUCCESS : MTAR_ESEEKFAIL;
+}
+
+const struct mtar_ops heatshrink_ops = {
+    .read = mtar_heatshrink_file_read,
+    .write = NULL, // not supported
+    .seek = mtar_heatshrink_file_seek,
+    .close = mtar_heatshrink_file_close,
+};
+
+//////////////////////////////////////////////////////////////////////////
+
TarArchive* tar_archive_alloc(Storage* storage) {
    furi_check(storage);
    TarArchive* archive = malloc(sizeof(TarArchive));
    archive->storage = storage;
+    archive->stream = storage_file_alloc(archive->storage);
    archive->unpack_cb = NULL;
    return archive;
}

+static int32_t file_read_cb(void* context, uint8_t* buffer, size_t buffer_size) {
+    File* file = context;
+    return storage_file_read(file, buffer, buffer_size);
+}
+
bool tar_archive_open(TarArchive* archive, const char* path, TarOpenMode mode) {
    furi_check(archive);
    FS_AccessMode access_mode;
    FS_OpenMode open_mode;
+    bool compressed = false;
    int mtar_access = 0;

    switch(mode) {
-    case TAR_OPEN_MODE_READ:
+    case TarOpenModeRead:
        mtar_access = MTAR_READ;
        access_mode = FSAM_READ;
        open_mode = FSOM_OPEN_EXISTING;
        break;
-    case TAR_OPEN_MODE_WRITE:
+    case TarOpenModeWrite:
        mtar_access = MTAR_WRITE;
        access_mode = FSAM_WRITE;
        open_mode = FSOM_CREATE_ALWAYS;
        break;
+    case TarOpenModeReadHeatshrink:
+        mtar_access = MTAR_READ;
+        access_mode = FSAM_READ;
+        open_mode = FSOM_OPEN_EXISTING;
+        compressed = true;
+        break;
    default:
        return false;
    }

-    File* stream = storage_file_alloc(archive->storage);
+    File* stream = archive->stream;
    if(!storage_file_open(stream, path, access_mode, open_mode)) {
-        storage_file_free(stream);
        return false;
    }
-    mtar_init(&archive->tar, mtar_access, &filesystem_ops, stream);
+
+    if(compressed) {
+        /* Read and validate stream header */
+        HeatshrinkStreamHeader header;
+        if(storage_file_read(stream, &header, sizeof(HeatshrinkStreamHeader)) !=
+               sizeof(HeatshrinkStreamHeader) ||
+           header.magic != HEATSHRINK_MAGIC) {
+            storage_file_close(stream);
+            return false;
+        }
+
+        HeatshrinkStream* hs_stream = malloc(sizeof(HeatshrinkStream));
+        hs_stream->stream = stream;
+        hs_stream->heatshrink_config.window_sz2 = header.window_sz2;
+        hs_stream->heatshrink_config.lookahead_sz2 = header.lookahead_sz2;
+        hs_stream->heatshrink_config.input_buffer_sz = FILE_BLOCK_SIZE;
+        hs_stream->decoder = compress_stream_decoder_alloc(
+            CompressTypeHeatshrink, &hs_stream->heatshrink_config, file_read_cb, stream);
+        mtar_init(&archive->tar, mtar_access, &heatshrink_ops, hs_stream);
+    } else {
+        mtar_init(&archive->tar, mtar_access, &filesystem_ops, stream);
+    }
+
    return true;
}
@@ -94,6 +201,7 @@ void tar_archive_free(TarArchive* archive) {
    if(mtar_is_open(&archive->tar)) {
        mtar_close(&archive->tar);
    }
+    storage_file_free(archive->stream);
    free(archive);
}

@@ -121,6 +229,21 @@ int32_t tar_archive_get_entries_count(TarArchive* archive) {
    return counter;
}

+bool tar_archive_get_read_progress(TarArchive* archive, int32_t* processed, int32_t* total) {
+    furi_check(archive);
+    if(mtar_access_mode(&archive->tar) != MTAR_READ) {
+        return false;
+    }
+
+    if(processed) {
+        *processed = storage_file_tell(archive->stream);
+    }
+    if(total) {
+        *total = storage_file_size(archive->stream);
+    }
+    return true;
+}
+
bool tar_archive_dir_add_element(TarArchive* archive, const char* dirpath) {
    furi_check(archive);
    return (mtar_write_dir_header(&archive->tar, dirpath) == MTAR_ESUCCESS);
@@ -258,7 +381,7 @@ static int archive_extract_foreach_cb(mtar_t* tar, const mtar_header_t* header,

    furi_string_free(converted_fname);
    furi_string_free(full_extracted_fname);
-    return success ? 0 : -1;
+    return success ? 0 : MTAR_EFAILURE;
}

bool tar_archive_unpack_to(
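With the heatshrink mtar backend above, a compressed archive behaves like a plain one once the right open mode has been detected from the extension. A minimal usage sketch, not part of this change; the helper name, the NULL name converter and the omitted error handling are assumptions:

#include <storage/storage.h>
#include <toolbox/tar/tar_archive.h>

/* Illustrative: unpack either a plain .tar or a .ths archive to `destination`. */
static bool example_unpack_any(Storage* storage, const char* archive_path, const char* destination) {
    TarArchive* archive = tar_archive_alloc(storage);

    /* ".ths" selects the read-only heatshrink backend, anything else plain read */
    TarOpenMode mode = tar_archive_get_mode_for_path(archive_path);

    bool success = tar_archive_open(archive, archive_path, mode) &&
                   tar_archive_unpack_to(archive, destination, NULL);

    tar_archive_free(archive);
    return success;
}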
@@ -12,62 +12,197 @@ typedef struct TarArchive TarArchive;

typedef struct Storage Storage;

+/** Tar archive open mode
+ */
typedef enum {
-    TAR_OPEN_MODE_READ = 'r',
-    TAR_OPEN_MODE_WRITE = 'w',
-    TAR_OPEN_MODE_STDOUT = 's' /* to be implemented */
+    TarOpenModeRead = 'r',
+    TarOpenModeWrite = 'w',
+    /* read-only heatshrink compressed tar */
+    TarOpenModeReadHeatshrink = 'h',
} TarOpenMode;

+/** Get expected open mode for archive at the path.
+ * Used for automatic mode detection based on the file extension.
+ *
+ * @param[in] path Path to the archive
+ *
+ * @return open mode from TarOpenMode enum
+ */
+TarOpenMode tar_archive_get_mode_for_path(const char* path);
+
+/** Tar archive constructor
+ *
+ * @param storage Storage API pointer
+ *
+ * @return allocated object
+ */
TarArchive* tar_archive_alloc(Storage* storage);

+/** Open tar archive
+ *
+ * @param archive Tar archive object
+ * @param[in] path Path to the tar archive
+ * @param mode Open mode
+ *
+ * @return true if successful
+ */
bool tar_archive_open(TarArchive* archive, const char* path, TarOpenMode mode);

+/** Tar archive destructor
+ *
+ * @param archive Tar archive object
+ */
void tar_archive_free(TarArchive* archive);

/* High-level API - assumes archive is open */

+/** Unpack tar archive to destination
+ *
+ * @param archive Tar archive object. Must be opened in read mode
+ * @param[in] destination Destination path
+ * @param converter Storage name converter
+ *
+ * @return true if successful
+ */
bool tar_archive_unpack_to(
    TarArchive* archive,
    const char* destination,
    Storage_name_converter converter);

+/** Add file to tar archive
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ * @param[in] fs_file_path Path to the file on the filesystem
+ * @param[in] archive_fname Name of the file in the archive
+ * @param file_size Size of the file
+ *
+ * @return true if successful
+ */
bool tar_archive_add_file(
    TarArchive* archive,
    const char* fs_file_path,
    const char* archive_fname,
    const int32_t file_size);

+/** Add directory to tar archive
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ * @param fs_full_path Path to the directory on the filesystem
+ * @param path_prefix Prefix to add to the directory name in the archive
+ *
+ * @return true if successful
+ */
bool tar_archive_add_dir(TarArchive* archive, const char* fs_full_path, const char* path_prefix);

+/** Get number of entries in the archive
+ *
+ * @param archive Tar archive object
+ *
+ * @return number of entries. -1 on error
+ */
int32_t tar_archive_get_entries_count(TarArchive* archive);

+/** Get read progress
+ *
+ * @param archive Tar archive object. Must be opened in read mode
+ * @param[in] processed Number of processed entries
+ * @param[in] total Total number of entries
+ *
+ * @return true if successful
+ */
+bool tar_archive_get_read_progress(TarArchive* archive, int32_t* processed, int32_t* total);
+
+/** Unpack single file from tar archive
+ *
+ * @param archive Tar archive object. Must be opened in read mode
+ * @param[in] archive_fname Name of the file in the archive
+ * @param[in] destination Destination path
+ *
+ * @return true if successful
+ */
bool tar_archive_unpack_file(
    TarArchive* archive,
    const char* archive_fname,
    const char* destination);

-/* Optional per-entry callback on unpacking - return false to skip entry */
+/** Optional per-entry callback on unpacking
+ * @param name Name of the file or directory
+ * @param is_directory True if the entry is a directory
+ * @param[in] context User context
+ * @return true to process the entry, false to skip
+ */
typedef bool (*tar_unpack_file_cb)(const char* name, bool is_directory, void* context);

+/** Set per-entry callback on unpacking
+ * @param archive Tar archive object
+ * @param callback Callback function
+ * @param[in] context User context
+ */
void tar_archive_set_file_callback(TarArchive* archive, tar_unpack_file_cb callback, void* context);

/* Low-level API */

+/** Add tar archive directory header
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ * @param[in] dirpath Path to the directory
+ *
+ * @return true if successful
+ */
bool tar_archive_dir_add_element(TarArchive* archive, const char* dirpath);

+/** Add tar archive file header
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ * @param[in] path Path to the file
+ * @param data_len Size of the file
+ *
+ * @return true if successful
+ */
bool tar_archive_file_add_header(TarArchive* archive, const char* path, const int32_t data_len);

+/** Add tar archive file data block
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ * @param[in] data_block Data block
+ * @param block_len Size of the data block
+ *
+ * @return true if successful
+ */
bool tar_archive_file_add_data_block(
    TarArchive* archive,
    const uint8_t* data_block,
    const int32_t block_len);

+/** Finalize tar archive file
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ *
+ * @return true if successful
+ */
bool tar_archive_file_finalize(TarArchive* archive);

+/** Store data in tar archive
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ * @param[in] path Path to the file
+ * @param[in] data Data to store
+ * @param data_len Size of the data
+ *
+ * @return true if successful
+ */
bool tar_archive_store_data(
    TarArchive* archive,
    const char* path,
    const uint8_t* data,
    const int32_t data_len);

+/** Finalize tar archive
+ *
+ * @param archive Tar archive object. Must be opened in write mode
+ *
+ * @return true if successful
+ */
bool tar_archive_finalize(TarArchive* archive);

#ifdef __cplusplus
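tar_archive_get_read_progress() together with the per-entry callback is what lets a caller report progress from the position in the (possibly compressed) archive stream rather than from entry counts alone; as implemented in the .c file above, the two values are byte positions in the underlying file. A hedged sketch of wiring it up — the callback body, tag and logging are illustrative, not part of this change:

#include <furi.h>
#include <toolbox/tar/tar_archive.h>

/* Illustrative per-entry callback: log how far into the archive stream we are. */
static bool example_unpack_progress_cb(const char* name, bool is_directory, void* context) {
    UNUSED(is_directory);
    TarArchive* archive = context;

    int32_t processed = 0;
    int32_t total = 0;
    if(tar_archive_get_read_progress(archive, &processed, &total) && total > 0) {
        FURI_LOG_I("Example", "%s: %ld/%ld", name, (long)processed, (long)total);
    }

    return true; /* returning false would skip the entry */
}

/* Register the callback before tar_archive_unpack_to() / tar_archive_unpack_file(). */
static void example_enable_progress(TarArchive* archive) {
    tar_archive_set_file_callback(archive, example_unpack_progress_cb, archive);
}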
@@ -161,3 +161,9 @@ ResourceManifestEntry*
        return NULL;
    }
}
+
+bool resource_manifest_rewind(ResourceManifestReader* resource_manifest) {
+    furi_assert(resource_manifest);
+
+    return stream_seek(resource_manifest->stream, 0, StreamOffsetFromStart);
+}
@@ -47,6 +47,13 @@ void resource_manifest_reader_free(ResourceManifestReader* resource_manifest);
 */
bool resource_manifest_reader_open(ResourceManifestReader* resource_manifest, const char* filename);

+/**
+ * @brief Rewind manifest to the beginning
+ * @param resource_manifest allocated object
+ * @return true if successful
+ */
+bool resource_manifest_rewind(ResourceManifestReader* resource_manifest);
+
/**
 * @brief Read next file/dir entry from manifest
 * @param resource_manifest allocated object
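resource_manifest_rewind() exists so a caller can take one pass over the manifest (for example, to count entries and weigh progress) and then a second pass to act on them without reopening the file. A rough sketch; the iteration API (resource_manifest_reader_next() returning entries until NULL) and the include path are assumptions about the existing reader, not shown in this diff:

#include <update_util/resource_manifest.h> /* include path assumed */

/* Illustrative: count manifest entries, then rewind for the real processing pass. */
static int32_t example_count_then_rewind(ResourceManifestReader* reader) {
    int32_t entries = 0;
    while(resource_manifest_reader_next(reader) != NULL) { /* signature assumed */
        entries++;
    }

    if(!resource_manifest_rewind(reader)) {
        return -1;
    }

    /* ...second pass over resource_manifest_reader_next() goes here... */
    return entries;
}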
scripts/flipper/assets/heatshrink_stream.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import struct


class HeatshrinkDataStreamHeader:
    MAGIC = 0x53445348
    VERSION = 1

    def __init__(self, window_size, lookahead_size):
        self.window_size = window_size
        self.lookahead_size = lookahead_size

    def pack(self):
        return struct.pack(
            "<IBBB", self.MAGIC, self.VERSION, self.window_size, self.lookahead_size
        )

    @staticmethod
    def unpack(data):
        if len(data) != 7:
            raise ValueError("Invalid header length")
        magic, version, window_size, lookahead_size = struct.unpack("<IBBB", data)
        if magic != HeatshrinkDataStreamHeader.MAGIC:
            raise ValueError("Invalid magic number")
        if version != HeatshrinkDataStreamHeader.VERSION:
            raise ValueError("Invalid version")
        return HeatshrinkDataStreamHeader(window_size, lookahead_size)
scripts/flipper/assets/tarball.py (new file, 41 lines)
@@ -0,0 +1,41 @@
import io
import tarfile

import heatshrink2

from .heatshrink_stream import HeatshrinkDataStreamHeader

FLIPPER_TAR_FORMAT = tarfile.USTAR_FORMAT
TAR_HEATSRINK_EXTENSION = ".ths"


def tar_sanitizer_filter(tarinfo: tarfile.TarInfo):
    tarinfo.gid = tarinfo.uid = 0
    tarinfo.mtime = 0
    tarinfo.uname = tarinfo.gname = "furippa"
    return tarinfo


def compress_tree_tarball(
    src_dir, output_name, filter=tar_sanitizer_filter, hs_window=13, hs_lookahead=6
):
    plain_tar = io.BytesIO()
    with tarfile.open(
        fileobj=plain_tar,
        mode="w:",
        format=FLIPPER_TAR_FORMAT,
    ) as tarball:
        tarball.add(src_dir, arcname="", filter=filter)
    plain_tar.seek(0)

    src_data = plain_tar.read()
    compressed = heatshrink2.compress(
        src_data, window_sz2=hs_window, lookahead_sz2=hs_lookahead
    )

    header = HeatshrinkDataStreamHeader(hs_window, hs_lookahead)
    with open(output_name, "wb") as f:
        f.write(header.pack())
        f.write(compressed)

    return len(src_data), len(compressed)
scripts/hs.py (new executable file, 145 lines)
@@ -0,0 +1,145 @@
#!/usr/bin/env python3

import heatshrink2 as hs
from flipper.app import App
from flipper.assets.heatshrink_stream import HeatshrinkDataStreamHeader
from flipper.assets.tarball import compress_tree_tarball


class HSWrapper(App):
    DEFAULT_WINDOW = 13
    DEFAULT_LOOKAHEAD = 6

    def init(self):
        self.subparsers = self.parser.add_subparsers(
            title="subcommands", dest="subcommand"
        )

        self.parser_compress = self.subparsers.add_parser(
            "compress", help="compress file using heatshrink"
        )
        self.parser_compress.add_argument(
            "-w", "--window", help="window size", type=int, default=self.DEFAULT_WINDOW
        )
        self.parser_compress.add_argument(
            "-l",
            "--lookahead",
            help="lookahead size",
            type=int,
            default=self.DEFAULT_LOOKAHEAD,
        )
        self.parser_compress.add_argument("file", help="file to compress")
        self.parser_compress.add_argument(
            "-o", "--output", help="output file", required=True
        )
        self.parser_compress.set_defaults(func=self.compress)

        self.parser_decompress = self.subparsers.add_parser(
            "decompress", help="decompress file using heatshrink"
        )
        self.parser_decompress.add_argument("file", help="file to decompress")
        self.parser_decompress.add_argument(
            "-o", "--output", help="output file", required=True
        )
        self.parser_decompress.set_defaults(func=self.decompress)

        self.parser_info = self.subparsers.add_parser("info", help="show file info")
        self.parser_info.add_argument("file", help="file to show info for")
        self.parser_info.set_defaults(func=self.info)

        self.parser_tar = self.subparsers.add_parser(
            "tar", help="create a tarball and compress it"
        )
        self.parser_tar.add_argument("dir", help="directory to tar")
        self.parser_tar.add_argument(
            "-o", "--output", help="output file", required=True
        )
        self.parser_tar.add_argument(
            "-w", "--window", help="window size", type=int, default=self.DEFAULT_WINDOW
        )
        self.parser_tar.add_argument(
            "-l",
            "--lookahead",
            help="lookahead size",
            type=int,
            default=self.DEFAULT_LOOKAHEAD,
        )
        self.parser_tar.set_defaults(func=self.tar)

    def compress(self):
        args = self.args

        with open(args.file, "rb") as f:
            data = f.read()

        compressed = hs.compress(
            data, window_sz2=args.window, lookahead_sz2=args.lookahead
        )

        with open(args.output, "wb") as f:
            header = HeatshrinkDataStreamHeader(args.window, args.lookahead)
            f.write(header.pack())
            f.write(compressed)

        self.logger.info(
            f"Compressed {len(data)} bytes to {len(compressed)} bytes, "
            f"compression ratio: {len(compressed) * 100 / len(data):.2f}%"
        )
        return 0

    def decompress(self):
        args = self.args

        with open(args.file, "rb") as f:
            header = HeatshrinkDataStreamHeader.unpack(f.read(7))
            compressed = f.read()

        self.logger.info(
            f"Decompressing with window size {header.window_size} and lookahead size {header.lookahead_size}"
        )

        data = hs.decompress(
            compressed,
            window_sz2=header.window_size,
            lookahead_sz2=header.lookahead_size,
        )

        with open(args.output, "wb") as f:
            f.write(data)

        self.logger.info(f"Decompressed {len(compressed)} bytes to {len(data)} bytes")
        return 0

    def info(self):
        args = self.args

        try:
            with open(args.file, "rb") as f:
                header = HeatshrinkDataStreamHeader.unpack(f.read(7))
        except Exception as e:
            self.logger.error(f"Error: {e}")
            return 1

        self.logger.info(
            f"Window size: {header.window_size}, lookahead size: {header.lookahead_size}"
        )

        return 0

    def tar(self):
        args = self.args

        orig_size, compressed_size = compress_tree_tarball(
            args.dir, args.output, hs_window=args.window, hs_lookahead=args.lookahead
        )

        self.logger.info(
            f"Tarred and compressed {orig_size} bytes to {compressed_size} bytes, "
            f"compression ratio: {compressed_size * 100 / orig_size:.2f}%"
        )

        return 0


if __name__ == "__main__":
    HSWrapper()()
@@ -9,6 +9,7 @@ from os.path import basename, exists, join, relpath

from ansi.color import fg
from flipper.app import App
+from flipper.assets.tarball import FLIPPER_TAR_FORMAT, tar_sanitizer_filter
from update import Main as UpdateMain


@@ -266,20 +267,15 @@ class Main(App):
            ),
            "w:gz",
            compresslevel=9,
-            format=tarfile.USTAR_FORMAT,
+            format=FLIPPER_TAR_FORMAT,
        ) as tar:
            self.note_dist_component(
                "update", "tgz", self.get_dist_path(bundle_tgz)
            )

-            # Strip uid and gid in case of overflow
-            def tar_filter(tarinfo):
-                tarinfo.uid = tarinfo.gid = 0
-                tarinfo.mtime = 0
-                tarinfo.uname = tarinfo.gname = "furippa"
-                return tarinfo
-
-            tar.add(bundle_dir, arcname=bundle_dir_name, filter=tar_filter)
+            tar.add(
+                bundle_dir, arcname=bundle_dir_name, filter=tar_sanitizer_filter
+            )
        return bundle_result


@@ -1,5 +1,6 @@
#!/usr/bin/env python3

+import io
import math
import os
import shutil
@@ -7,9 +8,12 @@ import tarfile
import zlib
from os.path import exists, join

+import heatshrink2
from flipper.app import App
from flipper.assets.coprobin import CoproBinary, get_stack_type
+from flipper.assets.heatshrink_stream import HeatshrinkDataStreamHeader
from flipper.assets.obdata import ObReferenceValues, OptionBytesData
+from flipper.assets.tarball import compress_tree_tarball, tar_sanitizer_filter
from flipper.utils.fff import FlipperFormatFile
from slideshow import Main as SlideshowMain

@@ -20,8 +24,7 @@ class Main(App):

    # No compression, plain tar
    RESOURCE_TAR_MODE = "w:"
-    RESOURCE_TAR_FORMAT = tarfile.USTAR_FORMAT
-    RESOURCE_FILE_NAME = "resources.tar"
+    RESOURCE_FILE_NAME = "resources.ths"  # .Tar.HeatShrink
    RESOURCE_ENTRY_NAME_MAX_LENGTH = 100

    WHITELISTED_STACK_TYPES = set(
@@ -34,6 +37,9 @@ class Main(App):
    FLASH_BASE = 0x8000000
    MIN_LFS_PAGES = 6

+    HEATSHRINK_WINDOW_SIZE = 13
+    HEATSHRINK_LOOKAHEAD_SIZE = 6
+
    # Post-update slideshow
    SPLASH_BIN_NAME = "splash.bin"

@@ -221,23 +227,19 @@ class Main(App):
                f"Cannot package resource: name '{tarinfo.name}' too long"
            )
            raise ValueError("Resource name too long")
-        tarinfo.gid = tarinfo.uid = 0
-        tarinfo.mtime = 0
-        tarinfo.uname = tarinfo.gname = "furippa"
-        return tarinfo
+        return tar_sanitizer_filter(tarinfo)

    def package_resources(self, srcdir: str, dst_name: str):
        try:
-            with tarfile.open(
-                dst_name, self.RESOURCE_TAR_MODE, format=self.RESOURCE_TAR_FORMAT
-            ) as tarball:
-                tarball.add(
-                    srcdir,
-                    arcname="",
-                    filter=self._tar_filter,
-                )
+            src_size, compressed_size = compress_tree_tarball(
+                srcdir, dst_name, filter=self._tar_filter
+            )
+
+            self.logger.info(
+                f"Resources compression ratio: {compressed_size * 100 / src_size:.2f}%"
+            )
            return True
-        except ValueError as e:
+        except Exception as e:
            self.logger.error(f"Cannot package resources: {e}")
            return False

@@ -1,5 +1,5 @@
entry,status,name,type,params
-Version,+,66.2,,
+Version,+,67.0,,
Header,+,applications/services/bt/bt_service/bt.h,,
Header,+,applications/services/bt/bt_service/bt_keys_storage.h,,
Header,+,applications/services/cli/cli.h,,
@@ -780,13 +780,20 @@ Function,+,composite_api_resolver_add,void,"CompositeApiResolver*, const ElfApiI
Function,+,composite_api_resolver_alloc,CompositeApiResolver*,
Function,+,composite_api_resolver_free,void,CompositeApiResolver*
Function,+,composite_api_resolver_get,const ElfApiInterface*,CompositeApiResolver*
-Function,+,compress_alloc,Compress*,uint16_t
+Function,+,compress_alloc,Compress*,"CompressType, const void*"
Function,+,compress_decode,_Bool,"Compress*, uint8_t*, size_t, uint8_t*, size_t, size_t*"
+Function,+,compress_decode_streamed,_Bool,"Compress*, CompressIoCallback, void*, CompressIoCallback, void*"
Function,+,compress_encode,_Bool,"Compress*, uint8_t*, size_t, uint8_t*, size_t, size_t*"
Function,+,compress_free,void,Compress*
Function,+,compress_icon_alloc,CompressIcon*,size_t
Function,+,compress_icon_decode,void,"CompressIcon*, const uint8_t*, uint8_t**"
Function,+,compress_icon_free,void,CompressIcon*
+Function,+,compress_stream_decoder_alloc,CompressStreamDecoder*,"CompressType, const void*, CompressIoCallback, void*"
+Function,+,compress_stream_decoder_free,void,CompressStreamDecoder*
+Function,+,compress_stream_decoder_read,_Bool,"CompressStreamDecoder*, uint8_t*, size_t"
+Function,+,compress_stream_decoder_rewind,_Bool,CompressStreamDecoder*
+Function,+,compress_stream_decoder_seek,_Bool,"CompressStreamDecoder*, size_t"
+Function,+,compress_stream_decoder_tell,size_t,CompressStreamDecoder*
Function,-,copysign,double,"double, double"
Function,-,copysignf,float,"float, float"
Function,-,copysignl,long double,"long double, long double"
@@ -2617,6 +2624,8 @@ Function,+,tar_archive_file_finalize,_Bool,TarArchive*
Function,+,tar_archive_finalize,_Bool,TarArchive*
Function,+,tar_archive_free,void,TarArchive*
Function,+,tar_archive_get_entries_count,int32_t,TarArchive*
+Function,+,tar_archive_get_mode_for_path,TarOpenMode,const char*
+Function,+,tar_archive_get_read_progress,_Bool,"TarArchive*, int32_t*, int32_t*"
Function,+,tar_archive_open,_Bool,"TarArchive*, const char*, TarOpenMode"
Function,+,tar_archive_set_file_callback,void,"TarArchive*, tar_unpack_file_cb, void*"
Function,+,tar_archive_store_data,_Bool,"TarArchive*, const char*, const uint8_t*, const int32_t"
@@ -2816,6 +2825,7 @@ Variable,-,_sys_nerr,int,
Variable,-,ble_profile_hid,const FuriHalBleProfileTemplate*,
Variable,-,ble_profile_serial,const FuriHalBleProfileTemplate*,
Variable,+,cli_vcp,CliSession,
+Variable,+,compress_config_heatshrink_default,const CompressConfigHeatshrink,
Variable,+,firmware_api_interface,const ElfApiInterface*,
Variable,+,furi_hal_i2c_bus_external,FuriHalI2cBus,
Variable,+,furi_hal_i2c_bus_power,FuriHalI2cBus,

@@ -1,5 +1,5 @@
entry,status,name,type,params
-Version,+,66.2,,
+Version,+,67.0,,
Header,+,applications/drivers/subghz/cc1101_ext/cc1101_ext_interconnect.h,,
Header,+,applications/services/bt/bt_service/bt.h,,
Header,+,applications/services/bt/bt_service/bt_keys_storage.h,,
@@ -857,13 +857,20 @@ Function,+,composite_api_resolver_add,void,"CompositeApiResolver*, const ElfApiI
Function,+,composite_api_resolver_alloc,CompositeApiResolver*,
Function,+,composite_api_resolver_free,void,CompositeApiResolver*
Function,+,composite_api_resolver_get,const ElfApiInterface*,CompositeApiResolver*
-Function,+,compress_alloc,Compress*,uint16_t
+Function,+,compress_alloc,Compress*,"CompressType, const void*"
Function,+,compress_decode,_Bool,"Compress*, uint8_t*, size_t, uint8_t*, size_t, size_t*"
+Function,+,compress_decode_streamed,_Bool,"Compress*, CompressIoCallback, void*, CompressIoCallback, void*"
Function,+,compress_encode,_Bool,"Compress*, uint8_t*, size_t, uint8_t*, size_t, size_t*"
Function,+,compress_free,void,Compress*
Function,+,compress_icon_alloc,CompressIcon*,size_t
Function,+,compress_icon_decode,void,"CompressIcon*, const uint8_t*, uint8_t**"
Function,+,compress_icon_free,void,CompressIcon*
+Function,+,compress_stream_decoder_alloc,CompressStreamDecoder*,"CompressType, const void*, CompressIoCallback, void*"
+Function,+,compress_stream_decoder_free,void,CompressStreamDecoder*
+Function,+,compress_stream_decoder_read,_Bool,"CompressStreamDecoder*, uint8_t*, size_t"
+Function,+,compress_stream_decoder_rewind,_Bool,CompressStreamDecoder*
+Function,+,compress_stream_decoder_seek,_Bool,"CompressStreamDecoder*, size_t"
+Function,+,compress_stream_decoder_tell,size_t,CompressStreamDecoder*
Function,-,copysign,double,"double, double"
Function,-,copysignf,float,"float, float"
Function,-,copysignl,long double,"long double, long double"
@@ -3451,6 +3458,8 @@ Function,+,tar_archive_file_finalize,_Bool,TarArchive*
Function,+,tar_archive_finalize,_Bool,TarArchive*
Function,+,tar_archive_free,void,TarArchive*
Function,+,tar_archive_get_entries_count,int32_t,TarArchive*
+Function,+,tar_archive_get_mode_for_path,TarOpenMode,const char*
+Function,+,tar_archive_get_read_progress,_Bool,"TarArchive*, int32_t*, int32_t*"
Function,+,tar_archive_open,_Bool,"TarArchive*, const char*, TarOpenMode"
Function,+,tar_archive_set_file_callback,void,"TarArchive*, tar_unpack_file_cb, void*"
Function,+,tar_archive_store_data,_Bool,"TarArchive*, const char*, const uint8_t*, const int32_t"
@@ -3650,6 +3659,7 @@ Variable,-,_sys_nerr,int,
Variable,-,ble_profile_hid,const FuriHalBleProfileTemplate*,
Variable,-,ble_profile_serial,const FuriHalBleProfileTemplate*,
Variable,+,cli_vcp,CliSession,
+Variable,+,compress_config_heatshrink_default,const CompressConfigHeatshrink,
Variable,+,firmware_api_interface,const ElfApiInterface*,
Variable,+,furi_hal_i2c_bus_external,FuriHalI2cBus,
Variable,+,furi_hal_i2c_bus_power,FuriHalI2cBus,