File Coverage

_aich.c
Criterion Covered Total %
statement 32 213 15.0
branch 11 166 6.6
condition n/a
subroutine n/a
pod n/a
total 43 379 11.3


line stmt bran cond sub pod time code
1             /* aich.c - an implementation of EMule AICH Algorithm.
2             * Description: http://www.amule.org/wiki/index.php/AICH.
3             *
4             * Copyright (c) 2008, Aleksey Kravchenko
5             *
6             * Permission to use, copy, modify, and/or distribute this software for any
7             * purpose with or without fee is hereby granted.
8             *
9             * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
10             * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11             * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
12             * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13             * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14             * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15             * PERFORMANCE OF THIS SOFTWARE.
16             *
17             * The AICH Algorithm:
18             *
19             * Each ed2k chunk (9728000 bytes) is divided into 53 parts (52x 180KB and
20             * 1x 140KB) and each of these parts are hashed using the SHA1 algorithm.
21             * Each of these hashes is called a Block Hash. By combining pairs of Block
22             * Hashes (i.e. each part with the part next to it) the algorithm will get a whole
23             * tree of hashes (this tree which is therefore a hashset made of all of the
24             * other Block Hashes is called the AICH Hashset). Each hash which is neither
25             * a Block Hash nor the Root Hash, is a Verifying Hash. The hash at the top
26             * level is the Root Hash and it is supposed to be provided by the ed2k link
27             * when releasing.
28             */
29              
30             #include "aich.h"
31             #include "util.h"
32             #include <assert.h>
33             #include <stddef.h>
34             #include <stdlib.h>
35             #include <string.h>
36              
37             #if defined(USE_OPENSSL)
38             #define SHA1_INIT(ctx) ((pinit_t)ctx->sha1_methods.init)(&ctx->sha1_context)
39             #define SHA1_UPDATE(ctx, msg, size) ((pupdate_t)ctx->sha1_methods.update)(&ctx->sha1_context, (msg), (size))
40             #define SHA1_FINAL(ctx, result) ((pfinal_t)ctx->sha1_methods.final)(&ctx->sha1_context, (result))
41             #else
42             # define SHA1_INIT(ctx) rhash_sha1_init(&ctx->sha1_context)
43             # define SHA1_UPDATE(ctx, msg, size) rhash_sha1_update(&ctx->sha1_context, (msg), (size))
44             # define SHA1_FINAL(ctx, result) rhash_sha1_final(&ctx->sha1_context, (result))
45             #endif
46              
47             #define ED2K_CHUNK_SIZE 9728000
48             #define FULL_BLOCK_SIZE 184320
49             #define LAST_BLOCK_SIZE 143360
50             #define BLOCKS_PER_CHUNK 53
51             #define BLOCK_HASHES_SIZE (BLOCKS_PER_CHUNK * sha1_hash_size)
52              
53             /*
54             * AICH algorithm could be implemented a bit faster if it knows
55             * a hashed message size beforehand. It would allow
56             * to build balanced tree while hashing the message.
57             *
58             * This AICH implementation works with unknown
59             * message size like other well-known hash algorithms.
60             * So, it just stores sha1 hashes and builds balanced tree
61             * only on the last step, when the full message has been processed
62             * and its size got to be known.
63             */
64              
65             /**
66             * Initialize algorithm context before calculating hash.
67             *
68             * @param ctx context to initialize
69             */
70 2           void rhash_aich_init(aich_ctx* ctx)
71             {
72 2           memset(ctx, 0, sizeof(aich_ctx));
73              
74             #if defined(USE_OPENSSL)
75             assert(rhash_info_table[3].info->hash_id == RHASH_SHA1);
76             assert(rhash_info_table[3].context_size <= (sizeof(sha1_ctx) + sizeof(unsigned long)));
77             rhash_load_sha1_methods(&ctx->sha1_methods, METHODS_SELECTED);
78             #endif
79              
80 2           SHA1_INIT(ctx);
81 2           }
82              
83             /* define macros to access the chunk table */
84             #define CT_BITS 8
85             #define CT_GROUP_SIZE (1 << CT_BITS)
86             typedef unsigned char hash_pair_t[2][sha1_hash_size];
87             typedef hash_pair_t hash_pairs_group_t[CT_GROUP_SIZE];
88              
89             #define CT_INDEX(chunk_num) ((chunk_num) & (CT_GROUP_SIZE - 1))
90             #define GET_HASH_PAIR(ctx, chunk_num) \
91             (((hash_pair_t*)(ctx->chunk_table[chunk_num >> CT_BITS]))[CT_INDEX(chunk_num)])
92              
93             /**
94             * Resize the table if needed to ensure it contains space for given chunk_num.
95             * and allocate hash_pairs_group_t element at this index.
96             *
97             * @param ctx algorithm context
98             * @param chunk_num the number of chunks required
99             */
100 0           static void rhash_aich_chunk_table_extend(aich_ctx* ctx, unsigned chunk_num)
101             {
102 0           unsigned index = (chunk_num >> CT_BITS);
103 0 0         assert(CT_INDEX(chunk_num) == 0);
104             RHASH_ASSERT(sizeof(hash_pair_t) == 40);
105             RHASH_ASSERT(sizeof(hash_pairs_group_t) == (40 * CT_GROUP_SIZE)); /* 10KiB */
106             RHASH_ASSERT(CT_GROUP_SIZE == 256);
107              
108             /* check main assumptions */
109 0 0         assert(ctx->chunk_table == 0 || ctx->chunk_table[index - 1] != 0); /* table is empty or full */
    0          
110 0 0         assert(index <= ctx->allocated);
111              
112             /* check if there is enough space allocated */
113 0 0         if (index >= ctx->allocated) {
114             /* resize the table by allocating some extra space */
115 0 0         size_t new_size = (ctx->allocated == 0 ? 64 : ctx->allocated * 2);
116             void** new_block;
117 0 0         assert(index == ctx->allocated);
118              
119             /* re-size the chunk table to new_size */
120 0           new_block = (void**)realloc(ctx->chunk_table, new_size * sizeof(void*));
121 0 0         if (new_block == 0) {
122 0           free(ctx->chunk_table);
123 0           ctx->chunk_table = 0;
124 0           ctx->error = 1;
125 0           return;
126             }
127              
128 0           memset(new_block + ctx->allocated, 0, (new_size - ctx->allocated) * sizeof(void*));
129 0           ctx->chunk_table = new_block;
130 0           ctx->allocated = new_size;
131             }
132              
133             /* add new hash_pairs_group_t block to the table */
134 0 0         assert(index < ctx->allocated);
135 0 0         assert(ctx->chunk_table != 0);
136 0 0         assert(ctx->chunk_table[index] == 0);
137              
138 0           ctx->chunk_table[index] = malloc(sizeof(hash_pairs_group_t));
139 0 0         if (ctx->chunk_table[index] == 0)
140 0           ctx->error = 1;
141             }
142              
143             /**
144             * Free dynamically allocated memory for internal structures
145             * used by hashing algorithm.
146             *
147             * @param ctx AICH algorithm context to cleanup
148             */
149 1           void rhash_aich_cleanup(aich_ctx* ctx)
150             {
151             size_t i;
152 1           size_t table_size = (ctx->chunks_count + CT_GROUP_SIZE - 1) / CT_GROUP_SIZE;
153              
154 1 50         if (ctx->chunk_table != 0) {
155 0 0         assert(table_size <= ctx->allocated);
156 0 0         assert(table_size == ctx->allocated || ctx->chunk_table[table_size] == 0);
    0          
157 0 0         for (i = 0; i < table_size; i++)
158 0           free(ctx->chunk_table[i]);
159 0           free(ctx->chunk_table);
160 0           ctx->chunk_table = 0;
161             }
162              
163 1           free(ctx->block_hashes);
164 1           ctx->block_hashes = 0;
165 1           }
166              
167             #define AICH_HASH_FULL_TREE 0
168             #define AICH_HASH_LEFT_BRANCH 1
169             #define AICH_HASH_RIGHT_BRANCH 2
170              
171             /**
172             * Calculate an AICH tree hash, based either on hashes of 180KB parts
173             * (for an ed2k chunk) or on stored ed2k chunks (for the whole tree hash).
174             *
175             * @param ctx algorithm context
176             * @param result pointer to receive calculated tree hash
177             * @param type the type of hash to calculate, can be one of constants
178             * AICH_HASH_LEFT_BRANCH, AICH_HASH_RIGHT_BRANCH or AICH_HASH_FULL_TREE.
179             */
180 0           static void rhash_aich_hash_tree(aich_ctx* ctx, unsigned char* result, int type)
181             {
182 0           unsigned index = 0; /* leaf index */
183             unsigned blocks;
184 0           int level = 0;
185 0           unsigned is_left_branch = (type == AICH_HASH_RIGHT_BRANCH ? 0x0 : 0x1);
186 0           uint64_t path = is_left_branch;
187             unsigned blocks_stack[56];
188             unsigned char sha1_stack[56][sha1_hash_size];
189              
190 0 0         if (ctx->error)
191 0           return;
192 0 0         assert(ctx->index <= ED2K_CHUNK_SIZE);
193 0 0         assert(type == AICH_HASH_FULL_TREE ? ctx->chunk_table != 0 : ctx->block_hashes != 0);
    0          
194              
195             /* calculate number of leafs in the tree */
196 0 0         blocks_stack[0] = blocks = (unsigned)(type == AICH_HASH_FULL_TREE ?
197 0           ctx->chunks_count : (ctx->index + FULL_BLOCK_SIZE - 1) / FULL_BLOCK_SIZE);
198              
199 0           while (1) {
200             unsigned char sha1_message[sha1_hash_size];
201             unsigned char* leaf_hash;
202              
203             /* go into the left branches until a leaf block is reached */
204 0 0         while (blocks > 1) {
205             /* step down into the left branch */
206 0           blocks = (blocks + ((unsigned)path & 0x1)) / 2;
207 0           level++;
208 0 0         assert(level < 56); /* assumption filesize < (2^56 * 9MiB) */
209 0           blocks_stack[level] = blocks;
210 0           path = (path << 1) | 0x1; /* mark branch as left */
211             }
212              
213             /* read a leaf hash */
214 0           leaf_hash = &(ctx->block_hashes[index][0]);
215              
216 0 0         if (type == AICH_HASH_FULL_TREE) {
217 0           is_left_branch = (unsigned)path & 0x1;
218              
219 0           leaf_hash = GET_HASH_PAIR(ctx, index)[is_left_branch];
220             }
221 0           index++;
222              
223             /* climb up the tree until a left branch is reached */
224 0 0         for (; level > 0 && (path & 0x01) == 0; path >>= 1) {
    0          
225 0           SHA1_INIT(ctx);
226 0           SHA1_UPDATE(ctx, sha1_stack[level], sha1_hash_size);
227 0           SHA1_UPDATE(ctx, leaf_hash, sha1_hash_size);
228 0           SHA1_FINAL(ctx, sha1_message);
229 0           leaf_hash = sha1_message;
230 0           level--;
231             }
232 0 0         memcpy((level > 0 ? sha1_stack[level] : result), leaf_hash, 20);
233              
234 0 0         if (level == 0) break;
235              
236             /* jump at the current level from left to right branch */
237 0           path &= ~0x1; /* mark branch as right */
238 0           is_left_branch = ((unsigned)path >> 1) & 1;
239              
240             /* calculate number of blocks at right branch of the current level */
241 0           blocks_stack[level] =
242 0           (blocks_stack[level - 1] + 1 - is_left_branch) / 2;
243 0           blocks = blocks_stack[level];
244             }
245             }
246              
247             #define AICH_PROCESS_FINAL_BLOCK 1
248             #define AICH_PROCESS_FLUSH_BLOCK 2
249              
250             /**
251             * Calculate and store a hash for a 180K/140K block.
252             * Also, if it is the last block of a 9.2MiB ed2k chunk or of the hashed message,
253             * then also calculate the AICH tree-hash of the current ed2k chunk.
254             *
255             * @param ctx algorithm context
256             * @param type the actions to take, can be combination of bits AICH_PROCESS_FINAL_BLOCK
257             * and AICH_PROCESS_FLUSH_BLOCK
258             */
259 0           static void rhash_aich_process_block(aich_ctx* ctx, int type)
260             {
261 0 0         assert(type != 0);
262 0 0         assert(ctx->index <= ED2K_CHUNK_SIZE);
263              
264             /* if there is unprocessed data left in the current 180K block. */
265 0 0         if ((type & AICH_PROCESS_FLUSH_BLOCK) != 0)
266             {
267             /* ensure that the block_hashes array is allocated to save the result */
268 0 0         if (ctx->block_hashes == NULL) {
269 0           ctx->block_hashes = (unsigned char (*)[sha1_hash_size])malloc(BLOCK_HASHES_SIZE);
270 0 0         if (ctx->block_hashes == NULL) {
271 0           ctx->error = 1;
272 0           return;
273             }
274             }
275              
276             /* store the 180-KiB block hash to the block_hashes array */
277 0 0         assert(((ctx->index - 1) / FULL_BLOCK_SIZE) < BLOCKS_PER_CHUNK);
278 0           SHA1_FINAL(ctx, ctx->block_hashes[(ctx->index - 1) / FULL_BLOCK_SIZE]);
279             }
280              
281             /* check, if it's time to calculate the tree hash for the current ed2k chunk */
282 0 0         if (ctx->index >= ED2K_CHUNK_SIZE || (type & AICH_PROCESS_FINAL_BLOCK)) {
    0          
283             unsigned char (*pair)[sha1_hash_size];
284              
285             /* ensure, that we have the space to store tree hash */
286 0 0         if (CT_INDEX(ctx->chunks_count) == 0) {
287 0           rhash_aich_chunk_table_extend(ctx, (unsigned)ctx->chunks_count);
288 0 0         if (ctx->error)
289 0           return;
290             }
291 0 0         assert(ctx->chunk_table != 0);
292 0 0         assert(ctx->block_hashes != 0);
293              
294             /* calculate tree hash and save results to chunk_table */
295 0           pair = GET_HASH_PAIR(ctx, ctx->chunks_count);
296              
297             /* small optimization: skip a left-branch-hash for the last chunk */
298 0 0         if (!(type & AICH_PROCESS_FINAL_BLOCK) || ctx->chunks_count == 0) {
    0          
299             /* calculate a tree hash to be used in left branch */
300 0           rhash_aich_hash_tree(ctx, pair[1], AICH_HASH_LEFT_BRANCH);
301             }
302              
303             /* small optimization: skip right-branch-hash for the very first chunk */
304 0 0         if (ctx->chunks_count > 0) {
305             /* calculate a tree hash to be used in right branch */
306 0           rhash_aich_hash_tree(ctx, pair[0], AICH_HASH_RIGHT_BRANCH);
307             }
308              
309 0           ctx->index = 0; /* mark that the entire ed2k chunk has been processed */
310 0           ctx->chunks_count++;
311             }
312             }
313              
314             /**
315             * Calculate message hash.
316             * Can be called repeatedly with chunks of the message to be hashed.
317             *
318             * @param ctx the algorithm context containing current hashing state
319             * @param msg message chunk
320             * @param size length of the message chunk
321             */
322 2           void rhash_aich_update(aich_ctx* ctx, const unsigned char* msg, size_t size)
323             {
324 2 50         if (ctx->error)
325 0           return;
326 2 50         while (size > 0) {
327 2           unsigned left_in_chunk = ED2K_CHUNK_SIZE - ctx->index;
328 2 50         unsigned block_left = (left_in_chunk <= LAST_BLOCK_SIZE ? left_in_chunk :
329 2           FULL_BLOCK_SIZE - ctx->index % FULL_BLOCK_SIZE);
330 2 50         assert(block_left > 0);
331              
332 2 50         if (size >= block_left) {
333 0           SHA1_UPDATE(ctx, msg, block_left);
334 0           msg += block_left;
335 0           size -= block_left;
336 0           ctx->index += block_left;
337              
338             /* process a 180KiB block */
339 0           rhash_aich_process_block(ctx, AICH_PROCESS_FLUSH_BLOCK);
340 0           SHA1_INIT(ctx);
341             } else {
342             /* add to a leaf block */
343 2           SHA1_UPDATE(ctx, msg, size);
344 2           ctx->index += (unsigned)size;
345 2           break;
346             }
347             }
348 2 50         assert(ctx->index < ED2K_CHUNK_SIZE);
349             }
350              
351             /**
352             * Store calculated hash into the given array.
353             *
354             * @param ctx the algorithm context containing current hashing state
355             * @param result calculated hash in binary form
356             */
357 2           void rhash_aich_final(aich_ctx* ctx, unsigned char result[20])
358             {
359 2           uint64_t total_size =
360 2           ((uint64_t)ctx->chunks_count * ED2K_CHUNK_SIZE) + ctx->index;
361 2           unsigned char* const hash = (unsigned char*)ctx->sha1_context.hash;
362              
363 2 50         if (ctx->chunks_count == 0 && ctx->block_hashes == NULL) {
    50          
364 2 50         assert(ctx->index < FULL_BLOCK_SIZE);
365             #if defined(USE_OPENSSL)
366             SHA1_FINAL(ctx, hash); /* return just sha1 hash */
367             #else
368 2           SHA1_FINAL(ctx, 0); /* return just sha1 hash */
369             #if IS_LITTLE_ENDIAN
370 2           rhash_u32_mem_swap(ctx->sha1_context.hash, 5);
371             #endif
372             #endif
373 2 50         if (result) memcpy(result, hash, sha1_hash_size);
374 2           return;
375             }
376              
377             /* if there is unprocessed data left in the last 180K block */
378 0 0         if ((ctx->index % FULL_BLOCK_SIZE) > 0) {
379             /* then process the last block */
380 0 0         rhash_aich_process_block(ctx, ctx->block_hashes != NULL ?
381             AICH_PROCESS_FINAL_BLOCK | AICH_PROCESS_FLUSH_BLOCK : AICH_PROCESS_FLUSH_BLOCK);
382             }
383              
384             /* if processed message was shorter than a ed2k chunk */
385 0 0         if (ctx->chunks_count == 0) {
386             /* then return the aich hash for the first chunk */
387 0           rhash_aich_hash_tree(ctx, hash, AICH_HASH_LEFT_BRANCH);
388             } else {
389 0 0         if (ctx->index > 0) {
390             /* process the last block of the message */
391 0           rhash_aich_process_block(ctx, AICH_PROCESS_FINAL_BLOCK);
392             }
393 0 0         assert(ctx->chunks_count > 0);
394 0 0         assert(ctx->block_hashes != NULL);
395              
396 0           rhash_aich_hash_tree(ctx, hash, AICH_HASH_FULL_TREE);
397             }
398              
399 0           rhash_aich_cleanup(ctx);
400 0           ctx->sha1_context.length = total_size; /* store total message size */
401 0 0         if (result) memcpy(result, hash, sha1_hash_size);
402             }
403              
404             #if !defined(NO_IMPORT_EXPORT)
405             # define AICH_CTX_OSSL_FLAG 0x10
406              
407             /**
408             * Export aich context to a memory region, or calculate the
409             * size required for context export.
410             *
411             * @param ctx the algorithm context containing current hashing state
412             * @param out pointer to the memory region or NULL
413             * @param size size of memory region
414             * @return the size of the exported data on success, 0 on fail.
415             */
416 0           size_t rhash_aich_export(const aich_ctx* ctx, void* out, size_t size)
417             {
418 0           const size_t head_size = sizeof(size_t);
419 0           const size_t ctx_head_size = offsetof(aich_ctx, block_hashes);
420 0 0         const size_t block_hashes_size = (ctx->block_hashes ? BLOCK_HASHES_SIZE : 0);
421 0           const size_t chunk_table_size = sizeof(hash_pair_t) * ctx->chunks_count;
422 0           const size_t exported_size = head_size + ctx_head_size + block_hashes_size + chunk_table_size;
423 0           char* out_ptr = (char*)out;
424 0 0         if (!out)
425 0           return exported_size;
426 0 0         if (size < exported_size)
427 0           return 0;
428 0           *(size_t*)out_ptr = sizeof(aich_ctx);
429 0           out_ptr += head_size;
430 0           memcpy(out_ptr, ctx, ctx_head_size);
431 0           out_ptr += ctx_head_size;
432 0 0         if (ctx->block_hashes) {
433 0           memcpy(out_ptr, ctx->block_hashes, BLOCK_HASHES_SIZE);
434 0           out_ptr += BLOCK_HASHES_SIZE;
435             }
436 0 0         if (chunk_table_size > 0) {
437 0           size_t left_size = chunk_table_size;
438             size_t index;
439 0 0         assert(ctx->chunk_table != NULL);
440 0 0         for (index = 0; left_size > 0; index++) {
441 0           size_t group_size = (left_size < sizeof(hash_pairs_group_t) ?
442             left_size : sizeof(hash_pairs_group_t));
443 0           memcpy(out_ptr, ctx->chunk_table[index], group_size);
444 0           out_ptr += group_size;
445 0           left_size -= group_size;
446             }
447 0 0         assert(left_size == 0);
448             }
449 0 0         assert(!out || (size_t)(out_ptr - (char*)out) == exported_size);
    0          
450             #if defined(USE_OPENSSL)
451             if (out_ptr && ARE_OPENSSL_METHODS(ctx->sha1_methods)) {
452             int* error_ptr = (int*)((char*)out + head_size + offsetof(aich_ctx, error));
453             *error_ptr |= AICH_CTX_OSSL_FLAG;
454             RHASH_ASSERT(sizeof(*error_ptr) == sizeof(ctx->error));
455             }
456             #endif
457 0           return exported_size;
458             }
459              
460             /**
461             * Import aich context from a memory region.
462             *
463             * @param ctx pointer to the algorithm context
464             * @param in pointer to the data to import
465             * @param size size of data to import
466             * @return the size of the imported data on success, 0 on fail.
467             */
468 0           size_t rhash_aich_import(aich_ctx* ctx, const void* in, size_t size)
469             {
470 0           const size_t head_size = sizeof(size_t);
471 0           const size_t ctx_head_size = offsetof(aich_ctx, block_hashes);
472 0           const char* in_ptr = (const char*)in;
473 0           size_t imported_size = head_size + ctx_head_size;
474             size_t block_hashes_size;
475             size_t chunk_table_size;
476 0 0         if (size < imported_size)
477 0           return 0;
478 0 0         if(*(size_t*)in_ptr != sizeof(aich_ctx))
479 0           return 0;
480 0           in_ptr += head_size;
481 0           memset(ctx, 0, sizeof(aich_ctx));
482 0           memcpy(ctx, in_ptr, ctx_head_size);
483 0           in_ptr += ctx_head_size;
484 0 0         block_hashes_size = (ctx->block_hashes ? BLOCK_HASHES_SIZE : 0);
485 0           chunk_table_size = sizeof(hash_pair_t) * ctx->chunks_count;
486 0           imported_size += block_hashes_size + chunk_table_size;
487 0 0         if (size < imported_size)
488 0           return 0;
489 0 0         if (ctx->block_hashes != NULL) {
490 0           ctx->block_hashes = (unsigned char (*)[sha1_hash_size])malloc(BLOCK_HASHES_SIZE);
491 0 0         if (!ctx->block_hashes)
492 0           return 0;
493 0           memcpy(ctx->block_hashes, in_ptr, BLOCK_HASHES_SIZE);
494 0           in_ptr += BLOCK_HASHES_SIZE;
495             }
496 0 0         if (ctx->allocated > 0) {
497             size_t index;
498 0           ctx->chunk_table = (void**)malloc(ctx->allocated * sizeof(void*));
499 0 0         if (!ctx->chunk_table) {
500 0           ctx->error = 1;
501 0           return 0;
502             }
503 0           memset(ctx->chunk_table, 0, ctx->allocated * sizeof(void*));
504 0 0         for (index = 0; chunk_table_size > 0; index++) {
505 0           size_t group_size = (chunk_table_size < sizeof(hash_pairs_group_t) ?
506             chunk_table_size : sizeof(hash_pairs_group_t));
507 0 0         assert(index < ctx->allocated);
508 0           ctx->chunk_table[index] = malloc(sizeof(hash_pairs_group_t));
509 0 0         if (ctx->chunk_table[index] == 0) {
510 0           ctx->error = 1;
511 0           return 0;
512             }
513 0           memcpy(ctx->chunk_table[index], in_ptr, group_size);
514 0           chunk_table_size -= group_size;
515 0           in_ptr += group_size;
516             }
517             }
518 0 0         assert((size_t)(in_ptr - (char*)in) == imported_size);
519             #if defined(USE_OPENSSL)
520             if ((ctx->error & AICH_CTX_OSSL_FLAG) != 0) {
521             ctx->error &= ~AICH_CTX_OSSL_FLAG;
522             rhash_load_sha1_methods(&ctx->sha1_methods, METHODS_OPENSSL);
523             } else {
524             rhash_load_sha1_methods(&ctx->sha1_methods, METHODS_RHASH);
525             }
526             #endif
527 0           return imported_size;
528             }
529             #endif /* !defined(NO_IMPORT_EXPORT) */