From 2b81156f2a64ae99a4d431983e7c5a4ec7b817f7 Mon Sep 17 00:00:00 2001 From: Dmitry Stogov Date: Fri, 24 Dec 2021 13:10:22 +0300 Subject: [PATCH 01/17] Fix memory leak in SCCP Fixes oss-fuzz #42878 --- ext/opcache/Optimizer/sccp.c | 1 + ext/opcache/tests/opt/sccp_035.phpt | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 ext/opcache/tests/opt/sccp_035.phpt diff --git a/ext/opcache/Optimizer/sccp.c b/ext/opcache/Optimizer/sccp.c index bdeb883aa562a..0818a20e0bf97 100644 --- a/ext/opcache/Optimizer/sccp.c +++ b/ext/opcache/Optimizer/sccp.c @@ -1547,6 +1547,7 @@ static void sccp_visit_instr(scdf_ctx *scdf, zend_op *opline, zend_ssa_op *ssa_o if (opline->opcode == ZEND_PRE_INC_OBJ || opline->opcode == ZEND_PRE_DEC_OBJ) { SET_RESULT(result, &tmp2); + zval_ptr_dtor_nogc(&tmp1); } else { SET_RESULT(result, &tmp1); } diff --git a/ext/opcache/tests/opt/sccp_035.phpt b/ext/opcache/tests/opt/sccp_035.phpt new file mode 100644 index 0000000000000..86965b13b5f3d --- /dev/null +++ b/ext/opcache/tests/opt/sccp_035.phpt @@ -0,0 +1,17 @@ +--TEST-- +SCCP 035: memory leak +--INI-- +opcache.enable=1 +opcache.enable_cli=1 +opcache.optimization_level=-1 +--FILE-- +$b = ~$b = $a=''; + $obj->$a--; +} +?> +DONE +--EXPECT-- +DONE From eee3b1d917591e5106aef9fd4fe93393e95fc13d Mon Sep 17 00:00:00 2001 From: Dmitry Stogov Date: Fri, 24 Dec 2021 15:36:52 +0300 Subject: [PATCH 02/17] Bucket->key must be removed before destructor call, because destructor may update the same HashTable. Fixes oss-fuzz #42894 --- Zend/zend_hash.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Zend/zend_hash.c b/Zend/zend_hash.c index 680263b5cb07b..6e809c873417a 100644 --- a/Zend/zend_hash.c +++ b/Zend/zend_hash.c @@ -1431,11 +1431,11 @@ static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bu } } - _zend_hash_del_el_ex(ht, idx, p, prev); if (p->key) { zend_string_release(p->key); p->key = NULL; } + _zend_hash_del_el_ex(ht, idx, p, prev); } ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv) @@ -1476,9 +1476,9 @@ ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key (p->h == h && p->key && zend_string_equal_content(p->key, key))) { - _zend_hash_del_el_ex(ht, idx, p, prev); zend_string_release(p->key); p->key = NULL; + _zend_hash_del_el_ex(ht, idx, p, prev); return SUCCESS; } prev = p; @@ -1525,9 +1525,9 @@ ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND; } } else { - _zend_hash_del_el_ex(ht, idx, p, prev); zend_string_release(p->key); p->key = NULL; + _zend_hash_del_el_ex(ht, idx, p, prev); } return SUCCESS; } @@ -1571,9 +1571,9 @@ ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const ch HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND; } } else { - _zend_hash_del_el_ex(ht, idx, p, prev); zend_string_release(p->key); p->key = NULL; + _zend_hash_del_el_ex(ht, idx, p, prev); } return SUCCESS; } @@ -1604,9 +1604,9 @@ ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char * && p->key && (ZSTR_LEN(p->key) == len) && !memcmp(ZSTR_VAL(p->key), str, len)) { - _zend_hash_del_el_ex(ht, idx, p, prev); zend_string_release(p->key); p->key = NULL; + _zend_hash_del_el_ex(ht, idx, p, prev); return SUCCESS; } prev = p; From 46d1e503dd8c9d4a73805f5c291f1954b17288e8 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 12:17:29 +0100 Subject: [PATCH 03/17] Remove redundant code in 
zend_optimizer_replace_by_const() zend_optimizer_update_op1_const() already handles these cases. --- Zend/Optimizer/zend_optimizer.c | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index ebc76017c6024..603e9e8d399c9 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -210,6 +210,9 @@ bool zend_optimizer_update_op1_const(zend_op_array *op_array, case ZEND_RETURN_BY_REF: case ZEND_INSTANCEOF: case ZEND_MAKE_REF: + case ZEND_SEPARATE: + case ZEND_SEND_VAR_NO_REF: + case ZEND_SEND_VAR_NO_REF_EX: return 0; case ZEND_CATCH: REQUIRES_STRING(val); @@ -289,10 +292,6 @@ bool zend_optimizer_update_op1_const(zend_op_array *op_array, opline->opcode = ZEND_IS_IDENTICAL; opline->op1.constant = zend_optimizer_add_literal(op_array, val); break; - case ZEND_SEPARATE: - case ZEND_SEND_VAR_NO_REF: - case ZEND_SEND_VAR_NO_REF_EX: - return 0; case ZEND_VERIFY_RETURN_TYPE: /* This would require a non-local change. * zend_optimizer_replace_by_const() supports this. */ @@ -540,26 +539,11 @@ bool zend_optimizer_replace_by_const(zend_op_array *op_array, if (opline->op1_type == type && opline->op1.var == var) { switch (opline->opcode) { - case ZEND_FETCH_DIM_W: - case ZEND_FETCH_DIM_RW: - case ZEND_FETCH_DIM_FUNC_ARG: - case ZEND_FETCH_DIM_UNSET: - case ZEND_FETCH_LIST_W: - case ZEND_ASSIGN_DIM: - case ZEND_SEPARATE: - case ZEND_RETURN_BY_REF: - return 0; - case ZEND_SEND_VAR: - opline->extended_value = 0; - opline->opcode = ZEND_SEND_VAL; - break; case ZEND_SEND_VAR_EX: case ZEND_SEND_FUNC_ARG: opline->extended_value = 0; opline->opcode = ZEND_SEND_VAL_EX; break; - case ZEND_SEND_VAR_NO_REF: - return 0; case ZEND_SEND_VAR_NO_REF_EX: opline->opcode = ZEND_SEND_VAL; break; From 4ad9dbbac9398a73cd9b7851a4106a1d39ea10d2 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 12:34:02 +0100 Subject: [PATCH 04/17] Don't replace SEND opcodes with different by-ref behavior update_op1_const() implements the right logic here -- these cannot be replaced by different opcodes, as the by-ref passing behavior is not the same. --- Zend/Optimizer/zend_optimizer.c | 11 ----------- Zend/tests/by_ref_optimization.phpt | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+), 11 deletions(-) create mode 100644 Zend/tests/by_ref_optimization.phpt diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index 603e9e8d399c9..0f1453cfdb4f7 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -539,17 +539,6 @@ bool zend_optimizer_replace_by_const(zend_op_array *op_array, if (opline->op1_type == type && opline->op1.var == var) { switch (opline->opcode) { - case ZEND_SEND_VAR_EX: - case ZEND_SEND_FUNC_ARG: - opline->extended_value = 0; - opline->opcode = ZEND_SEND_VAL_EX; - break; - case ZEND_SEND_VAR_NO_REF_EX: - opline->opcode = ZEND_SEND_VAL; - break; - case ZEND_SEND_USER: - opline->opcode = ZEND_SEND_VAL_EX; - break; /* In most cases IS_TMP_VAR operand may be used only once. * The operands are usually destroyed by the opcode handler. * However, there are some exception which keep the operand alive. 
In that case diff --git a/Zend/tests/by_ref_optimization.phpt b/Zend/tests/by_ref_optimization.phpt new file mode 100644 index 0000000000000..02431b6cbea4e --- /dev/null +++ b/Zend/tests/by_ref_optimization.phpt @@ -0,0 +1,19 @@ +--TEST-- +Don't optimize send opcodes that differ in by-ref behavior +--FILE-- + +--EXPECTF-- +Warning: ref(): Argument #1 ($x) must be passed by reference, value given in %s on line %d +bool(true) + +Notice: Only variables should be passed by reference in %s on line %d +bool(true) From 36dafade51080d4b42a01eda430e511797d472fd Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 12:39:52 +0100 Subject: [PATCH 05/17] Remove unnecessary try_replace_op1() special cases update_op1_const() can handle these nowadays. --- Zend/Optimizer/sccp.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/Zend/Optimizer/sccp.c b/Zend/Optimizer/sccp.c index 4876a198cb3a7..582b64879392e 100644 --- a/Zend/Optimizer/sccp.c +++ b/Zend/Optimizer/sccp.c @@ -297,23 +297,6 @@ static bool try_replace_op1( } else { // TODO: check the following special cases ??? switch (opline->opcode) { - case ZEND_CASE: - opline->opcode = ZEND_IS_EQUAL; - goto replace_op1_simple; - case ZEND_CASE_STRICT: - opline->opcode = ZEND_IS_IDENTICAL; - goto replace_op1_simple; - case ZEND_FETCH_LIST_R: - case ZEND_SWITCH_STRING: - case ZEND_SWITCH_LONG: - case ZEND_MATCH: -replace_op1_simple: - if (Z_TYPE(zv) == IS_STRING) { - zend_string_hash_val(Z_STR(zv)); - } - opline->op1.constant = zend_optimizer_add_literal(ctx->scdf.op_array, &zv); - opline->op1_type = IS_CONST; - return 1; case ZEND_INSTANCEOF: zval_ptr_dtor_nogc(&zv); ZVAL_FALSE(&zv); From 98dfde2c14e81e03cfe810c5b89a62f43f800aa8 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 13:02:10 +0100 Subject: [PATCH 06/17] Remove unnecessary INSTANCEOF special case in sccp While we can't replace the instanceof operand, we will evaluate the instanceof to false and replace its result anyway. Even in cases where the instanceof user cannot be replaced, we already have generic code to convert the opcode to QM_ASSIGN in that case. --- Zend/Optimizer/sccp.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/Zend/Optimizer/sccp.c b/Zend/Optimizer/sccp.c index 582b64879392e..256915eeb2c2e 100644 --- a/Zend/Optimizer/sccp.c +++ b/Zend/Optimizer/sccp.c @@ -294,28 +294,8 @@ static bool try_replace_op1( ZVAL_COPY(&zv, value); if (zend_optimizer_update_op1_const(ctx->scdf.op_array, opline, &zv)) { return 1; - } else { - // TODO: check the following special cases ??? - switch (opline->opcode) { - case ZEND_INSTANCEOF: - zval_ptr_dtor_nogc(&zv); - ZVAL_FALSE(&zv); - opline->opcode = ZEND_QM_ASSIGN; - opline->op1_type = IS_CONST; - opline->op1.constant = zend_optimizer_add_literal(ctx->scdf.op_array, &zv); - opline->op2_type = IS_UNUSED; - if (ssa_op->op2_use >= 0) { - ZEND_ASSERT(ssa_op->op2_def == -1); - zend_ssa_unlink_use_chain(ctx->scdf.ssa, ssa_op - ctx->scdf.ssa->ops, ssa_op->op2_use); - ssa_op->op2_use = -1; - ssa_op->op2_use_chain = -1; - } - return 1; - default: - break; - } - zval_ptr_dtor_nogc(&zv); } + zval_ptr_dtor_nogc(&zv); } return 0; } From 92e7cf596208d62bde3b2c5c49667afd33bfab83 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 16:32:02 +0100 Subject: [PATCH 07/17] Move FETCH_CLASS+INSTANCEOF special case out of update_op1_const() The generic code was rejecting this to go into a special code path in SCCP. 
We should directly do that in SCCP instead, to still allow the generic (and valid) replacement. --- Zend/Optimizer/sccp.c | 35 ++++++++++++++------------------- Zend/Optimizer/zend_optimizer.c | 5 ----- 2 files changed, 15 insertions(+), 25 deletions(-) diff --git a/Zend/Optimizer/sccp.c b/Zend/Optimizer/sccp.c index 256915eeb2c2e..6ecb8518dca88 100644 --- a/Zend/Optimizer/sccp.c +++ b/Zend/Optimizer/sccp.c @@ -305,29 +305,24 @@ static bool try_replace_op2( if (ssa_op->op2_use == var && can_replace_op2(ctx->scdf.op_array, opline, ssa_op)) { zval zv; ZVAL_COPY(&zv, value); + + if (opline->opcode == ZEND_FETCH_CLASS && (opline + 1)->opcode == ZEND_INSTANCEOF && + ssa_op->result_def == (ssa_op + 1)->op2_use && Z_TYPE(zv) == IS_STRING) { + if (zend_optimizer_update_op2_const(ctx->scdf.op_array, opline + 1, &zv)) { + zend_ssa_op *next_op = ssa_op + 1; + zend_ssa_unlink_use_chain(ctx->scdf.ssa, next_op - ctx->scdf.ssa->ops, next_op->op2_use); + next_op->op2_use = -1; + next_op->op2_use_chain = -1; + zend_ssa_remove_result_def(ctx->scdf.ssa, ssa_op); + MAKE_NOP(opline); + return 1; + } + } + if (zend_optimizer_update_op2_const(ctx->scdf.op_array, opline, &zv)) { return 1; - } else { - switch (opline->opcode) { - case ZEND_FETCH_CLASS: - if (Z_TYPE(zv) == IS_STRING) { - ZEND_ASSERT((opline + 1)->opcode == ZEND_INSTANCEOF); - ZEND_ASSERT(ssa_op->result_def == (ssa_op + 1)->op2_use); - if (zend_optimizer_update_op2_const(ctx->scdf.op_array, opline + 1, &zv)) { - zend_ssa_op *next_op = ssa_op + 1; - zend_ssa_unlink_use_chain(ctx->scdf.ssa, next_op - ctx->scdf.ssa->ops, next_op->op2_use); - next_op->op2_use = -1; - next_op->op2_use_chain = -1; - zend_ssa_remove_result_def(ctx->scdf.ssa, ssa_op); - MAKE_NOP(opline); - return 1; - } - } - default: - break; - } - zval_ptr_dtor_nogc(&zv); } + zval_ptr_dtor_nogc(&zv); } return 0; } diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index 0f1453cfdb4f7..7fb6162c28714 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -351,11 +351,6 @@ bool zend_optimizer_update_op2_const(zend_op_array *op_array, case ZEND_FAST_CALL: return 0; case ZEND_FETCH_CLASS: - if ((opline + 1)->opcode == ZEND_INSTANCEOF && - (opline + 1)->op2.var == opline->result.var) { - return 0; - } - ZEND_FALLTHROUGH; case ZEND_INSTANCEOF: REQUIRES_STRING(val); drop_leading_backslash(val); From 7eae7e5e54915d9ba9c770ffb6a5860ab487a3b7 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 16:45:38 +0100 Subject: [PATCH 08/17] Remove FETCH_CLASS+INSTANCEOF special case Instead propagate the FETCH_CLASS return value, so it can be directly replaced if possible, which will also eliminate the FETCH_CLASS subsequently. 
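
Illustrative aside, not part of the patch: SCCP evaluates every SSA value on a three-level lattice -- TOP means "no information yet", a concrete zval means "known constant", and BOT means "not a constant". The SKIP_IF_TOP, SET_RESULT and SET_RESULT_BOT macros visible in the sccp.c hunks below implement exactly this, and SET_RESULT(result, op2) for ZEND_FETCH_CLASS is what forwards the class-name constant to the INSTANCEOF user. A self-contained toy sketch of the lattice rule, in plain C with hypothetical names (not Zend's actual sccp.c types):

#include <stdio.h>

/* Toy three-level SCCP lattice: TOP (undetermined), CONST, BOT (varies). */
typedef enum { LAT_TOP, LAT_CONST, LAT_BOT } lat_kind;
typedef struct { lat_kind kind; long value; /* valid iff kind == LAT_CONST */ } lat_val;

/* meet(): combine two facts about the same SSA variable arriving from
 * different control-flow edges. TOP is the identity element; two
 * disagreeing constants collapse to BOT. */
static lat_val meet(lat_val a, lat_val b)
{
    lat_val bot = { LAT_BOT, 0 };
    if (a.kind == LAT_TOP) return b;
    if (b.kind == LAT_TOP) return a;
    if (a.kind == LAT_CONST && b.kind == LAT_CONST && a.value == b.value) return a;
    return bot;
}

int main(void)
{
    lat_val top = { LAT_TOP, 0 }, c5 = { LAT_CONST, 5 }, c7 = { LAT_CONST, 7 };
    printf("meet(TOP, 5) -> kind %d\n", meet(top, c5).kind); /* LAT_CONST */
    printf("meet(5, 7)   -> kind %d\n", meet(c5, c7).kind);  /* LAT_BOT */
    return 0;
}

Only once a value settles to a constant do try_replace_op1()/try_replace_op2() attempt the literal substitution these commits are refining.
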
--- Zend/Optimizer/sccp.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/Zend/Optimizer/sccp.c b/Zend/Optimizer/sccp.c index 6ecb8518dca88..5f3b7639ccdc7 100644 --- a/Zend/Optimizer/sccp.c +++ b/Zend/Optimizer/sccp.c @@ -305,20 +305,6 @@ static bool try_replace_op2( if (ssa_op->op2_use == var && can_replace_op2(ctx->scdf.op_array, opline, ssa_op)) { zval zv; ZVAL_COPY(&zv, value); - - if (opline->opcode == ZEND_FETCH_CLASS && (opline + 1)->opcode == ZEND_INSTANCEOF && - ssa_op->result_def == (ssa_op + 1)->op2_use && Z_TYPE(zv) == IS_STRING) { - if (zend_optimizer_update_op2_const(ctx->scdf.op_array, opline + 1, &zv)) { - zend_ssa_op *next_op = ssa_op + 1; - zend_ssa_unlink_use_chain(ctx->scdf.ssa, next_op - ctx->scdf.ssa->ops, next_op->op2_use); - next_op->op2_use = -1; - next_op->op2_use_chain = -1; - zend_ssa_remove_result_def(ctx->scdf.ssa, ssa_op); - MAKE_NOP(opline); - return 1; - } - } - if (zend_optimizer_update_op2_const(ctx->scdf.op_array, opline, &zv)) { return 1; } @@ -1624,15 +1610,9 @@ static void sccp_visit_instr(scdf_ctx *scdf, zend_op *opline, zend_ssa_op *ssa_o } SET_RESULT(result, &zv); break; -#if 0 case ZEND_FETCH_CLASS: - if (!op1) { - SET_RESULT_BOT(result); - break; - } - SET_RESULT(result, op1); + SET_RESULT(result, op2); break; -#endif case ZEND_ISSET_ISEMPTY_CV: SKIP_IF_TOP(op1); if (ct_eval_isset_isempty(&zv, opline->extended_value, op1) == SUCCESS) { @@ -2138,6 +2118,7 @@ static int try_remove_definition(sccp_ctx *ctx, int var_num, zend_ssa_var *var, if (value && (opline->result_type & (IS_VAR|IS_TMP_VAR)) && opline->opcode != ZEND_QM_ASSIGN + && opline->opcode != ZEND_FETCH_CLASS && opline->opcode != ZEND_ROPE_INIT && opline->opcode != ZEND_ROPE_ADD && opline->opcode != ZEND_INIT_ARRAY From 088404840192ae8ce087678a04f1df0a9d4fab42 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 17:32:18 +0100 Subject: [PATCH 09/17] Don't exclude arrays from constant collection These are supported as constants nowadays, so we can drop the string check. Also fix a potential leak, though I believe this doesn't matter in current usage, as it will effectively be suppressed during persist. 
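
Illustrative aside, not part of the patch: the leak fix below relies on the zend_hash_add() contract -- it returns the stored zval on success and NULL when the key already exists -- so the reference count is bumped only when the table actually took ownership. The previous code ZVAL_COPY'd unconditionally and therefore leaked one reference whenever the same constant name was collected twice. A minimal stand-alone C sketch of the "insert first, addref on success" idiom, using toy types rather than Zend's zval/HashTable:

#include <stddef.h>
#include <string.h>

/* Toy refcounted value standing in for a zval. */
typedef struct { int refcount; const char *data; } rc_val;

/* Toy fixed-size table slot; a real HashTable hashes the key. */
typedef struct { const char *key; rc_val *val; } slot;

/* Same contract as zend_hash_add(): the stored value on success,
 * NULL if the key is already present. */
static rc_val *table_add(slot *tab, size_t n, const char *key, rc_val *val)
{
    size_t i;
    for (i = 0; i < n; i++) {
        if (tab[i].key && strcmp(tab[i].key, key) == 0) {
            return NULL; /* duplicate: not stored, caller keeps its reference */
        }
    }
    for (i = 0; i < n; i++) {
        if (!tab[i].key) { tab[i].key = key; tab[i].val = val; return val; }
    }
    return NULL; /* table full */
}

static void collect_constant(slot *tab, size_t n, const char *name, rc_val *val)
{
    /* Add first, take the extra reference only if insertion succeeded. */
    if (table_add(tab, n, name, val)) {
        val->refcount++;
    }
}

int main(void)
{
    slot tab[4] = { { NULL, NULL } };
    rc_val v = { 1, "scalar" };
    collect_constant(tab, 4, "FOO", &v); /* stored: refcount -> 2 */
    collect_constant(tab, 4, "FOO", &v); /* duplicate: refcount stays 2 */
    return v.refcount == 2 ? 0 : 1;
}
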
--- Zend/Optimizer/pass1.c | 6 ++---- Zend/Optimizer/zend_optimizer.c | 8 ++++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/Zend/Optimizer/pass1.c b/Zend/Optimizer/pass1.c index 2877d1d30841a..b25001182f591 100644 --- a/Zend/Optimizer/pass1.c +++ b/Zend/Optimizer/pass1.c @@ -314,9 +314,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) /* define("name", scalar); */ if (zend_string_equals_literal_ci(Z_STR(ZEND_OP2_LITERAL(init_opline)), "define")) { - if (Z_TYPE(ZEND_OP1_LITERAL(send1_opline)) == IS_STRING && - send2_opline && - Z_TYPE(ZEND_OP1_LITERAL(send2_opline)) <= IS_STRING) { + if (Z_TYPE(ZEND_OP1_LITERAL(send1_opline)) == IS_STRING && send2_opline) { if (collect_constants) { zend_optimizer_collect_constant(ctx, &ZEND_OP1_LITERAL(send1_opline), &ZEND_OP1_LITERAL(send2_opline)); @@ -502,7 +500,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) case ZEND_DECLARE_CONST: if (collect_constants && Z_TYPE(ZEND_OP1_LITERAL(opline)) == IS_STRING && - Z_TYPE(ZEND_OP2_LITERAL(opline)) <= IS_STRING) { + Z_TYPE(ZEND_OP2_LITERAL(opline)) != IS_CONSTANT_AST) { zend_optimizer_collect_constant(ctx, &ZEND_OP1_LITERAL(opline), &ZEND_OP2_LITERAL(opline)); } break; diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index 7fb6162c28714..0ba6bffaf0a60 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -47,14 +47,14 @@ static void zend_optimizer_zval_dtor_wrapper(zval *zvalue) void zend_optimizer_collect_constant(zend_optimizer_ctx *ctx, zval *name, zval* value) { - zval val; - if (!ctx->constants) { ctx->constants = zend_arena_alloc(&ctx->arena, sizeof(HashTable)); zend_hash_init(ctx->constants, 16, NULL, zend_optimizer_zval_dtor_wrapper, 0); } - ZVAL_COPY(&val, value); - zend_hash_add(ctx->constants, Z_STR_P(name), &val); + + if (zend_hash_add(ctx->constants, Z_STR_P(name), value)) { + Z_TRY_ADDREF_P(value); + } } zend_result zend_optimizer_eval_binary_op(zval *result, zend_uchar opcode, zval *op1, zval *op2) /* {{{ */ From 52676f2b7e8b6d740f2490bb0779df4dccb3f60d Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 17:39:05 +0100 Subject: [PATCH 10/17] Remove unnecessary wrapper function This seems to date back to a time where zval_ptr_dtor was a macro implicitly passing additional parameters. 
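
Illustrative aside, not part of the patch: once the destructor is a plain function whose signature already matches the callback type, the extra indirection buys nothing. A generic, self-contained C sketch of the pattern (hypothetical names, not Zend's dtor_func_t machinery):

#include <stdio.h>

typedef struct { int id; } value_t;

/* Callback type expected by the container, analogous to dtor_func_t. */
typedef void (*dtor_func)(value_t *v);

static void value_dtor(value_t *v)
{
    printf("destroying %d\n", v->id);
}

/* A wrapper like this is only needed when value_dtor is a macro or takes
 * extra hidden parameters -- the historical situation the commit refers to. */
static void value_dtor_wrapper(value_t *v)
{
    value_dtor(v);
}

static void container_destroy(dtor_func dtor, value_t *v)
{
    if (dtor) dtor(v);
}

int main(void)
{
    value_t v = { 42 };
    container_destroy(value_dtor, &v);         /* direct registration works */
    container_destroy(value_dtor_wrapper, &v); /* redundant indirection */
    return 0;
}
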
--- Zend/Optimizer/zend_optimizer.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index 0ba6bffaf0a60..9de7a944da1a5 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -40,16 +40,11 @@ struct { int last; } zend_optimizer_registered_passes = {{NULL}, 0}; -static void zend_optimizer_zval_dtor_wrapper(zval *zvalue) -{ - zval_ptr_dtor_nogc(zvalue); -} - void zend_optimizer_collect_constant(zend_optimizer_ctx *ctx, zval *name, zval* value) { if (!ctx->constants) { ctx->constants = zend_arena_alloc(&ctx->arena, sizeof(HashTable)); - zend_hash_init(ctx->constants, 16, NULL, zend_optimizer_zval_dtor_wrapper, 0); + zend_hash_init(ctx->constants, 16, NULL, zval_ptr_dtor_nogc, 0); } if (zend_hash_add(ctx->constants, Z_STR_P(name), value)) { From 28287572a1fb13d8641140855101731f70528209 Mon Sep 17 00:00:00 2001 From: Anatol Belski Date: Sat, 25 Dec 2021 19:00:09 +0100 Subject: [PATCH 11/17] hash: Upgrade xxHash to 0.8.1 Signed-off-by: Anatol Belski --- ext/hash/xxhash/xxhash.h | 2230 ++++++++++++++++++++++++++------------ 1 file changed, 1522 insertions(+), 708 deletions(-) diff --git a/ext/hash/xxhash/xxhash.h b/ext/hash/xxhash/xxhash.h index 2d56d23c5d0be..08ab79457233e 100644 --- a/ext/hash/xxhash/xxhash.h +++ b/ext/hash/xxhash/xxhash.h @@ -32,7 +32,12 @@ * - xxHash homepage: https://www.xxhash.com * - xxHash source repository: https://github.com/Cyan4973/xxHash */ - +/*! + * @mainpage xxHash + * + * @file xxhash.h + * xxHash prototypes and implementation + */ /* TODO: update */ /* Notice extracted from xxHash homepage: @@ -44,7 +49,7 @@ Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo Name Speed Q.Score Author xxHash 5.4 GB/s 10 CrapWow 3.2 GB/s 2 Andrew -MumurHash 3a 2.7 GB/s 10 Austin Appleby +MurmurHash 3a 2.7 GB/s 10 Austin Appleby SpookyHash 2.0 GB/s 10 Bob Jenkins SBox 1.4 GB/s 9 Bret Mulvey Lookup3 1.2 GB/s 9 Bob Jenkins @@ -116,29 +121,80 @@ extern "C" { /* * This part deals with the special case where a unit wants to inline xxHash, - * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such - * as part of some previously included *.h header file. + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. * Without further action, the new include would just be ignored, * and functions would effectively _not_ be inlined (silent failure). * The following macros solve this situation by prefixing all inlined names, * avoiding naming collision with previous inclusions. */ -# ifdef XXH_NAMESPACE -# error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported" - /* - * Note: Alternative: #undef all symbols (it's a pretty large list). - * Without #error: it compiles, but functions are actually not inlined. - */ -# endif + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE + + /* employ the namespace for XXH_INLINE_ALL */ # define XXH_NAMESPACE XXH_INLINE_ /* - * Some identifiers (enums, type names) are not symbols, but they must - * still be renamed to avoid redeclaration. + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. * Alternative solution: do not redeclare them. - * However, this requires some #ifdefs, and is a more dispersed action. - * Meanwhile, renaming can be achieved in a single block + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. */ -# define XXH_IPREF(Id) XXH_INLINE_ ## Id +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id # define XXH_OK XXH_IPREF(XXH_OK) # define XXH_ERROR XXH_IPREF(XXH_ERROR) # define XXH_errorcode XXH_IPREF(XXH_errorcode) @@ -165,6 +221,12 @@ extern "C" { #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 + +/*! + * @defgroup public Public API + * Contains details on the public xxHash functions. + * @{ + */ /* specific declaration modes for Windows */ #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) # if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) @@ -178,8 +240,9 @@ extern "C" { # endif #endif +#ifdef XXH_DOXYGEN /*! - * XXH_NAMESPACE, aka Namespace Emulation: + * @brief Emulate a namespace by transparently prefixing all symbols. * * If you want to include _and expose_ xxHash functions from within your own * library, but also want to avoid symbol collisions with other libraries which @@ -191,6 +254,10 @@ extern "C" { * includes `xxhash.h`: Regular symbol names will be automatically translated * by this header. 
*/ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) @@ -219,23 +286,28 @@ extern "C" { # define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) # define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) # define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) # define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) # define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) # define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) # define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) # define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) # define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) # define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) # define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) # define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) /* XXH3_128bits */ # define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) # define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) # define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) # define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) +# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) # define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) # define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) # define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) +# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed) # define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) # define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) # define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) @@ -250,13 +322,22 @@ extern "C" { ***************************************/ #define XXH_VERSION_MAJOR 0 #define XXH_VERSION_MINOR 8 -#define XXH_VERSION_RELEASE 0 +#define XXH_VERSION_RELEASE 1 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) + +/*! + * @brief Obtains the xxHash version. + * + * This is mostly useful when xxHash is compiled as a shared library, + * since the returned value comes from the library, as opposed to header file. + * + * @return `XXH_VERSION_NUMBER` of the invoked library. + */ XXH_PUBLIC_API unsigned XXH_versionNumber (void); /* **************************** -* Definitions +* Common basic types ******************************/ #include /* size_t */ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; @@ -265,11 +346,20 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; /*-********************************************************************** * 32-bit hash ************************************************************************/ -#if !defined (__VMS) \ +#if defined(XXH_DOXYGEN) /* Don't show include */ +/*! + * @brief An unsigned 32-bit integer. 
+ * + * Not necessarily defined to `uint32_t` but functionally equivalent. + */ +typedef uint32_t XXH32_hash_t; + +#elif !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include typedef uint32_t XXH32_hash_t; + #else # include # if UINT_MAX == 0xFFFFFFFFUL @@ -284,22 +374,48 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; #endif /*! - * XXH32(): - * Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". - * The memory between input & input+length must be valid (allocated and read-accessible). - * "seed" can be used to alter the result predictably. - * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * @} + * + * @defgroup xxh32_family XXH32 family + * @ingroup public + * Contains functions used in the classic 32-bit xxHash algorithm. * - * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems, - * and offers true 64/128 bit hash results. It provides a superior level of - * dispersion, and greatly reduces the risks of collisions. + * @note + * XXH32 is useful for older platforms, with no or poor 64-bit performance. + * Note that @ref xxh3_family provides competitive speed + * for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results. + * + * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families + * @see @ref xxh32_impl for implementation details + * @{ */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); -/******* Streaming *******/ +/*! + * @brief Calculates the 32-bit hash of @p input using xxHash32. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 32-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 32-bit hash value. + * + * @see + * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); -/* - * Streaming functions generate the xxHash value from an incrememtal input. +/*! + * Streaming functions generate the xxHash value from an incremental input. * This method is slower than single-call functions, due to state management. * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. * @@ -319,15 +435,117 @@ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_ * digest, and generate new hash values later on by invoking `XXH*_digest()`. * * When done, release the state using `XXH*_freeState()`. + * + * Example code for incrementally hashing a file: + * @code{.c} + * #include + * #include + * #define BUFFER_SIZE 256 + * + * // Note: XXH64 and XXH3 use the same interface. 
+ * XXH32_hash_t + * hashFile(FILE* stream) + * { + * XXH32_state_t* state; + * unsigned char buf[BUFFER_SIZE]; + * size_t amt; + * XXH32_hash_t hash; + * + * state = XXH32_createState(); // Create a state + * assert(state != NULL); // Error check here + * XXH32_reset(state, 0xbaad5eed); // Reset state with our seed + * while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) { + * XXH32_update(state, buf, amt); // Hash the file in chunks + * } + * hash = XXH32_digest(state); // Finalize the hash + * XXH32_freeState(state); // Clean up + * return hash; + * } + * @endcode + */ + +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. */ +typedef struct XXH32_state_s XXH32_state_t; -typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * Must be freed with XXH32_freeState(). + * @return An allocated XXH32_state_t on success, `NULL` on failure. + */ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * Must be allocated with XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @return XXH_OK. + */ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH32_update(). + * + * @param statePtr The state struct to reset. + * @param seed The 32-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH32_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH32_state_t. + * + * @note + * Calling XXH32_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash32 value from that state. 
+ */ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); /******* Canonical representation *******/ @@ -351,41 +569,151 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); * canonical format. */ -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +/*! + * @brief Canonical (big endian) representation of @ref XXH32_hash_t. + */ +typedef struct { + unsigned char digest[4]; /*!< Hash bytes, big endian */ +} XXH32_canonical_t; + +/*! + * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t. + * + * @param dst The @ref XXH32_canonical_t pointer to be stored to. + * @param hash The @ref XXH32_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); + +/*! + * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t. + * + * @param src The @ref XXH32_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +#ifdef __has_attribute +# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define XXH_HAS_ATTRIBUTE(x) 0 +#endif + +/* C-language Attributes are added in C23. */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) +# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) +#else +# define XXH_HAS_C_ATTRIBUTE(x) 0 +#endif + +#if defined(__cplusplus) && defined(__has_cpp_attribute) +# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define XXH_HAS_CPP_ATTRIBUTE(x) 0 +#endif + +/* +Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute +introduced in CPP17 and C23. +CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough +C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough +*/ +#if XXH_HAS_C_ATTRIBUTE(x) +# define XXH_FALLTHROUGH [[fallthrough]] +#elif XXH_HAS_CPP_ATTRIBUTE(x) +# define XXH_FALLTHROUGH [[fallthrough]] +#elif XXH_HAS_ATTRIBUTE(__fallthrough__) +# define XXH_FALLTHROUGH __attribute__ ((fallthrough)) +#else +# define XXH_FALLTHROUGH +#endif + +/*! + * @} + * @ingroup public + * @{ + */ + #ifndef XXH_NO_LONG_LONG /*-********************************************************************** * 64-bit hash ************************************************************************/ -#if !defined (__VMS) \ +#if defined(XXH_DOXYGEN) /* don't include */ +/*! + * @brief An unsigned 64-bit integer. + * + * Not necessarily defined to `uint64_t` but functionally equivalent. + */ +typedef uint64_t XXH64_hash_t; +#elif !defined (__VMS) \ && (defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint64_t XXH64_hash_t; +# include + typedef uint64_t XXH64_hash_t; #else - /* the following type must have a width of 64-bit */ - typedef unsigned long long XXH64_hash_t; +# include +# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL + /* LP64 ABI says uint64_t is unsigned long */ + typedef unsigned long XXH64_hash_t; +# else + /* the following type must have a width of 64-bit */ + typedef unsigned long long XXH64_hash_t; +# endif #endif /*! - * XXH64(): - * Returns the 64-bit hash of sequence of length @length stored at memory - * address @input. - * @seed can be used to alter the result predictably. 
+ * @} + * + * @defgroup xxh64_family XXH64 family + * @ingroup public + * @{ + * Contains functions used in the classic 64-bit xxHash algorithm. + * + * @note + * XXH3 provides competitive speed for both 32-bit and 64-bit systems, + * and offers true 64/128 bit hash results. + * It provides better speed for systems with vector processing capabilities. + */ + + +/*! + * @brief Calculates the 64-bit hash of @p input using xxHash64. * * This function usually runs faster on 64-bit systems, but slower on 32-bit * systems (see benchmark). * - * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems, - * and offers true 64/128 bit hash results. It provides a superior level of - * dispersion, and greatly reduces the risks of collisions. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 64-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit hash. + * + * @see + * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. */ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed); +XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed); /******* Streaming *******/ +/*! + * @brief The opaque state struct for the XXH64 streaming API. + * + * @see XXH64_state_s for details. + */ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); @@ -400,13 +728,14 @@ typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); - -/*-********************************************************************** -* XXH3 64-bit variant -************************************************************************/ - -/* ************************************************************************ - * XXH3 is a new hash algorithm featuring: +/*! + * @} + * ************************************************************************ + * @defgroup xxh3_family XXH3 family + * @ingroup public + * @{ + * + * XXH3 is a more recent hash algorithm featuring: * - Improved speed for both small and large inputs * - True 64-bit and 128-bit outputs * - SIMD acceleration @@ -416,46 +745,38 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src * * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html * - * In general, expect XXH3 to run about ~2x faster on large inputs and >3x - * faster on small ones compared to XXH64, though exact differences depend on - * the platform. - * - * The algorithm is portable: Like XXH32 and XXH64, it generates the same hash - * on all platforms. - * - * It benefits greatly from SIMD and 64-bit arithmetic, but does not require it. 
+ * Compared to XXH64, expect XXH3 to run approximately + * ~2x faster on large inputs and >3x faster on small ones, + * exact differences vary depending on platform. * - * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run - * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are - * explained in the implementation. + * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, + * but does not require it. + * Any 32-bit and 64-bit targets that can run XXH32 smoothly + * can run XXH3 at competitive speeds, even without vector support. + * Further details are explained in the implementation. * * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8, - * ZVector and scalar targets. This can be controlled with the XXH_VECTOR macro. + * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro. + * + * XXH3 implementation is portable: + * it has a generic C90 formulation that can be compiled on any platform, + * all implementations generage exactly the same hash value on all platforms. + * Starting from v0.8.0, it's also labelled "stable", meaning that + * any future version will also generate the same hash value. * * XXH3 offers 2 variants, _64bits and _128bits. - * When only 64 bits are needed, prefer calling the _64bits variant, as it - * reduces the amount of mixing, resulting in faster speed on small inputs. * + * When only 64 bits are needed, prefer invoking the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. * It's also generally simpler to manipulate a scalar return type than a struct. * - * The 128-bit version adds additional strength, but it is slightly slower. - * - * The XXH3 algorithm is still in development. - * The results it produces may still change in future versions. - * - * Results produced by v0.7.x are not comparable with results from v0.7.y. - * However, the API is completely stable, and it can safely be used for - * ephemeral data (local sessions). - * - * Avoid storing values in long-term storage until the algorithm is finalized. - * XXH3's return values will be officially finalized upon reaching v0.8.0. - * - * After which, return values of XXH3 and XXH128 will no longer change in - * future versions. - * * The API supports one-shot hashing, streaming mode, and custom secrets. */ +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + /* XXH3_64bits(): * default 64-bit variant, using default secret and default seed of 0. * It's the fastest variant. */ @@ -470,20 +791,32 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len); */ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); +/*! + * The bare minimum size for a custom secret. + * + * @see + * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), + * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). + */ +#define XXH3_SECRET_SIZE_MIN 136 + /* * XXH3_64bits_withSecret(): * It's possible to provide any blob of bytes as a "secret" to generate the hash. * This makes it more difficult for an external actor to prepare an intentional collision. * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). - * However, the quality of produced hash values depends on secret's entropy. - * Technically, the secret must look like a bunch of random bytes. 
+ * However, the quality of the secret impacts the dispersion of the hash algorithm. + * Therefore, the secret _must_ look like a bunch of random bytes. * Avoid "trivial" or structured data such as repeated sequences or a text document. - * Whenever unsure about the "randomness" of the blob of bytes, - * consider relabelling it as a "custom seed" instead, - * and employ "XXH3_generateSecret()" (see below) - * to generate a high entropy secret derived from the custom seed. + * Whenever in doubt about the "randomness" of the blob of bytes, + * consider employing "XXH3_generateSecret()" instead (see below). + * It will generate a proper high entropy secret derived from the blob of bytes. + * Another advantage of using XXH3_generateSecret() is that + * it guarantees that all bits within the initial blob of bytes + * will impact every bit of the output. + * This is not necessarily the case when using the blob of bytes directly + * because, when hashing _small_ inputs, only a portion of the secret is employed. */ -#define XXH3_SECRET_SIZE_MIN 136 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); @@ -494,6 +827,12 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, * As a consequence, streaming is slower than one-shot hashing. * For better performance, prefer one-shot functions whenever applicable. */ + +/*! + * @brief The state struct for the XXH3 streaming API. + * + * @see XXH3_state_s for details. + */ typedef struct XXH3_state_s XXH3_state_t; XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void); XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); @@ -533,9 +872,15 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr); * XXH3 128-bit variant ************************************************************************/ +/*! + * @brief The return value from 128-bit hashes. + * + * Stored in little endian order, although the fields themselves are in native + * endianness. + */ typedef struct { - XXH64_hash_t low64; - XXH64_hash_t high64; + XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */ + XXH64_hash_t high64; /*!< `value >> 64` */ } XXH128_hash_t; XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len); @@ -592,6 +937,9 @@ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* #endif /* XXH_NO_LONG_LONG */ +/*! + * @} + */ #endif /* XXHASH_H_5627135585666179 */ @@ -612,36 +960,57 @@ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* * Never **ever** access their members directly. */ +/*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. 
+ * @see XXH64_state_s, XXH3_state_s + */ struct XXH32_state_s { - XXH32_hash_t total_len_32; - XXH32_hash_t large_len; - XXH32_hash_t v1; - XXH32_hash_t v2; - XXH32_hash_t v3; - XXH32_hash_t v4; - XXH32_hash_t mem32[4]; - XXH32_hash_t memsize; - XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */ + XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ + XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + XXH32_hash_t v[4]; /*!< Accumulator lanes */ + XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ + XXH32_hash_t reserved; /*!< Reserved field. Do not read or write to it, it may be removed. */ }; /* typedef'd to XXH32_state_t */ #ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ +/*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ struct XXH64_state_s { - XXH64_hash_t total_len; - XXH64_hash_t v1; - XXH64_hash_t v2; - XXH64_hash_t v3; - XXH64_hash_t v4; - XXH64_hash_t mem64[4]; - XXH32_hash_t memsize; - XXH32_hash_t reserved32; /* required for padding anyway */ - XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */ + XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ + XXH64_hash_t v[4]; /*!< Accumulator lanes */ + XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ + XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/ + XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it, it may be removed. */ }; /* typedef'd to XXH64_state_t */ -#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */ # include # define XXH_ALIGN(n) alignas(n) +#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ +/* In C++ alignas() is a keyword */ +# define XXH_ALIGN(n) alignas(n) #elif defined(__GNUC__) # define XXH_ALIGN(n) __attribute__ ((aligned(n))) #elif defined(_MSC_VER) @@ -652,35 +1021,88 @@ struct XXH64_state_s { /* Old GCC versions only accept the attribute after the type in structures. */ #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ && defined(__GNUC__) # define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) #else # define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type #endif +/*! + * @brief The size of the internal XXH3 buffer. + * + * This is the optimal update size for incremental hashing. + * + * @see XXH3_64b_update(), XXH3_128b_update(). + */ #define XXH3_INTERNALBUFFER_SIZE 256 + +/*! + * @brief Default size of the secret buffer (and @ref XXH3_kSecret). + * + * This is the size used in @ref XXH3_kSecret and the seeded functions. + * + * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. + */ #define XXH3_SECRET_DEFAULT_SIZE 192 + +/*! + * @internal + * @brief Structure for XXH3 streaming API. 
+ * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. + * Otherwise it is an opaque type. + * Never use this definition in combination with dynamic library. + * This allows fields to safely be changed in the future. + * + * @note ** This structure has a strict alignment requirement of 64 bytes!! ** + * Do not allocate this with `malloc()` or `new`, + * it will not be sufficiently aligned. + * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation. + * + * Typedef'd to @ref XXH3_state_t. + * Do never access the members of this struct directly. + * + * @see XXH3_INITSTATE() for stack initialization. + * @see XXH3_createState(), XXH3_freeState(). + * @see XXH32_state_s, XXH64_state_s + */ struct XXH3_state_s { XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); - /* used to store a custom secret generated from a seed */ + /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); + /*!< Used to store a custom secret generated from a seed. */ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); + /*!< The internal buffer. @see XXH32_state_s::mem32 */ XXH32_hash_t bufferedSize; - XXH32_hash_t reserved32; + /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */ + XXH32_hash_t useSeed; + /*!< Reserved field. Needed for padding on 64-bit. */ size_t nbStripesSoFar; + /*!< Number or stripes processed. */ XXH64_hash_t totalLen; + /*!< Total length hashed. 64-bit even on 32-bit targets. */ size_t nbStripesPerBlock; + /*!< Number of stripes per block. */ size_t secretLimit; + /*!< Size of @ref customSecret or @ref extSecret */ XXH64_hash_t seed; + /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */ XXH64_hash_t reserved64; - const unsigned char* extSecret; /* reference to external secret; - * if == NULL, use .customSecret instead */ + /*!< Reserved field. */ + const unsigned char* extSecret; + /*!< Reference to an external secret for the _withSecret variants, NULL + * for other variants. */ /* note: there may be some padding at the end due to alignment on 64 bytes */ }; /* typedef'd to XXH3_state_t */ #undef XXH_ALIGN_MEMBER -/* When the XXH3_state_t structure is merely emplaced on stack, +/*! + * @brief Initializes a stack-allocated `XXH3_state_s`. + * + * When the @ref XXH3_state_t structure is merely emplaced on stack, * it should be initialized with XXH3_INITSTATE() or a memset() * in case its first reset uses XXH3_NNbits_reset_withSeed(). * This init can be omitted if the first reset uses default or _withSecret mode. @@ -691,6 +1113,12 @@ struct XXH3_state_s { #define XXH3_INITSTATE(XXH3_state_ptr) { (XXH3_state_ptr)->seed = 0; } +/* XXH128() : + * simple alias to pre-selected XXH3_128bits variant + */ +XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); + + /* === Experimental API === */ /* Symbols defined below must be considered tied to a specific library version. */ @@ -703,36 +1131,92 @@ struct XXH3_state_s { * as it becomes much more difficult for an external actor to guess how to impact the calculation logic. * * The function accepts as input a custom seed of any length and any content, - * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE - * into an already allocated buffer secretBuffer. - * The generated secret is _always_ XXH_SECRET_DEFAULT_SIZE bytes long. 
+ * and derives from it a high-entropy secret of length @secretSize + * into an already allocated buffer @secretBuffer. + * @secretSize must be >= XXH3_SECRET_SIZE_MIN * * The generated secret can then be used with any `*_withSecret()` variant. * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`, * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()` * are part of this list. They all accept a `secret` parameter - * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN) + * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN) * _and_ feature very high entropy (consist of random-looking bytes). * These conditions can be a high bar to meet, so - * this function can be used to generate a secret of proper quality. + * XXH3_generateSecret() can be employed to ensure proper quality. * * customSeed can be anything. It can have any size, even small ones, - * and its content can be anything, even stupidly "low entropy" source such as a bunch of zeroes. - * The resulting `secret` will nonetheless provide all expected qualities. + * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes. + * The resulting `secret` will nonetheless provide all required qualities. * - * Supplying NULL as the customSeed copies the default secret into `secretBuffer`. * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior. */ -XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize); +XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize); -/* simple short-cut to pre-selected XXH3_128bits variant */ -XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); +/* + * XXH3_generateSecret_fromSeed(): + * + * Generate the same secret as the _withSeed() variants. + * + * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily). + * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes. + * + * The generated secret can be used in combination with + *`*_withSecret()` and `_withSecretandSeed()` variants. + * This generator is notably useful in combination with `_withSecretandSeed()`, + * as a way to emulate a faster `_withSeed()` variant. + */ +XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed); +/* + * *_withSecretandSeed() : + * These variants generate hash values using either + * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes) + * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX). + * + * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`. + * `_withSeed()` has to generate the secret on the fly for "large" keys. + * It's fast, but can be perceptible for "not so large" keys (< 1 KB). + * `_withSecret()` has to generate the masks on the fly for "small" keys, + * which requires more instructions than _withSeed() variants. + * Therefore, _withSecretandSeed variant combines the best of both worlds. + * + * When @secret has been generated by XXH3_generateSecret_fromSeed(), + * this variant produces *exactly* the same results as `_withSeed()` variant, + * hence offering only a pure speed benefit on "large" input, + * by skipping the need to regenerate the secret for every large input. 
+ * + * Another usage scenario is to hash the secret to a 64-bit hash value, + * for example with XXH3_64bits(), which then becomes the seed, + * and then employ both the seed and the secret in _withSecretandSeed(). + * On top of speed, an added benefit is that each bit in the secret + * has a 50% chance to swap each bit in the output, + * via its impact to the seed. + * This is not guaranteed when using the secret directly in "small data" scenarios, + * because only portions of the secret are employed for small data. + */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecretandSeed(const void* data, size_t len, + const void* secret, size_t secretSize, + XXH64_hash_t seed); -#endif /* XXH_NO_LONG_LONG */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(const void* data, size_t len, + const void* secret, size_t secretSize, + XXH64_hash_t seed64); + +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, + const void* secret, size_t secretSize, + XXH64_hash_t seed64); + +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, + const void* secret, size_t secretSize, + XXH64_hash_t seed64); +#endif /* XXH_NO_LONG_LONG */ #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) # define XXH_IMPLEMENTATION #endif @@ -774,8 +1258,24 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t s /* ************************************* * Tuning parameters ***************************************/ + +/*! + * @defgroup tuning Tuning parameters + * @{ + * + * Various macros to control xxHash's behavior. + */ +#ifdef XXH_DOXYGEN /*! - * XXH_FORCE_MEMORY_ACCESS: + * @brief Define this to disable 64-bit code. + * + * Useful if only using the @ref xxh32_family and you have a strict C90 compiler. + */ +# define XXH_NO_LONG_LONG +# undef XXH_NO_LONG_LONG /* don't actually */ +/*! + * @brief Controls how unaligned memory is accessed. + * * By default, access to unaligned memory is controlled by `memcpy()`, which is * safe and portable. * @@ -784,58 +1284,65 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t s * * The below switch allow selection of a different access method * in the search for improved performance. - * Method 0 (default): - * Use `memcpy()`. Safe and portable. Default. - * Method 1: - * `__attribute__((packed))` statement. It depends on compiler extensions - * and is therefore not portable. - * This method is safe if your compiler supports it, and *generally* as - * fast or faster than `memcpy`. - * Method 2: - * Direct access via cast. This method doesn't depend on the compiler but - * violates the C standard. - * It can generate buggy code on targets which do not support unaligned - * memory accesses. - * But in some circumstances, it's the only known way to get the most - * performance (example: GCC + ARMv6) - * Method 3: - * Byteshift. This can generate the best code on old compilers which don't + * + * @par Possible options: + * + * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` + * @par + * Use `memcpy()`. Safe and portable. Note that most modern compilers will + * eliminate the function call and treat it as an unaligned access. + * + * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))` + * @par + * Depends on compiler extensions and is therefore not portable. + * This method is safe _if_ your compiler supports it, + * and *generally* as fast or faster than `memcpy`. 
+ * + * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast + * @par + * Casts directly and dereferences. This method doesn't depend on the + * compiler, but it violates the C standard as it directly dereferences an + * unaligned pointer. It can generate buggy code on targets which do not + * support unaligned memory accesses, but in some circumstances, it's the + * only known way to get the most performance. + * + * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift + * @par + * Also portable. This can generate the best code on old compilers which don't * inline small `memcpy()` calls, and it might also be faster on big-endian - * systems which lack a native byteswap instruction. - * See https://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2 > 3) + * systems which lack a native byteswap instruction. However, some compilers + * will emit literal byteshifts even if the target supports unaligned access. + * . + * + * @warning + * Methods 1 and 2 rely on implementation-defined behavior. Use these with + * care, as what works on one compiler/platform/optimization level may cause + * another to read garbage data or even crash. + * + * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * + * Prefer these methods in priority order (0 > 3 > 1 > 2) */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7))) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/*! - * XXH_ACCEPT_NULL_INPUT_POINTER: - * If the input pointer is NULL, xxHash's default behavior is to dereference it, - * triggering a segfault. - * When this macro is enabled, xxHash actively checks the input for a null pointer. - * If it is, the result for null input pointers is the same as a zero-length input. - */ -#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ -# define XXH_ACCEPT_NULL_INPUT_POINTER 0 -#endif +# define XXH_FORCE_MEMORY_ACCESS 0 /*! - * XXH_FORCE_ALIGN_CHECK: - * This is an important performance trick - * for architectures without decent unaligned memory access performance. - * It checks for input alignment, and when conditions are met, - * uses a "fast path" employing direct 32-bit/64-bit read, - * resulting in _dramatically faster_ read speed. + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() + * and XXH64() only). * - * The check costs one initial branch per hash, which is generally negligible, but not zero. - * Moreover, it's not useful to generate binary for an additional code path - * if memory access uses same instruction for both aligned and unaligned adresses. + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, + * but not zero. 
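For reference, the byteshift strategy (`XXH_FORCE_MEMORY_ACCESS=3`) recommended above reduces to a plain little-endian reconstruction; a sketch equivalent to what that option selects for 32-bit reads:

    #include <stdint.h>

    static uint32_t readLE32_byteshift(const uint8_t* bytePtr)
    {
        /* no alignment assumption, no memcpy() for an old compiler to
         * leave un-inlined, and endian-independent by construction */
        return (uint32_t)bytePtr[0]
             | ((uint32_t)bytePtr[1] << 8)
             | ((uint32_t)bytePtr[2] << 16)
             | ((uint32_t)bytePtr[3] << 24);
    }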
+ * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). * * In these cases, the alignment check can be removed by setting this macro to 0. * Then the code will always use unaligned memory access. @@ -844,17 +1351,11 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t s * * This option does not affect XXH3 (only XXH32 and XXH64). */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ - || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif +# define XXH_FORCE_ALIGN_CHECK 0 /*! - * XXH_NO_INLINE_HINTS: + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. * * By default, xxHash tries to force the compiler to inline almost all internal * functions. @@ -872,6 +1373,63 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t s * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using * -fno-inline with GCC or Clang, this will automatically be defined. */ +# define XXH_NO_INLINE_HINTS 0 + +/*! + * @def XXH32_ENDJMP + * @brief Whether to use a jump for `XXH32_finalize`. + * + * For performance, `XXH32_finalize` uses multiple branches in the finalizer. + * This is generally preferable for performance, + * but depending on exact architecture, a jmp may be preferable. + * + * This setting is only possibly making a difference for very small inputs. + */ +# define XXH32_ENDJMP 0 + +/*! + * @internal + * @brief Redefines old internal names. + * + * For compatibility with code that uses xxHash's internals before the names + * were changed to improve namespacing. There is no other reason to use this. + */ +# define XXH_OLD_NAMES +# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ +#endif /* XXH_DOXYGEN */ +/*! + * @} + */ + +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ + /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */ +# if !defined(__clang__) && \ +( \ + (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + ( \ + defined(__GNUC__) && ( \ + (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \ + ( \ + defined(__mips__) && \ + (__mips <= 5 || __mips_isa_rev < 6) && \ + (!defined(__mips16) || defined(__mips_mips16e2)) \ + ) \ + ) \ + ) \ +) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + #ifndef XXH_NO_INLINE_HINTS # if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \ || defined(__NO_INLINE__) /* -O0, -fno-inline */ @@ -881,36 +1439,44 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t s # endif #endif +#ifndef XXH32_ENDJMP +/* generally preferable for performance */ +# define XXH32_ENDJMP 0 +#endif + /*! - * XXH_REROLL: - * Whether to reroll XXH32_finalize, and XXH64_finalize, - * instead of using an unrolled jump table/if statement loop. - * - * This is automatically defined on -Os/-Oz on GCC and Clang. 
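All of these knobs are meant to be settled before the implementation is compiled, either as -D flags on the command line or at the top of a single translation unit. A sketch using the documented single-file pattern; the values shown are illustrative, not recommendations:

    #define XXH_STATIC_LINKING_ONLY
    #define XXH_IMPLEMENTATION          /* emit the implementation here */
    #define XXH_FORCE_MEMORY_ACCESS 3   /* byteshift loads */
    #define XXH_FORCE_ALIGN_CHECK   0   /* skip the aligned fast path */
    #define XXH32_ENDJMP            1   /* jump-based finalizer */
    #include "xxhash.h"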
+ * @defgroup impl Implementation + * @{ */ -#ifndef XXH_REROLL -# if defined(__OPTIMIZE_SIZE__) -# define XXH_REROLL 1 -# else -# define XXH_REROLL 0 -# endif -#endif /* ************************************* * Includes & Memory related functions ***************************************/ -/*! +/* * Modify the local functions below should you wish to use * different memory routines for malloc() and free() */ #include +/*! + * @internal + * @brief Modify this function to use a different routine than malloc(). + */ static void* XXH_malloc(size_t s) { return malloc(s); } + +/*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ static void XXH_free(void* p) { free(p); } -/*! and for memcpy() */ #include + +/*! + * @internal + * @brief Modify this function to use a different routine than memcpy(). + */ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); @@ -927,19 +1493,19 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) #endif #if XXH_NO_INLINE_HINTS /* disable inlining hints */ -# if defined(__GNUC__) +# if defined(__GNUC__) || defined(__clang__) # define XXH_FORCE_INLINE static __attribute__((unused)) # else # define XXH_FORCE_INLINE static # endif # define XXH_NO_INLINE static /* enable inlining hints */ +#elif defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) +# define XXH_NO_INLINE static __attribute__((noinline)) #elif defined(_MSC_VER) /* Visual Studio */ # define XXH_FORCE_INLINE static __forceinline # define XXH_NO_INLINE static __declspec(noinline) -#elif defined(__GNUC__) -# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) -# define XXH_NO_INLINE static __attribute__((noinline)) #elif defined (__cplusplus) \ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ # define XXH_FORCE_INLINE static inline @@ -954,7 +1520,11 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) /* ************************************* * Debug ***************************************/ -/* +/*! + * @ingroup tuning + * @def XXH_DEBUGLEVEL + * @brief Sets the debugging level. + * * XXH_DEBUGLEVEL is expected to be defined externally, typically via the * compiler's command line options. The value must be a number. */ @@ -974,8 +1544,39 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) #endif /* note: use after variable declarations */ -#define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0) +#ifndef XXH_STATIC_ASSERT +# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ +# include +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# else +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0) +# endif +# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c) +#endif +/*! + * @internal + * @def XXH_COMPILER_GUARD(var) + * @brief Used to prevent unwanted optimizations for @p var. + * + * It uses an empty GCC inline assembly statement with a register constraint + * which forces @p var into a general purpose register (eg eax, ebx, ecx + * on x86) and marks it as modified. + * + * This is used in a few places to avoid unwanted autovectorization (e.g. + * XXH32_round()). 
All vectorization we want is explicit via intrinsics, + * and _usually_ isn't wanted elsewhere. + * + * We also use it to prevent unwanted constant folding for AArch64 in + * XXH3_initCustomSecret_scalar(). + */ +#if defined(__GNUC__) || defined(__clang__) +# define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var)) +#else +# define XXH_COMPILER_GUARD(var) ((void)0) +#endif /* ************************************* * Basic Types @@ -998,6 +1599,56 @@ typedef XXH32_hash_t xxh_u32; /* *** Memory access *** */ +/*! + * @internal + * @fn xxh_u32 XXH_read32(const void* ptr) + * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit native endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32(const void* ptr) + * @brief Reads an unaligned 32-bit little endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readBE32(const void* ptr) + * @brief Reads an unaligned 32-bit big endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit big endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) + * @brief Like @ref XXH_readLE32(), but has an option for aligned reads. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is + * always @ref XXH_alignment::XXH_unaligned. + * + * @param ptr The pointer to read from. + * @param align Whether @p ptr is aligned. + * @pre + * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte + * aligned. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) /* * Manual byteshift. Best for old compilers which don't inline memcpy. @@ -1032,28 +1683,35 @@ static xxh_u32 XXH_read32(const void* ptr) /* * Portable and safe solution. Generally efficient. - * see: https://stackoverflow.com/a/32095106/646947 + * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u32 XXH_read32(const void* memPtr) { xxh_u32 val; - memcpy(&val, memPtr, sizeof(val)); + XXH_memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ -/* *** Endianess *** */ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; +/* *** Endianness *** */ /*! - * XXH_CPU_LITTLE_ENDIAN: + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * * Defined to 1 if the target is little endian, or 0 if it is big endian. * It can be defined externally, for example on the compiler command line. * - * If it is not defined, a runtime check (which is usually constant folded) - * is used instead. + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. 
*/ #ifndef XXH_CPU_LITTLE_ENDIAN /* @@ -1068,8 +1726,11 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) # define XXH_CPU_LITTLE_ENDIAN 0 # else -/* - * runtime test, presumed to simplify to a constant by compiler +/*! + * @internal + * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. + * + * Most compilers will constant fold this. */ static int XXH_isLittleEndian(void) { @@ -1098,6 +1759,19 @@ static int XXH_isLittleEndian(void) # define XXH_HAS_BUILTIN(x) 0 #endif +/*! + * @internal + * @def XXH_rotl32(x,r) + * @brief 32-bit rotate left. + * + * @param x The 32-bit integer to be rotated. + * @param r The number of bits to rotate. + * @pre + * @p r > 0 && @p r < 32 + * @note + * @p x and @p r may be evaluated multiple times. + * @return The rotated result. + */ #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ && XXH_HAS_BUILTIN(__builtin_rotateleft64) # define XXH_rotl32 __builtin_rotateleft32 @@ -1111,6 +1785,14 @@ static int XXH_isLittleEndian(void) # define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) #endif +/*! + * @internal + * @fn xxh_u32 XXH_swap32(xxh_u32 x) + * @brief A 32-bit byteswap. + * + * @param x The 32-bit integer to byteswap. + * @return @p x, byteswapped. + */ #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap32 _byteswap_ulong #elif XXH_GCC_VERSION >= 403 @@ -1129,7 +1811,15 @@ static xxh_u32 XXH_swap32 (xxh_u32 x) /* *************************** * Memory reads *****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +/*! + * @internal + * @brief Enum to indicate whether a pointer is aligned. + */ +typedef enum { + XXH_aligned, /*!< Aligned */ + XXH_unaligned /*!< Possibly unaligned */ +} XXH_alignment; /* * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. @@ -1182,17 +1872,25 @@ XXH_readLE32_align(const void* ptr, XXH_alignment align) /* ************************************* * Misc ***************************************/ +/*! @ingroup public */ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } /* ******************************************************************* * 32-bit hash functions *********************************************************************/ -static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */ -static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */ -static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */ -static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */ -static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */ +/*! + * @} + * @defgroup xxh32_impl XXH32 implementation + * @ingroup impl + * @{ + */ + /* #define instead of static const, to be used as initializers */ +#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ +#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ +#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ +#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ +#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ #ifdef XXH_OLD_NAMES # define PRIME32_1 XXH_PRIME32_1 @@ -1202,18 +1900,28 @@ static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011 # define PRIME32_5 XXH_PRIME32_5 #endif +/*! 
+ * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { acc += input * XXH_PRIME32_2; acc = XXH_rotl32(acc, 13); acc *= XXH_PRIME32_1; -#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE) +#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) /* * UGLY HACK: - * This inline assembly hack forces acc into a normal register. This is the - * only thing that prevents GCC and Clang from autovectorizing the XXH32 - * loop (pragmas and attributes don't work for some resason) without globally - * disabling SSE4.1. + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. * * The reason we want to avoid vectorization is because despite working on * 4 integers at a time, there are multiple factors slowing XXH32 down on @@ -1238,27 +1946,25 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) * can load data, while v3 can multiply. SSE forces them to operate * together. * - * How this hack works: - * __asm__("" // Declare an assembly block but don't declare any instructions - * : // However, as an Input/Output Operand, - * "+r" // constrain a read/write operand (+) as a general purpose register (r). - * (acc) // and set acc as the operand - * ); - * - * Because of the 'r', the compiler has promised that seed will be in a - * general purpose register and the '+' says that it will be 'read/write', - * so it has to assume it has changed. It is like volatile without all the - * loads and stores. - * - * Since the argument has to be in a normal register (not an SSE register), - * each time XXH32_round is called, it is impossible to vectorize. + * This is also enabled on AArch64, as Clang autovectorizes it incorrectly + * and it is pointless writing a NEON implementation that is basically the + * same speed as scalar for XXH32. */ - __asm__("" : "+r" (acc)); + XXH_COMPILER_GUARD(acc); #endif return acc; } -/* mix all bits */ +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param h32 The hash to avalanche. + * @return The avalanched hash. + */ static xxh_u32 XXH32_avalanche(xxh_u32 h32) { h32 ^= h32 >> 15; @@ -1271,6 +1977,20 @@ static xxh_u32 XXH32_avalanche(xxh_u32 h32) #define XXH_get32bits(p) XXH_readLE32_align(p, align) +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param h32 The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. 
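Backing up to XXH32_round() above: under GCC and Clang, XXH_COMPILER_GUARD(acc) is exactly the empty inline-asm idiom the old comment spelled out by hand. A standalone illustration of what the guarded round compiles down to, with the constants inlined for clarity:

    #include <stdint.h>

    static uint32_t round_guarded(uint32_t acc, uint32_t input)
    {
        acc += input * 2246822519U;         /* XXH_PRIME32_2 */
        acc  = (acc << 13) | (acc >> 19);   /* XXH_rotl32(acc, 13) */
        acc *= 2654435761U;                 /* XXH_PRIME32_1 */
        /* "+r" pins acc in a general-purpose register and marks it
         * modified, which is what defeats autovectorization */
        __asm__ __volatile__("" : "+r" (acc));
        return acc;
    }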
+ */ static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) { @@ -1285,8 +2005,10 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ } while (0) - /* Compact rerolled version */ - if (XXH_REROLL) { + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { len &= 15; while (len >= 4) { XXH_PROCESS4; @@ -1300,41 +2022,41 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) } else { switch(len&15) /* or switch(bEnd - p) */ { case 12: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 8: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 4: XXH_PROCESS4; return XXH32_avalanche(h32); case 13: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 9: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 5: XXH_PROCESS4; XXH_PROCESS1; return XXH32_avalanche(h32); case 14: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 10: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 6: XXH_PROCESS4; XXH_PROCESS1; XXH_PROCESS1; return XXH32_avalanche(h32); case 15: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 11: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 7: XXH_PROCESS4; - /* fallthrough */ + XXH_FALLTHROUGH; case 3: XXH_PROCESS1; - /* fallthrough */ + XXH_FALLTHROUGH; case 2: XXH_PROCESS1; - /* fallthrough */ + XXH_FALLTHROUGH; case 1: XXH_PROCESS1; - /* fallthrough */ + XXH_FALLTHROUGH; case 0: return XXH32_avalanche(h32); } XXH_ASSERT(0); @@ -1350,20 +2072,23 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) # undef XXH_PROCESS4 #endif +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ XXH_FORCE_INLINE xxh_u32 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) { - const xxh_u8* bEnd = input + len; xxh_u32 h32; -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (input==NULL) { - len=0; - bEnd=input=(const xxh_u8*)(size_t)16; - } -#endif + if (input==NULL) XXH_ASSERT(len == 0); if (len>=16) { + const xxh_u8* const bEnd = input + len; const xxh_u8* const limit = bEnd - 15; xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; xxh_u32 v2 = seed + XXH_PRIME32_2; @@ -1388,7 +2113,7 @@ XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment return XXH32_finalize(h32, input, len&15, align); } - +/*! @ingroup xxh32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) { #if 0 @@ -1397,9 +2122,7 @@ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t s XXH32_reset(&state, seed); XXH32_update(&state, (const xxh_u8*)input, len); return XXH32_digest(&state); - #else - if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); @@ -1412,45 +2135,49 @@ XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t s /******* Hash streaming *******/ - +/*! + * @ingroup xxh32_family + */ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) { return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); } +/*! 
@ingroup xxh32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } +/*! @ingroup xxh32_family */ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) { - memcpy(dstState, srcState, sizeof(*dstState)); + XXH_memcpy(dstState, srcState, sizeof(*dstState)); } +/*! @ingroup xxh32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) { XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)); - state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; - state.v2 = seed + XXH_PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - XXH_PRIME32_1; + state.v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + state.v[1] = seed + XXH_PRIME32_2; + state.v[2] = seed + 0; + state.v[3] = seed - XXH_PRIME32_1; /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); return XXH_OK; } +/*! @ingroup xxh32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t* state, const void* input, size_t len) { - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (input==NULL) { + XXH_ASSERT(len == 0); return XXH_OK; -#else - return XXH_ERROR; -#endif + } { const xxh_u8* p = (const xxh_u8*)input; const xxh_u8* const bEnd = p + len; @@ -1467,10 +2194,10 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len) if (state->memsize) { /* some data left from previous update */ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); { const xxh_u32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32)); + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); } p += 16-state->memsize; state->memsize = 0; @@ -1478,22 +2205,14 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len) if (p <= bEnd-16) { const xxh_u8* const limit = bEnd - 16; - xxh_u32 v1 = state->v1; - xxh_u32 v2 = state->v2; - xxh_u32 v3 = state->v3; - xxh_u32 v4 = state->v4; do { - v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; } while (p<=limit); - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; } if (p < bEnd) { @@ -1506,17 +2225,18 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len) } -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state) +/*! 
@ingroup xxh32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) { xxh_u32 h32; if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) - + XXH_rotl32(state->v2, 7) - + XXH_rotl32(state->v3, 12) - + XXH_rotl32(state->v4, 18); + h32 = XXH_rotl32(state->v[0], 1) + + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + + XXH_rotl32(state->v[3], 18); } else { - h32 = state->v3 /* == seed */ + XXH_PRIME32_5; + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; } h32 += state->total_len_32; @@ -1527,7 +2247,8 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state) /******* Canonical representation *******/ -/* +/*! + * @ingroup xxh32_family * The default return values from XXH functions are unsigned 32 and 64 bit * integers. * @@ -1544,9 +2265,9 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t { XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); + XXH_memcpy(dst, &hash, sizeof(*dst)); } - +/*! @ingroup xxh32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) { return XXH_readBE32(src); @@ -1558,7 +2279,11 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src /* ******************************************************************* * 64-bit hash functions *********************************************************************/ - +/*! + * @} + * @ingroup impl + * @{ + */ /******* Memory access *******/ typedef XXH64_hash_t xxh_u64; @@ -1567,35 +2292,6 @@ typedef XXH64_hash_t xxh_u64; # define U64 xxh_u64 #endif -/*! - * XXH_REROLL_XXH64: - * Whether to reroll the XXH64_finalize() loop. - * - * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a - * performance gain on 64-bit hosts, as only one jump is required. - * - * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit - * registers, and 64-bit arithmetic needs to be simulated, it isn't beneficial - * to unroll. The code becomes ridiculously large (the largest function in the - * binary on i386!), and rerolling it saves anywhere from 3kB to 20kB. It is - * also slightly faster because it fits into cache better and is more likely - * to be inlined by the compiler. - * - * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. - */ -#ifndef XXH_REROLL_XXH64 -# if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \ - || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \ - || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \ - || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \ - || defined(__mips64__) || defined(__mips64)) /* mips64 */ \ - || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */ -# define XXH_REROLL_XXH64 1 -# else -# define XXH_REROLL_XXH64 0 -# endif -#endif /* !defined(XXH_REROLL_XXH64) */ - #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) /* * Manual byteshift. Best for old compilers which don't inline memcpy. @@ -1604,7 +2300,10 @@ typedef XXH64_hash_t xxh_u64; #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */ -static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; } +static xxh_u64 XXH_read64(const void* memPtr) +{ + return *(const xxh_u64*) memPtr; +} #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) @@ -1627,12 +2326,12 @@ static xxh_u64 XXH_read64(const void* ptr) /* * Portable and safe solution. Generally efficient. - * see: https://stackoverflow.com/a/32095106/646947 + * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u64 XXH_read64(const void* memPtr) { xxh_u64 val; - memcpy(&val, memPtr, sizeof(val)); + XXH_memcpy(&val, memPtr, sizeof(val)); return val; } @@ -1643,7 +2342,7 @@ static xxh_u64 XXH_read64(const void* memPtr) #elif XXH_GCC_VERSION >= 403 # define XXH_swap64 __builtin_bswap64 #else -static xxh_u64 XXH_swap64 (xxh_u64 x) +static xxh_u64 XXH_swap64(xxh_u64 x) { return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | @@ -1709,12 +2408,18 @@ XXH_readLE64_align(const void* ptr, XXH_alignment align) /******* xxh64 *******/ - -static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */ -static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */ -static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */ -static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */ -static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */ +/*! + * @} + * @defgroup xxh64_impl XXH64 implementation + * @ingroup impl + * @{ + */ +/* #define rather that static const, to be used as initializers */ +#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ +#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ +#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ +#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ +#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ #ifdef XXH_OLD_NAMES # define PRIME64_1 XXH_PRIME64_1 @@ -1756,126 +2461,27 @@ static xxh_u64 XXH64_avalanche(xxh_u64 h64) static xxh_u64 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) { -#define XXH_PROCESS1_64 do { \ - h64 ^= (*ptr++) * XXH_PRIME64_5; \ - h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; \ -} while (0) - -#define XXH_PROCESS4_64 do { \ - h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; \ - ptr += 4; \ - h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; \ -} while (0) - -#define XXH_PROCESS8_64 do { \ - xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \ - ptr += 8; \ - h64 ^= k1; \ - h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; \ -} while (0) - - /* Rerolled version for 32-bit targets is faster and much smaller. 
*/ - if (XXH_REROLL || XXH_REROLL_XXH64) { - len &= 31; - while (len >= 8) { - XXH_PROCESS8_64; - len -= 8; - } - if (len >= 4) { - XXH_PROCESS4_64; - len -= 4; - } - while (len > 0) { - XXH_PROCESS1_64; - --len; - } - return XXH64_avalanche(h64); - } else { - switch(len & 31) { - case 24: XXH_PROCESS8_64; - /* fallthrough */ - case 16: XXH_PROCESS8_64; - /* fallthrough */ - case 8: XXH_PROCESS8_64; - return XXH64_avalanche(h64); - - case 28: XXH_PROCESS8_64; - /* fallthrough */ - case 20: XXH_PROCESS8_64; - /* fallthrough */ - case 12: XXH_PROCESS8_64; - /* fallthrough */ - case 4: XXH_PROCESS4_64; - return XXH64_avalanche(h64); - - case 25: XXH_PROCESS8_64; - /* fallthrough */ - case 17: XXH_PROCESS8_64; - /* fallthrough */ - case 9: XXH_PROCESS8_64; - XXH_PROCESS1_64; - return XXH64_avalanche(h64); - - case 29: XXH_PROCESS8_64; - /* fallthrough */ - case 21: XXH_PROCESS8_64; - /* fallthrough */ - case 13: XXH_PROCESS8_64; - /* fallthrough */ - case 5: XXH_PROCESS4_64; - XXH_PROCESS1_64; - return XXH64_avalanche(h64); - - case 26: XXH_PROCESS8_64; - /* fallthrough */ - case 18: XXH_PROCESS8_64; - /* fallthrough */ - case 10: XXH_PROCESS8_64; - XXH_PROCESS1_64; - XXH_PROCESS1_64; - return XXH64_avalanche(h64); - - case 30: XXH_PROCESS8_64; - /* fallthrough */ - case 22: XXH_PROCESS8_64; - /* fallthrough */ - case 14: XXH_PROCESS8_64; - /* fallthrough */ - case 6: XXH_PROCESS4_64; - XXH_PROCESS1_64; - XXH_PROCESS1_64; - return XXH64_avalanche(h64); - - case 27: XXH_PROCESS8_64; - /* fallthrough */ - case 19: XXH_PROCESS8_64; - /* fallthrough */ - case 11: XXH_PROCESS8_64; - XXH_PROCESS1_64; - XXH_PROCESS1_64; - XXH_PROCESS1_64; - return XXH64_avalanche(h64); - - case 31: XXH_PROCESS8_64; - /* fallthrough */ - case 23: XXH_PROCESS8_64; - /* fallthrough */ - case 15: XXH_PROCESS8_64; - /* fallthrough */ - case 7: XXH_PROCESS4_64; - /* fallthrough */ - case 3: XXH_PROCESS1_64; - /* fallthrough */ - case 2: XXH_PROCESS1_64; - /* fallthrough */ - case 1: XXH_PROCESS1_64; - /* fallthrough */ - case 0: return XXH64_avalanche(h64); - } + if (ptr==NULL) XXH_ASSERT(len == 0); + len &= 31; + while (len >= 8) { + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); + ptr += 8; + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; + len -= 8; + } + if (len >= 4) { + h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + ptr += 4; + h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + len -= 4; + } + while (len > 0) { + h64 ^= (*ptr++) * XXH_PRIME64_5; + h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; + --len; } - /* impossible to reach */ - XXH_ASSERT(0); - return 0; /* unreachable, but some compilers complain without it */ + return XXH64_avalanche(h64); } #ifdef XXH_OLD_NAMES @@ -1891,18 +2497,12 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) XXH_FORCE_INLINE xxh_u64 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) { - const xxh_u8* bEnd = input + len; xxh_u64 h64; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (input==NULL) { - len=0; - bEnd=input=(const xxh_u8*)(size_t)32; - } -#endif + if (input==NULL) XXH_ASSERT(len == 0); if (len>=32) { - const xxh_u8* const limit = bEnd - 32; + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 31; xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; xxh_u64 v2 = seed + XXH_PRIME64_2; xxh_u64 v3 = seed + 0; @@ -1913,7 +2513,7 @@ XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, 
XXH_alignment
         v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
         v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
         v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
-        } while (input<=limit);
+        } while (input<limit);
-    if (input==NULL)
-#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+    if (input==NULL) {
+        XXH_ASSERT(len == 0);
         return XXH_OK;
-#else
-        return XXH_ERROR;
-#endif
+    }

     {   const xxh_u8* p = (const xxh_u8*)input;
         const xxh_u8* const bEnd = p + len;
@@ -2005,32 +2607,24 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)

     if (state->memsize) {   /* tmp buffer is full */
         XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
-        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
-        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
-        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
-        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
-        p += 32-state->memsize;
+        state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
+        state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
+        state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
+        state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
+        p += 32 - state->memsize;
         state->memsize = 0;
     }

     if (p+32 <= bEnd) {
         const xxh_u8* const limit = bEnd - 32;
-        xxh_u64 v1 = state->v1;
-        xxh_u64 v2 = state->v2;
-        xxh_u64 v3 = state->v3;
-        xxh_u64 v4 = state->v4;

         do {
-            v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
-            v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
-            v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
-            v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
+            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
+            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
+            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
+            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
         } while (p<=limit);

-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
     }

     if (p < bEnd) {
@@ -2043,23 +2637,19 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)
 }


-XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
+/*! @ingroup xxh64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
 {
     xxh_u64 h64;

     if (state->total_len >= 32) {
-        xxh_u64 const v1 = state->v1;
-        xxh_u64 const v2 = state->v2;
-        xxh_u64 const v3 = state->v3;
-        xxh_u64 const v4 = state->v4;
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
+        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
+        h64 = XXH64_mergeRound(h64, state->v[0]);
+        h64 = XXH64_mergeRound(h64, state->v[1]);
+        h64 = XXH64_mergeRound(h64, state->v[2]);
+        h64 = XXH64_mergeRound(h64, state->v[3]);
     } else {
-        h64  = state->v3 /*seed*/ + XXH_PRIME64_5;
+        h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
     }

     h64 += (xxh_u64) state->total_len;
@@ -2070,28 +2660,38 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)

 /******* Canonical representation *******/

+/*! @ingroup xxh64_family */
 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
 {
     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
-    memcpy(dst, &hash, sizeof(*dst));
+    XXH_memcpy(dst, &hash, sizeof(*dst));
 }

+/*!
@ingroup xxh64_family */ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) { return XXH_readBE64(src); } - +#ifndef XXH_NO_XXH3 /* ********************************************************************* * XXH3 * New generation hash designed for speed on small keys and vectorization ************************************************************************ */ +/*! + * @} + * @defgroup xxh3_impl XXH3 implementation + * @ingroup impl + * @{ + */ /* === Compiler specifics === */ -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */ +# define XXH_RESTRICT /* disable */ +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ # define XXH_RESTRICT restrict #else /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */ @@ -2198,12 +2798,62 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src /* ========================================== * Vectorization detection * ========================================== */ -#define XXH_SCALAR 0 /* Portable scalar version */ -#define XXH_SSE2 1 /* SSE2 for Pentium 4 and all x86_64 */ -#define XXH_AVX2 2 /* AVX2 for Haswell and Bulldozer */ -#define XXH_AVX512 3 /* AVX512 for Skylake and Icelake */ -#define XXH_NEON 4 /* NEON for most ARMv7-A and all AArch64 */ -#define XXH_VSX 5 /* VSX and ZVector for POWER8/z13 */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * @ref XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment reqired for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
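Because the values above are really plain integer macros rather than a true enum, overriding the detection is just a define; a sketch pinning the scalar path, equivalently -DXXH_VECTOR=0 on the compiler command line:

    #define XXH_VECTOR 0   /* XXH_SCALAR: force the portable path */
    #include "xxhash.h"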
+ */ +# define XXH_ACC_ALIGN 8 +#endif + +/* Actual definition */ +#ifndef XXH_DOXYGEN +# define XXH_SCALAR 0 +# define XXH_SSE2 1 +# define XXH_AVX2 2 +# define XXH_AVX512 3 +# define XXH_NEON 4 +# define XXH_VSX 5 +#endif #ifndef XXH_VECTOR /* can be defined on command line */ # if defined(__AVX512F__) @@ -2212,10 +2862,13 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src # define XXH_VECTOR XXH_AVX2 # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) # define XXH_VECTOR XXH_SSE2 -# elif defined(__GNUC__) /* msvc support maybe later */ \ - && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \ - && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) +# elif ( \ + defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ + || defined(_M_ARM64) || defined(_M_ARM_ARMV7VE) /* msvc */ \ + ) && ( \ + defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ + ) # define XXH_VECTOR XXH_NEON # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ || (defined(__s390x__) && defined(__VEC__)) \ @@ -2356,7 +3009,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src * This is available on ARMv7-A, but is less efficient than a single VZIP.32. */ -/* +/*! * Function-like macro: * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi) * { @@ -2367,7 +3020,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src */ # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \ && defined(__GNUC__) \ - && !defined(__aarch64__) && !defined(__arm64__) + && !defined(__aarch64__) && !defined(__arm64__) && !defined(_M_ARM64) # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ do { \ /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \ @@ -2429,10 +3082,12 @@ typedef __vector unsigned xxh_u32x4; # endif /* !defined(XXH_VSX_BE) */ # if XXH_VSX_BE -/* A wrapper for POWER9's vec_revb. */ # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) # define XXH_vec_revb vec_revb # else +/*! + * A polyfill for POWER9's vec_revb(). + */ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) { xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, @@ -2442,13 +3097,13 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) # endif # endif /* XXH_VSX_BE */ -/* - * Performs an unaligned load and byte swaps it on big endian. +/*! + * Performs an unaligned vector load and byte swaps it on big endian. 
*/ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) { xxh_u64x2 ret; - memcpy(&ret, ptr, sizeof(xxh_u64x2)); + XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); # if XXH_VSX_BE ret = XXH_vec_revb(ret); # endif @@ -2493,7 +3148,7 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) #if defined(XXH_NO_PREFETCH) # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ #else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ # include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) @@ -2514,7 +3169,7 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) # error "default keyset is not large enough" #endif -/* Pseudorandom secret taken directly from FARSH */ +/*! Pseudorandom secret taken directly from FARSH. */ XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, @@ -2535,23 +3190,29 @@ XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { # define kSecret XXH3_kSecret #endif -/* - * Calculates a 32-bit to 64-bit long multiply. +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. * - * Wraps __emulu on MSVC x86 because it tends to call __allmul when it doesn't + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't * need to (but it shouldn't need to anyways, it is about 7 instructions to do - * a 64x64 multiply...). Since we know that this will _always_ emit MULL, we + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we * use that instead of the normal method. * * If you are compiling for platforms like Thumb-1 and don't have a better option, * you may also want to write your own long multiply routine here. * - * XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) - * { - * return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); - * } + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. */ -#if defined(_MSC_VER) && defined(_M_IX86) +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) # include # define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) #else @@ -2565,10 +3226,14 @@ XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { # define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) #endif -/* - * Calculates a 64->128-bit long multiply. +/*! + * @brief Calculates a 64->128-bit long multiply. * - * Uses __uint128_t and _umul128 if available, otherwise uses a scalar version. + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. 
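When neither __uint128_t, _umul128, nor __umulh is available, the function below falls back to a schoolbook decomposition into four 32x32->64 partial products. A self-contained sketch of that arithmetic, with the same carry structure but local names:

    #include <stdint.h>

    typedef struct { uint64_t low64, high64; } u128;

    static u128 mult64to128_scalar(uint64_t lhs, uint64_t rhs)
    {
        uint64_t const lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
        uint64_t const hi_lo = (lhs >> 32)        * (rhs & 0xFFFFFFFF);
        uint64_t const lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
        uint64_t const hi_hi = (lhs >> 32)        * (rhs >> 32);

        /* middle column: fits in 64 bits, since lo_hi <= (2^32-1)^2 leaves
         * room for the two 32-bit addends */
        uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
        uint64_t const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
        uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

        u128 r; r.low64 = lower; r.high64 = upper;
        return r;
    }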
*/ static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) @@ -2617,6 +3282,21 @@ XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) r128.high64 = product_high; return r128; + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + #else /* * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. @@ -2679,11 +3359,15 @@ XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) #endif } -/* - * Does a 64-bit to 128-bit multiply, then XOR folds it. +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. * * The reason for the separate function is to prevent passing too many structs * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() */ static xxh_u64 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) @@ -2692,7 +3376,7 @@ XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) return product.low64 ^ product.high64; } -/* Seems to produce slightly better code on GCC for some reason. */ +/*! Seems to produce slightly better code on GCC for some reason. */ XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) { XXH_ASSERT(0 <= shift && shift < 64); @@ -2787,7 +3471,7 @@ XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_h { XXH_ASSERT(input != NULL); XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len < 8); + XXH_ASSERT(4 <= len && len <= 8); seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; { xxh_u32 const input1 = XXH_readLE32(input); xxh_u32 const input2 = XXH_readLE32(input + len - 4); @@ -2803,7 +3487,7 @@ XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_ { XXH_ASSERT(input != NULL); XXH_ASSERT(secret != NULL); - XXH_ASSERT(8 <= len && len <= 16); + XXH_ASSERT(9 <= len && len <= 16); { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; @@ -2873,7 +3557,7 @@ XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, * GCC generates much better scalar code than Clang for the rest of XXH3, * which is why finding a more optimal codepath is an interest. */ - __asm__ ("" : "+r" (seed64)); + XXH_COMPILER_GUARD(seed64); #endif { xxh_u64 const input_lo = XXH_readLE64(input); xxh_u64 const input_hi = XXH_readLE64(input+8); @@ -2983,7 +3667,7 @@ XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) { if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); - memcpy(dst, &v64, sizeof(v64)); + XXH_memcpy(dst, &v64, sizeof(v64)); } /* Several intrinsic functions below are supposed to accept __int64 as argument, @@ -3023,7 +3707,8 @@ XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) * Both XXH3_64bits and XXH3_128bits use this subroutine. 
*/ -#if (XXH_VECTOR == XXH_AVX512) || defined(XXH_X86DISPATCH) +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) #ifndef XXH_TARGET_AVX512 # define XXH_TARGET_AVX512 /* disable attribute target */ @@ -3034,7 +3719,7 @@ XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT input, const void* XXH_RESTRICT secret) { - XXH_ALIGN(64) __m512i* const xacc = (__m512i *) acc; + __m512i* const xacc = (__m512i *) acc; XXH_ASSERT((((size_t)acc) & 63) == 0); XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); @@ -3083,7 +3768,7 @@ XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 63) == 0); XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); - { XXH_ALIGN(64) __m512i* const xacc = (__m512i*) acc; + { __m512i* const xacc = (__m512i*) acc; const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); /* xacc[0] ^= (xacc[0] >> 47) */ @@ -3110,17 +3795,19 @@ XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) XXH_ASSERT(((size_t)customSecret & 63) == 0); (void)(&XXH_writeLE64); { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); - __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64); + __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64)); - XXH_ALIGN(64) const __m512i* const src = (const __m512i*) XXH3_kSecret; - XXH_ALIGN(64) __m512i* const dest = ( __m512i*) customSecret; + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); for (i=0; i < nbRounds; ++i) { /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*', - * this will warn "discards ‘const’ qualifier". */ + * this will warn "discards 'const' qualifier". */ union { - XXH_ALIGN(64) const __m512i* cp; - XXH_ALIGN(64) void* p; + const __m512i* cp; + void* p; } remote_const_void; remote_const_void.cp = src + i; dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed); @@ -3129,7 +3816,8 @@ XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) #endif -#if (XXH_VECTOR == XXH_AVX2) || defined(XXH_X86DISPATCH) +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) #ifndef XXH_TARGET_AVX2 # define XXH_TARGET_AVX2 /* disable attribute target */ @@ -3141,7 +3829,7 @@ XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 31) == 0); - { XXH_ALIGN(32) __m256i* const xacc = (__m256i *) acc; + { __m256i* const xacc = (__m256i *) acc; /* Unaligned. This is mainly for pointer arithmetic, and because * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ const __m256i* const xinput = (const __m256i *) input; @@ -3173,7 +3861,7 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 31) == 0); - { XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc; + { __m256i* const xacc = (__m256i*) acc; /* Unaligned. This is mainly for pointer arithmetic, and because * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ const __m256i* const xsecret = (const __m256i *) secret; @@ -3205,23 +3893,21 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTR XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); (void)(&XXH_writeLE64); XXH_PREFETCH(customSecret); - { __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64, -(xxh_i64)seed64, (xxh_i64)seed64); + { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); - XXH_ALIGN(64) const __m256i* const src = (const __m256i*) XXH3_kSecret; - XXH_ALIGN(64) __m256i* dest = ( __m256i*) customSecret; + const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); + __m256i* dest = ( __m256i*) customSecret; # if defined(__GNUC__) || defined(__clang__) /* * On GCC & Clang, marking 'dest' as modified will cause the compiler: * - do not extract the secret from sse registers in the internal loop * - use less common registers, and avoid pushing these reg into stack - * The asm hack causes Clang to assume that XXH3_kSecretPtr aliases with - * customSecret, and on aarch64, this prevented LDP from merging two - * loads together for free. Putting the loads together before the stores - * properly generates LDP. */ - __asm__("" : "+r" (dest)); + XXH_COMPILER_GUARD(dest); # endif + XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 31) == 0); /* GCC -O2 need unroll loop manually */ dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed); @@ -3235,6 +3921,7 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTR #endif +/* x86dispatch always generates SSE2 */ #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) #ifndef XXH_TARGET_SSE2 @@ -3248,7 +3935,7 @@ XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, { /* SSE2 is just a half-scale version of the AVX2 version. */ XXH_ASSERT((((size_t)acc) & 15) == 0); - { XXH_ALIGN(16) __m128i* const xacc = (__m128i *) acc; + { __m128i* const xacc = (__m128i *) acc; /* Unaligned. This is mainly for pointer arithmetic, and because * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ const __m128i* const xinput = (const __m128i *) input; @@ -3280,7 +3967,7 @@ XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); - { XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc; + { __m128i* const xacc = (__m128i*) acc; /* Unaligned. This is mainly for pointer arithmetic, and because * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ const __m128i* const xsecret = (const __m128i *) secret; @@ -3312,27 +3999,29 @@ XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTR { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 - // MSVC 32bit mode does not support _mm_set_epi64x before 2015 - XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, -(xxh_i64)seed64 }; + /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ + XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); # else - __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64); + __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); # endif int i; - XXH_ALIGN(64) const float* const src = (float const*) XXH3_kSecret; - XXH_ALIGN(XXH_SEC_ALIGN) __m128i* dest = (__m128i*) customSecret; + const void* const src16 = XXH3_kSecret; + __m128i* dst16 = (__m128i*) customSecret; # if defined(__GNUC__) || defined(__clang__) /* * On GCC & Clang, marking 'dest' as modified will cause the compiler: * - do not extract the secret from sse registers in the internal loop * - use less common registers, and avoid pushing these reg into stack */ - __asm__("" : "+r" (dest)); + XXH_COMPILER_GUARD(dst16); # endif + XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dst16 & 15) == 0); for (i=0; i < nbRounds; ++i) { - dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src+i*4)), seed); + dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); } } } @@ -3347,7 +4036,7 @@ XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, { XXH_ASSERT((((size_t)acc) & 15) == 0); { - XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc; + uint64x2_t* const xacc = (uint64x2_t *) acc; /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. 
*/ uint8_t const* const xinput = (const uint8_t *) input; uint8_t const* const xsecret = (const uint8_t *) secret; @@ -3394,8 +4083,8 @@ XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) uint64x2_t data_vec = veorq_u64 (acc_vec, shifted); /* xacc[i] ^= xsecret[i]; */ - uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); - uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec)); + uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec)); /* xacc[i] *= XXH_PRIME32_1 */ uint32x2_t data_key_lo, data_key_hi; @@ -3439,7 +4128,8 @@ XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, const void* XXH_RESTRICT input, const void* XXH_RESTRICT secret) { - xxh_u64x2* const xacc = (xxh_u64x2*) acc; /* presumed aligned */ + /* presumed aligned */ + unsigned long long* const xacc = (unsigned long long*) acc; xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */ xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */ xxh_u64x2 const v32 = { 32, 32 }; @@ -3454,14 +4144,18 @@ XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); - xacc[i] += product; + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = vec_xl(0, xacc + 2 * i); + acc_vec += product; /* swap high and low halves */ #ifdef __s390x__ - xacc[i] += vec_permi(data_vec, data_vec, 2); + acc_vec += vec_permi(data_vec, data_vec, 2); #else - xacc[i] += vec_xxpermdi(data_vec, data_vec, 2); + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); #endif + /* xacc[i] = acc_vec; */ + vec_xst(acc_vec, 0, xacc + 2 * i); } } @@ -3504,7 +4198,7 @@ XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT input, const void* XXH_RESTRICT secret) { - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ + xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */ const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ size_t i; @@ -3520,7 +4214,7 @@ XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ + xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ size_t i; XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); @@ -3574,7 +4268,7 @@ XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) * without hack: 2654.4 MB/s * with hack: 3202.9 MB/s */ - __asm__("" : "+r" (kSecretPtr)); + XXH_COMPILER_GUARD(kSecretPtr); #endif /* * Note: in debug mode, this overrides the asm optimization @@ -3739,7 +4433,7 @@ XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secre * without hack: 2063.7 MB/s * with hack: 2560.7 MB/s */ - __asm__("" : "+r" (result64)); + XXH_COMPILER_GUARD(result64); #endif } @@ -3768,9 +4462,11 @@ XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, } /* - * It's important for performance that XXH3_hashLong is not inlined. 
+ * It's important for performance to transmit secret's size (when it's static) + * so that the compiler can properly optimize the vectorized loop. + * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set. */ -XXH_NO_INLINE XXH64_hash_t +XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) { @@ -3779,11 +4475,10 @@ XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, } /* - * It's important for performance that XXH3_hashLong is not inlined. - * Since the function is not inlined, the compiler may not be able to understand that, - * in some scenarios, its `secret` argument is actually a compile time constant. - * This variant enforces that the compiler can detect that, - * and uses this opportunity to streamline the generated code for better performance. + * It's preferable for performance that XXH3_hashLong is not inlined, + * as it results in a smaller function for small data, easier to the instruction cache. + * Note that inside this no_inline function, we do inline the internal loop, + * and provide a statically defined secret size to allow optimization of vector loop. */ XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, @@ -3863,23 +4558,34 @@ XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, /* === Public entry point === */ +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len) { return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) { return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) { return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); } +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize); +} + /* === XXH3 streaming === */ @@ -3948,6 +4654,7 @@ static void XXH_alignedFree(void* p) XXH_free(base); } } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) { XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); @@ -3956,22 +4663,24 @@ XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) return state; } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) { XXH_alignedFree(statePtr); return XXH_OK; } +/*! 
@ingroup xxh3_family */ XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state) { - memcpy(dst_state, src_state, sizeof(*dst_state)); + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); } static void -XXH3_64bits_reset_internal(XXH3_state_t* statePtr, - XXH64_hash_t seed, - const void* secret, size_t secretSize) +XXH3_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) { size_t const initStart = offsetof(XXH3_state_t, bufferedSize); size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; @@ -3988,37 +4697,54 @@ XXH3_64bits_reset_internal(XXH3_state_t* statePtr, statePtr->acc[6] = XXH_PRIME64_5; statePtr->acc[7] = XXH_PRIME32_1; statePtr->seed = seed; + statePtr->useSeed = (seed != 0); statePtr->extSecret = (const unsigned char*)secret; XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr) { if (statePtr == NULL) return XXH_ERROR; - XXH3_64bits_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); return XXH_OK; } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) { if (statePtr == NULL) return XXH_ERROR; - XXH3_64bits_reset_internal(statePtr, 0, secret, secretSize); + XXH3_reset_internal(statePtr, 0, secret, secretSize); if (secret == NULL) return XXH_ERROR; if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; return XXH_OK; } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) { if (statePtr == NULL) return XXH_ERROR; if (seed==0) return XXH3_64bits_reset(statePtr); - if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed); - XXH3_64bits_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ return XXH_OK; } @@ -4049,34 +4775,48 @@ XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, } } +#ifndef XXH3_STREAM_USE_STACK +# ifndef __clang__ /* clang doesn't need additional stack space */ +# define XXH3_STREAM_USE_STACK 1 +# endif +#endif /* * Both XXH3_64bits_update and XXH3_128bits_update use this routine. 
*/ XXH_FORCE_INLINE XXH_errorcode -XXH3_update(XXH3_state_t* state, - const xxh_u8* input, size_t len, +XXH3_update(XXH3_state_t* XXH_RESTRICT const state, + const xxh_u8* XXH_RESTRICT input, size_t len, XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) { - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (input==NULL) { + XXH_ASSERT(len == 0); return XXH_OK; -#else - return XXH_ERROR; -#endif + } + XXH_ASSERT(state != NULL); { const xxh_u8* const bEnd = input + len; const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; - +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* For some reason, gcc and MSVC seem to suffer greatly + * when operating accumulators directly into state. + * Operating into stack space seems to enable proper optimization. + * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
- if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */ + /* small input : just fill in tmp buffer */ + if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { XXH_memcpy(state->buffer + state->bufferedSize, input, len); state->bufferedSize += (XXH32_hash_t)len; return XXH_OK; }
- /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ @@ -4088,7 +4828,7 @@ XXH3_update(XXH3_state_t* state, size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); input += loadSize;
- XXH3_consumeStripes(state->acc, + XXH3_consumeStripes(acc, &state->nbStripesSoFar, state->nbStripesPerBlock, state->buffer, XXH3_INTERNALBUFFER_STRIPES, secret, state->secretLimit, @@ -4097,30 +4837,68 @@ XXH3_update(XXH3_state_t* state, } XXH_ASSERT(input < bEnd);
- /* Consume input by a multiple of internal buffer size */ - if (input+XXH3_INTERNALBUFFER_SIZE < bEnd) { - const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; - do { - XXH3_consumeStripes(state->acc, - &state->nbStripesSoFar, state->nbStripesPerBlock, - input, XXH3_INTERNALBUFFER_STRIPES, - secret, state->secretLimit, - f_acc512, f_scramble); - input += XXH3_INTERNALBUFFER_SIZE;
- } while (input<limit);
- /* for last partial stripe */
- memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+ /* large input to consume : ingest per full block */ + if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar); + /* join to current block's end */ + { size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
+ XXH_ASSERT(nbStripesToEnd <= nbStripes);
+ XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512); + f_scramble(acc, secret + state->secretLimit); + state->nbStripesSoFar = 0; + input += nbStripesToEnd * XXH_STRIPE_LEN; + nbStripes -= nbStripesToEnd; + } + /* consume per entire blocks */ + while(nbStripes >= state->nbStripesPerBlock) { +
XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512); + f_scramble(acc, secret + state->secretLimit); + input += state->nbStripesPerBlock * XXH_STRIPE_LEN; + nbStripes -= state->nbStripesPerBlock; + } + /* consume last partial block */ + XXH3_accumulate(acc, input, secret, nbStripes, f_acc512); + input += nbStripes * XXH_STRIPE_LEN; + XXH_ASSERT(input < bEnd); /* at least some bytes left */ + state->nbStripesSoFar = nbStripes; + /* buffer predecessor of last partial stripe */ + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN); + } else { + /* content to consume <= block size */ + /* Consume input by a multiple of internal buffer size */ + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; + do { + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc512, f_scramble); + input += XXH3_INTERNALBUFFER_SIZE;
+ } while (input<limit);
+ /* for last partial stripe */
+ XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+ } } - XXH_ASSERT(input < bEnd); /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + memcpy(state->acc, acc, sizeof(acc)); +#endif } return XXH_OK; }
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len) { @@ -4138,7 +4916,7 @@ XXH3_digest_long (XXH64_hash_t* acc, * Digest on a local copy. This way, the state remains unaltered, and it can * continue ingesting more input afterwards. */
- memcpy(acc, state->acc, sizeof(state->acc)); + XXH_memcpy(acc, state->acc, sizeof(state->acc)); if (state->bufferedSize >= XXH_STRIPE_LEN) { size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; size_t nbStripesSoFar = state->nbStripesSoFar; @@ -4155,14 +4933,15 @@ XXH3_digest_long (XXH64_hash_t* acc, xxh_u8 lastStripe[XXH_STRIPE_LEN]; size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
- memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); - memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); XXH3_accumulate_512(acc, lastStripe, secret + state->secretLimit - XXH_SECRET_LASTACC_START); } }
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) { const unsigned char* const secret = (state->extSecret == NULL) ?
state->customSecret : state->extSecret; @@ -4174,57 +4953,13 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) (xxh_u64)state->totalLen * XXH_PRIME64_1); } /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ - if (state->seed) + if (state->useSeed) return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), secret, state->secretLimit + XXH_STRIPE_LEN); } -#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) - -XXH_PUBLIC_API void -XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize) -{ - XXH_ASSERT(secretBuffer != NULL); - if (customSeedSize == 0) { - memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); - return; - } - XXH_ASSERT(customSeed != NULL); - - { size_t const segmentSize = sizeof(XXH128_hash_t); - size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize; - XXH128_canonical_t scrambler; - XXH64_hash_t seeds[12]; - size_t segnb; - XXH_ASSERT(nbSegments == 12); - XXH_ASSERT(segmentSize * nbSegments == XXH_SECRET_DEFAULT_SIZE); /* exact multiple */ - XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); - - /* - * Copy customSeed to seeds[], truncating or repeating as necessary. - */ - { size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds)); - size_t filled = toFill; - memcpy(seeds, customSeed, toFill); - while (filled < sizeof(seeds)) { - toFill = XXH_MIN(filled, sizeof(seeds) - filled); - memcpy((char*)seeds + filled, seeds, toFill); - filled += toFill; - } } - - /* generate secret */ - memcpy(secretBuffer, &scrambler, sizeof(scrambler)); - for (segnb=1; segnb < nbSegments; segnb++) { - size_t const segmentStart = segnb * segmentSize; - XXH128_canonical_t segment; - XXH128_canonicalFromHash(&segment, - XXH128(&scrambler, sizeof(scrambler), XXH_readLE64(seeds + segnb) + segnb) ); - memcpy((char*)secretBuffer + segmentStart, &segment, sizeof(segment)); - } } -} - /* ========================================== * XXH3 128 bits (a.k.a XXH128) @@ -4526,9 +5261,10 @@ XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, } /* - * It's important for performance that XXH3_hashLong is not inlined. + * It's important for performance to pass @secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. */ -XXH_NO_INLINE XXH128_hash_t +XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) @@ -4595,6 +5331,7 @@ XXH3_128bits_internal(const void* input, size_t len, /* === Public XXH128 API === */ +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) { return XXH3_128bits_internal(input, len, 0, @@ -4602,6 +5339,7 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) XXH3_hashLong_128b_default); } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) { @@ -4610,6 +5348,7 @@ XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_ XXH3_hashLong_128b_withSecret); } +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) { @@ -4618,6 +5357,16 @@ XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) XXH3_hashLong_128b_withSeed); } +/*! 
@ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +} + +/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* input, size_t len, XXH64_hash_t seed) { @@ -4628,46 +5377,39 @@ XXH128(const void* input, size_t len, XXH64_hash_t seed) /* === XXH3 128-bit streaming === */ /*
- * All the functions are actually the same as for 64-bit streaming variant.
- * The only difference is the finalizatiom routine.
+ * All initialization and update functions are identical to 64-bit streaming variant.
+ * The only difference is the finalization routine.
 */
-static void
-XXH3_128bits_reset_internal(XXH3_state_t* statePtr,
- XXH64_hash_t seed,
- const void* secret, size_t secretSize)
-{
- XXH3_64bits_reset_internal(statePtr, seed, secret, secretSize);
-}
-
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr) {
- if (statePtr == NULL) return XXH_ERROR;
- XXH3_128bits_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
- return XXH_OK;
+ return XXH3_64bits_reset(statePtr);
}
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) {
- if (statePtr == NULL) return XXH_ERROR;
- XXH3_128bits_reset_internal(statePtr, 0, secret, secretSize);
- if (secret == NULL) return XXH_ERROR;
- if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
- return XXH_OK;
+ return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) {
- if (statePtr == NULL) return XXH_ERROR;
- if (seed==0) return XXH3_128bits_reset(statePtr);
- if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
- XXH3_128bits_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
- return XXH_OK;
+ return XXH3_64bits_reset_withSeed(statePtr, seed);
}
+/*! @ingroup xxh3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
+}
+
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) { @@ -4675,6 +5417,7 @@ XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) XXH3_accumulate_512, XXH3_scrambleAcc); }
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) { const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; @@ -4705,6 +5448,7 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) #include <string.h> /* memcmp, memcpy */ /* return : 1 is equal, 0 if different */ +/*!
@ingroup xxh3_family */ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) { /* note : XXH128_hash_t is compact, it has no padding byte */ @@ -4715,6 +5459,7 @@ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) * return : >0 if *h128_1 > *h128_2 * <0 if *h128_1 < *h128_2 * =0 if *h128_1 == *h128_2 */
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) { XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; @@ -4727,6 +5472,7 @@ XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) /*====== Canonical representation ======*/
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) { @@ -4735,10 +5481,11 @@ XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) hash.high64 = XXH_swap64(hash.high64); hash.low64 = XXH_swap64(hash.low64); }
- memcpy(dst, &hash.high64, sizeof(hash.high64));
- memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+ XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+ XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
+/*! @ingroup xxh3_family */ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src) { @@ -4748,6 +5495,69 @@ XXH128_hashFromCanonical(const XXH128_canonical_t* src) return h; }
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+static void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+ XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
+ XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*! @ingroup xxh3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
+{
+ XXH_ASSERT(secretBuffer != NULL);
+ if (secretBuffer == NULL) return XXH_ERROR;
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ if (customSeedSize == 0) {
+ customSeed = XXH3_kSecret;
+ customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+ }
+ XXH_ASSERT(customSeed != NULL);
+ if (customSeed == NULL) return XXH_ERROR;
+
+ /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+ { size_t pos = 0;
+ while (pos < secretSize) {
+ size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+ memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+ pos += toCopy;
+ } }
+
+ { size_t const nbSeg16 = secretSize / 16;
+ size_t n;
+ XXH128_canonical_t scrambler;
+ XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+ for (n=0; n<nbSeg16; n++) {
+ XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+ XXH3_combine16((char*)secretBuffer + n*16, h128);
+ }
+ /* last segment */
+ XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+ }
+ return XXH_OK;
+}

From: Nikita Popov
Date: Sat, 25 Dec 2021 20:55:22 +0100 Subject: [PATCH 12/17] Extract common replacement logic in pass1 The replace const or replace with QM_ASSIGN pattern is common to all constant folding, extract it into a function.
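To make the factored-out pattern concrete, here is a minimal stand-alone C sketch of the same control flow (all types and opcode names are invented stand-ins, not the engine's real API): fold an instruction whose operands are constants, then either rewrite every later use of its result and drop the instruction, or, when that is not possible, keep the instruction as a plain assignment of the folded constant.

    #include <stdbool.h>
    #include <stdio.h>

    enum opcode { OP_ADD, OP_QM_ASSIGN, OP_ECHO_VAR, OP_ECHO_CONST, OP_NOP };

    typedef struct {
        enum opcode op;
        long op1, op2;   /* constants, or a variable id for OP_ECHO_VAR */
        int  result;     /* variable id written by OP_ADD / OP_QM_ASSIGN */
    } toy_op;

    /* Try to rewrite every later reader of `var` to consume `val` directly.
     * A real optimizer can fail here (some consumers must take a variable). */
    static bool replace_uses_by_const(toy_op *ops, int n, int from, int var, long val)
    {
        for (int i = from; i < n; i++) {
            if (ops[i].op == OP_ECHO_VAR && ops[i].op1 == var) {
                ops[i].op  = OP_ECHO_CONST;
                ops[i].op1 = val;
            }
        }
        return true; /* toy model: every consumer accepts constants */
    }

    /* The factored-out helper: either the folding op disappears entirely,
     * or it degrades into a plain assignment of the folded constant. */
    static void replace_by_const_or_qm_assign(toy_op *ops, int n, int i, long val)
    {
        if (replace_uses_by_const(ops, n, i + 1, ops[i].result, val)) {
            ops[i].op = OP_NOP;
        } else {
            ops[i].op  = OP_QM_ASSIGN;
            ops[i].op1 = val;
        }
    }

    int main(void)
    {
        toy_op prog[] = {
            { OP_ADD,      1, 2, 0 },  /* t0 = 1 + 2 (both operands constant) */
            { OP_ECHO_VAR, 0, 0, 0 },  /* echo t0 */
        };
        replace_by_const_or_qm_assign(prog, 2, 0, prog[0].op1 + prog[0].op2);
        for (int i = 0; i < 2; i++)
            if (prog[i].op == OP_ECHO_CONST) printf("%ld\n", prog[i].op1); /* 3 */
        return 0;
    }

The same fold-or-degrade decision previously appeared inline at every constant-folding site in the diff below; centralizing it removes the repeated four-line else branch.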
--- Zend/Optimizer/pass1.c | 131 +++++++++-------------------------------- 1 file changed, 28 insertions(+), 103 deletions(-) diff --git a/Zend/Optimizer/pass1.c b/Zend/Optimizer/pass1.c index b25001182f591..b49b52b5e1465 100644 --- a/Zend/Optimizer/pass1.c +++ b/Zend/Optimizer/pass1.c @@ -35,6 +35,17 @@ #include "zend_execute.h" #include "zend_vm.h" +static void replace_by_const_or_qm_assign(zend_op_array *op_array, zend_op *opline, zval *result) { + if (zend_optimizer_replace_by_const(op_array, opline + 1, opline->result_type, opline->result.var, result)) { + MAKE_NOP(opline); + } else { + opline->opcode = ZEND_QM_ASSIGN; + opline->extended_value = 0; + SET_UNUSED(opline->op2); + zend_optimizer_update_op1_const(op_array, opline, result); + } +} + void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) { zend_op *opline = op_array->opcodes; @@ -46,21 +57,13 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) switch (opline->opcode) { case ZEND_CONCAT: case ZEND_FAST_CONCAT: - if (opline->op1_type == IS_CONST) { - if (Z_TYPE(ZEND_OP1_LITERAL(opline)) != IS_STRING) { - convert_to_string(&ZEND_OP1_LITERAL(opline)); - } + if (opline->op1_type == IS_CONST && Z_TYPE(ZEND_OP1_LITERAL(opline)) != IS_STRING) { + convert_to_string(&ZEND_OP1_LITERAL(opline)); } - if (opline->op2_type == IS_CONST) { - if (Z_TYPE(ZEND_OP2_LITERAL(opline)) != IS_STRING) { - convert_to_string(&ZEND_OP2_LITERAL(opline)); - } - if (opline->op1_type == IS_CONST) { - goto constant_binary_op; - } + if (opline->op2_type == IS_CONST && Z_TYPE(ZEND_OP2_LITERAL(opline)) != IS_STRING) { + convert_to_string(&ZEND_OP2_LITERAL(opline)); } - break; - + ZEND_FALLTHROUGH; case ZEND_ADD: case ZEND_SUB: case ZEND_MUL: @@ -87,17 +90,10 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) /* binary operation with constant operands */ zval result; -constant_binary_op: if (zend_optimizer_eval_binary_op(&result, opline->opcode, &ZEND_OP1_LITERAL(opline), &ZEND_OP2_LITERAL(opline)) == SUCCESS) { literal_dtor(&ZEND_OP1_LITERAL(opline)); literal_dtor(&ZEND_OP2_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_TMP_VAR, opline->result.var, &result)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &result); - } + replace_by_const_or_qm_assign(op_array, opline, &result); } } break; @@ -116,13 +112,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (zend_optimizer_eval_cast(&result, opline->extended_value, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { literal_dtor(&ZEND_OP1_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline + 1, opline->result_type, opline->result.var, &result)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - zend_optimizer_update_op1_const(op_array, opline, &result); - } + replace_by_const_or_qm_assign(op_array, opline, &result); break; } } @@ -136,12 +126,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (zend_optimizer_eval_unary_op(&result, opline->opcode, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { literal_dtor(&ZEND_OP1_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_TMP_VAR, opline->result.var, &result)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - zend_optimizer_update_op1_const(op_array, opline, &result); - } + 
replace_by_const_or_qm_assign(op_array, opline, &result); } } break; @@ -161,14 +146,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if ((offset = zend_get_constant_str("__COMPILER_HALT_OFFSET__", sizeof("__COMPILER_HALT_OFFSET__") - 1)) != NULL) { literal_dtor(&ZEND_OP2_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline, IS_TMP_VAR, opline->result.var, offset)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, offset); - } + replace_by_const_or_qm_assign(op_array, opline, offset); } EG(current_execute_data) = orig_execute_data; break; @@ -188,14 +166,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) break; } literal_dtor(&ZEND_OP2_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline, IS_TMP_VAR, opline->result.var, &c)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &c); - } + replace_by_const_or_qm_assign(op_array, opline, &c); } break; @@ -258,15 +229,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) MAKE_NOP((opline - 1)); } literal_dtor(&ZEND_OP2_LITERAL(opline)); - - if (zend_optimizer_replace_by_const(op_array, opline, IS_TMP_VAR, opline->result.var, &t)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &t); - } + replace_by_const_or_qm_assign(op_array, opline, &t); } } } @@ -367,14 +330,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_VAR, opline->result.var, &t)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &t); - } + replace_by_const_or_qm_assign(op_array, opline, &t); } zend_string_release_ex(lc_name, 0); break; @@ -408,14 +364,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_VAR, opline->result.var, &t)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &t); - } + replace_by_const_or_qm_assign(op_array, opline, &t); break; } else if (zend_string_equals_literal(Z_STR(ZEND_OP2_LITERAL(init_opline)), "constant")) { zval t; @@ -425,14 +374,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_VAR, opline->result.var, &t)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &t); - } + replace_by_const_or_qm_assign(op_array, opline, &t); } break; /* dirname(IS_CONST/IS_STRING) -> IS_CONST/IS_STRING */ @@ -448,14 +390,7 @@ void 
zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_VAR, opline->result.var, &t)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - opline->extended_value = 0; - SET_UNUSED(opline->op2); - zend_optimizer_update_op1_const(op_array, opline, &t); - } + replace_by_const_or_qm_assign(op_array, opline, &t); } else { zend_string_release_ex(dirname, 0); } @@ -472,12 +407,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (zend_optimizer_eval_strlen(&t, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { literal_dtor(&ZEND_OP1_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline + 1, IS_TMP_VAR, opline->result.var, &t)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - zend_optimizer_update_op1_const(op_array, opline, &t); - } + replace_by_const_or_qm_assign(op_array, opline, &t); } } break; @@ -489,12 +419,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) } ZVAL_TRUE(&c); literal_dtor(&ZEND_OP1_LITERAL(opline)); - if (zend_optimizer_replace_by_const(op_array, opline, IS_TMP_VAR, opline->result.var, &c)) { - MAKE_NOP(opline); - } else { - opline->opcode = ZEND_QM_ASSIGN; - zend_optimizer_update_op1_const(op_array, opline, &c); - } + replace_by_const_or_qm_assign(op_array, opline, &c); } break; case ZEND_DECLARE_CONST: From 046096f2652fbd2c1256a6f725aca1b1e47d7915 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 21:00:07 +0100 Subject: [PATCH 13/17] Remove outdated FETCH_CLASS handling in FETCH_CLASS_CONSTANT optimization Nowadays self::X is represented using an UNUSED operand with FETCH_CLASS_SELF flag rather than a separate FETCH_CLASS instruction. The code already handles the new pattern. --- Zend/Optimizer/pass1.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/Zend/Optimizer/pass1.c b/Zend/Optimizer/pass1.c index b49b52b5e1465..b330ca24b4daf 100644 --- a/Zend/Optimizer/pass1.c +++ b/Zend/Optimizer/pass1.c @@ -194,14 +194,6 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) (opline->op1.num & ZEND_FETCH_CLASS_MASK) == ZEND_FETCH_CLASS_SELF) { /* for self::B */ ce = op_array->scope;
- } else if (op_array->scope &&
- opline->op1_type == IS_VAR &&
- (opline - 1)->opcode == ZEND_FETCH_CLASS &&
- ((opline - 1)->op2_type == IS_UNUSED &&
- ((opline - 1)->op1.num & ZEND_FETCH_CLASS_MASK) == ZEND_FETCH_CLASS_SELF) &&
- (opline - 1)->result.var == opline->op1.var) {
- /* for self::B */
- ce = op_array->scope;
} if (ce) { @@ -225,8 +217,6 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (opline->op1_type == IS_CONST) { literal_dtor(&ZEND_OP1_LITERAL(opline));
- } else if (opline->op1_type == IS_VAR) {
- MAKE_NOP((opline - 1));
} literal_dtor(&ZEND_OP2_LITERAL(opline)); replace_by_const_or_qm_assign(op_array, opline, &t); From 2cf93032ee0f9a8defa3df289258fa2a0a12da8d Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 21:51:29 +0100 Subject: [PATCH 14/17] Sink op_array scope case into get_class_entry() This handles references to the current class through its name rather than self (and for cases where it is not linked yet and thus not covered by the context lookup). Rather than handling this only for FETCH_CLASS_CONSTANT optimization, integrate this into the generic get_class_entry() utility.
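A stand-alone sketch of the lookup order this commit establishes (toy types invented for illustration; the real function consults the script's class table, the global class table, and zend_string comparisons): known class tables are tried first, and the op_array's own scope serves as a fallback when the lowercased name matches the enclosing class, which covers classes that are not linked yet.

    #include <ctype.h>
    #include <stdio.h>

    /* Toy stand-ins for zend_class_entry / zend_op_array. */
    typedef struct { const char *name; } class_entry;
    typedef struct { class_entry *scope; } op_array_t;

    static int ci_equal(const char *a, const char *b)
    {
        while (*a && *b && tolower((unsigned char)*a) == tolower((unsigned char)*b)) { a++; b++; }
        return *a == '\0' && *b == '\0';
    }

    /* Mirrors the new lookup order: known tables first, then fall back to
     * the op_array's own scope when the name matches the enclosing class. */
    static class_entry *get_class_entry(class_entry *const *table, int n,
                                        const op_array_t *op_array, const char *lcname)
    {
        for (int i = 0; i < n; i++)
            if (ci_equal(table[i]->name, lcname)) return table[i];
        if (op_array && op_array->scope && ci_equal(op_array->scope->name, lcname))
            return op_array->scope;
        return NULL;
    }

    int main(void)
    {
        class_entry self = { "MyClass" };
        op_array_t oa = { &self };
        /* "MyClass" is in no table (not linked yet), but it is the current scope. */
        class_entry *ce = get_class_entry(NULL, 0, &oa, "myclass");
        printf("%s\n", ce ? ce->name : "(not found)"); /* prints MyClass */
        return 0;
    }

Placing the fallback last keeps the common case (a class already registered in a table) on the fast path while still resolving self-references by name.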
--- Zend/Optimizer/dfa_pass.c | 2 +- Zend/Optimizer/escape_analysis.c | 4 ++-- Zend/Optimizer/pass1.c | 13 ++++--------- Zend/Optimizer/zend_inference.c | 9 +++++---- Zend/Optimizer/zend_optimizer.c | 9 +++++++-- Zend/Optimizer/zend_optimizer_internal.h | 3 ++- Zend/Optimizer/zend_ssa.c | 2 +- 7 files changed, 22 insertions(+), 20 deletions(-) diff --git a/Zend/Optimizer/dfa_pass.c b/Zend/Optimizer/dfa_pass.c index 1f846959bc009..0218a77d3f950 100644 --- a/Zend/Optimizer/dfa_pass.c +++ b/Zend/Optimizer/dfa_pass.c @@ -293,7 +293,7 @@ static inline bool can_elide_return_type_check( ZEND_TYPE_FOREACH(arg_info->type, single_type) { if (ZEND_TYPE_HAS_NAME(*single_type)) { zend_string *lcname = zend_string_tolower(ZEND_TYPE_NAME(*single_type)); - zend_class_entry *ce = zend_optimizer_get_class_entry(script, lcname); + zend_class_entry *ce = zend_optimizer_get_class_entry(script, op_array, lcname); zend_string_release(lcname); bool result = ce && safe_instanceof(use_info->ce, ce); if (result == !is_intersection) { diff --git a/Zend/Optimizer/escape_analysis.c b/Zend/Optimizer/escape_analysis.c index e13015ceb7cd8..e66fc4f9e712c 100644 --- a/Zend/Optimizer/escape_analysis.c +++ b/Zend/Optimizer/escape_analysis.c @@ -160,7 +160,7 @@ static bool is_allocation_def(zend_op_array *op_array, zend_ssa *ssa, int def, i /* objects with destructors should escape */ if (opline->op1_type == IS_CONST) { zend_class_entry *ce = zend_optimizer_get_class_entry( - script, Z_STR_P(CRT_CONSTANT(opline->op1)+1)); + script, op_array, Z_STR_P(CRT_CONSTANT(opline->op1)+1)); uint32_t forbidden_flags = /* These flags will always cause an exception */ ZEND_ACC_IMPLICIT_ABSTRACT_CLASS | ZEND_ACC_EXPLICIT_ABSTRACT_CLASS @@ -228,7 +228,7 @@ static bool is_local_def(zend_op_array *op_array, zend_ssa *ssa, int def, int va /* objects with destructors should escape */ if (opline->op1_type == IS_CONST) { zend_class_entry *ce = zend_optimizer_get_class_entry( - script, Z_STR_P(CRT_CONSTANT(opline->op1)+1)); + script, op_array, Z_STR_P(CRT_CONSTANT(opline->op1)+1)); if (ce && !ce->create_object && !ce->constructor && !ce->destructor && !ce->__get && !ce->__set && !ce->parent) { return 1; diff --git a/Zend/Optimizer/pass1.c b/Zend/Optimizer/pass1.c index b330ca24b4daf..1aad01cb2f1a9 100644 --- a/Zend/Optimizer/pass1.c +++ b/Zend/Optimizer/pass1.c @@ -179,15 +179,10 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (opline->op1_type == IS_CONST && Z_TYPE(ZEND_OP1_LITERAL(opline)) == IS_STRING) { /* for A::B */ - if (op_array->scope && - zend_string_equals_ci(Z_STR(ZEND_OP1_LITERAL(opline)), op_array->scope->name)) { - ce = op_array->scope; - } else { - ce = zend_optimizer_get_class_entry( - ctx->script, Z_STR(op_array->literals[opline->op1.constant + 1])); - if (!ce) { - break; - } + ce = zend_optimizer_get_class_entry( + ctx->script, op_array, Z_STR(op_array->literals[opline->op1.constant + 1])); + if (!ce) { + break; } } else if (op_array->scope && opline->op1_type == IS_UNUSED && diff --git a/Zend/Optimizer/zend_inference.c b/Zend/Optimizer/zend_inference.c index 52d093db116f7..c7df495c0ae09 100644 --- a/Zend/Optimizer/zend_inference.c +++ b/Zend/Optimizer/zend_inference.c @@ -2143,7 +2143,8 @@ static uint32_t zend_convert_type(const zend_script *script, zend_type type, zen * we use a plain object type for class unions. 
*/ if (ZEND_TYPE_HAS_NAME(type)) { zend_string *lcname = zend_string_tolower(ZEND_TYPE_NAME(type)); - *pce = zend_optimizer_get_class_entry(script, lcname); + // TODO: Pass through op_array. + *pce = zend_optimizer_get_class_entry(script, NULL, lcname); zend_string_release_ex(lcname, 0); } } @@ -2231,7 +2232,7 @@ static zend_property_info *zend_fetch_static_prop_info(const zend_script *script } } else if (opline->op2_type == IS_CONST) { zval *zv = CRT_CONSTANT(opline->op2); - ce = zend_optimizer_get_class_entry(script, Z_STR_P(zv + 1)); + ce = zend_optimizer_get_class_entry(script, op_array, Z_STR_P(zv + 1)); } if (ce) { @@ -3015,7 +3016,7 @@ static zend_always_inline zend_result _zend_update_type_info( } else if (opline->op2_type == IS_CONST) { zval *zv = CRT_CONSTANT(opline->op2); if (Z_TYPE_P(zv) == IS_STRING) { - ce = zend_optimizer_get_class_entry(script, Z_STR_P(zv+1)); + ce = zend_optimizer_get_class_entry(script, op_array, Z_STR_P(zv+1)); UPDATE_SSA_OBJ_TYPE(ce, 0, ssa_op->result_def); } else { UPDATE_SSA_OBJ_TYPE(NULL, 0, ssa_op->result_def); @@ -3027,7 +3028,7 @@ static zend_always_inline zend_result _zend_update_type_info( case ZEND_NEW: tmp = MAY_BE_RC1|MAY_BE_RCN|MAY_BE_OBJECT; if (opline->op1_type == IS_CONST && - (ce = zend_optimizer_get_class_entry(script, Z_STR_P(CRT_CONSTANT(opline->op1)+1))) != NULL) { + (ce = zend_optimizer_get_class_entry(script, op_array, Z_STR_P(CRT_CONSTANT(opline->op1)+1))) != NULL) { UPDATE_SSA_OBJ_TYPE(ce, 0, ssa_op->result_def); } else if ((t1 & MAY_BE_CLASS) && ssa_op->op1_use >= 0 && ssa_var_info[ssa_op->op1_use].ce) { UPDATE_SSA_OBJ_TYPE(ssa_var_info[ssa_op->op1_use].ce, ssa_var_info[ssa_op->op1_use].is_instanceof, ssa_op->result_def); diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index 9de7a944da1a5..1abeeefeacaaf 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -694,7 +694,8 @@ void zend_optimizer_shift_jump(zend_op_array *op_array, zend_op *opline, uint32_ } } -zend_class_entry *zend_optimizer_get_class_entry(const zend_script *script, zend_string *lcname) { +zend_class_entry *zend_optimizer_get_class_entry( + const zend_script *script, const zend_op_array *op_array, zend_string *lcname) { zend_class_entry *ce = script ? 
zend_hash_find_ptr(&script->class_table, lcname) : NULL; if (ce) { return ce; @@ -705,6 +706,10 @@ zend_class_entry *zend_optimizer_get_class_entry(const zend_script *script, zend return ce; } + if (op_array && op_array->scope && zend_string_equals_ci(op_array->scope->name, lcname)) { + return op_array->scope; + } + return NULL; } @@ -713,7 +718,7 @@ static zend_class_entry *get_class_entry_from_op1( if (opline->op1_type == IS_CONST) { zval *op1 = CRT_CONSTANT(opline->op1); if (Z_TYPE_P(op1) == IS_STRING) { - return zend_optimizer_get_class_entry(script, Z_STR_P(op1 + 1)); + return zend_optimizer_get_class_entry(script, op_array, Z_STR_P(op1 + 1)); } } else if (opline->op1_type == IS_UNUSED && op_array->scope && !(op_array->scope->ce_flags & ZEND_ACC_TRAIT) diff --git a/Zend/Optimizer/zend_optimizer_internal.h b/Zend/Optimizer/zend_optimizer_internal.h index bd4367eaf276b..550b911052a6b 100644 --- a/Zend/Optimizer/zend_optimizer_internal.h +++ b/Zend/Optimizer/zend_optimizer_internal.h @@ -96,7 +96,8 @@ bool zend_optimizer_replace_by_const(zend_op_array *op_array, uint32_t var, zval *val); zend_op *zend_optimizer_get_loop_var_def(const zend_op_array *op_array, zend_op *free_opline); -zend_class_entry *zend_optimizer_get_class_entry(const zend_script *script, zend_string *lcname); +zend_class_entry *zend_optimizer_get_class_entry( + const zend_script *script, const zend_op_array *op_array, zend_string *lcname); void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx); void zend_optimizer_pass3(zend_op_array *op_array, zend_optimizer_ctx *ctx); diff --git a/Zend/Optimizer/zend_ssa.c b/Zend/Optimizer/zend_ssa.c index 9ca2e11a6f7ee..98f60468a0e69 100644 --- a/Zend/Optimizer/zend_ssa.c +++ b/Zend/Optimizer/zend_ssa.c @@ -527,7 +527,7 @@ static void place_essa_pis( (opline-1)->op2_type == IS_CONST) { int var = EX_VAR_TO_NUM((opline-1)->op1.var); zend_string *lcname = Z_STR_P(CRT_CONSTANT_EX(op_array, (opline-1), (opline-1)->op2) + 1); - zend_class_entry *ce = zend_optimizer_get_class_entry(script, lcname); + zend_class_entry *ce = zend_optimizer_get_class_entry(script, op_array, lcname); if (!ce) { continue; } From 206d80e11abd4d4483c40a2add0ac55a42087c81 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 25 Dec 2021 22:18:50 +0100 Subject: [PATCH 15/17] Reuse get_class_entry_from_op1() helper Export and reuse this helper in places that fetch a class entry from op1. 
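For illustration, a minimal sketch of the two operand shapes the newly exported helper distinguishes (toy encoding, not the real opline layout): a constant class name in op1 for `A::B` style references, and an unused op1 meaning `self`, resolved against the current scope.

    #include <stdio.h>
    #include <string.h>

    typedef struct { const char *name; } class_entry;

    enum op1_kind { OP1_CONST_NAME, OP1_UNUSED_SELF };
    typedef struct { enum op1_kind kind; const char *name; } op1_t;

    /* Toy dispatch mirroring the helper's two cases. */
    static class_entry *get_class_entry_from_op1(class_entry *scope,
                                                 class_entry *known, op1_t op1)
    {
        switch (op1.kind) {
        case OP1_CONST_NAME:   /* "A::B": look the literal name up */
            return (known && strcmp(known->name, op1.name) == 0) ? known : NULL;
        case OP1_UNUSED_SELF:  /* "self::B": the enclosing class, if any */
            return scope;      /* may be NULL outside a class; callers check */
        }
        return NULL;
    }

    int main(void)
    {
        class_entry a = { "A" }, cur = { "Cur" };
        op1_t by_name = { OP1_CONST_NAME, "A" };
        op1_t by_self = { OP1_UNUSED_SELF, NULL };
        printf("%s %s\n",
               get_class_entry_from_op1(&cur, &a, by_name)->name,   /* A   */
               get_class_entry_from_op1(&cur, &a, by_self)->name);  /* Cur */
        return 0;
    }

Centralizing this dispatch is what lets escape analysis, inference, and pass1 below all drop their hand-rolled copies of the same two-case check.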
--- Zend/Optimizer/escape_analysis.c | 42 +++++++++++------------- Zend/Optimizer/pass1.c | 19 ++--------- Zend/Optimizer/zend_inference.c | 4 +-- Zend/Optimizer/zend_optimizer.c | 8 ++--- Zend/Optimizer/zend_optimizer_internal.h | 2 ++ 5 files changed, 30 insertions(+), 45 deletions(-) diff --git a/Zend/Optimizer/escape_analysis.c b/Zend/Optimizer/escape_analysis.c index e66fc4f9e712c..b7c0a5ec4466a 100644 --- a/Zend/Optimizer/escape_analysis.c +++ b/Zend/Optimizer/escape_analysis.c @@ -156,23 +156,22 @@ static bool is_allocation_def(zend_op_array *op_array, zend_ssa *ssa, int def, i switch (opline->opcode) { case ZEND_INIT_ARRAY: return 1; - case ZEND_NEW: + case ZEND_NEW: { /* objects with destructors should escape */ - if (opline->op1_type == IS_CONST) { - zend_class_entry *ce = zend_optimizer_get_class_entry( - script, op_array, Z_STR_P(CRT_CONSTANT(opline->op1)+1)); - uint32_t forbidden_flags = - /* These flags will always cause an exception */ - ZEND_ACC_IMPLICIT_ABSTRACT_CLASS | ZEND_ACC_EXPLICIT_ABSTRACT_CLASS - | ZEND_ACC_INTERFACE | ZEND_ACC_TRAIT; - if (ce && !ce->parent && !ce->create_object && !ce->constructor && - !ce->destructor && !ce->__get && !ce->__set && - !(ce->ce_flags & forbidden_flags) && - (ce->ce_flags & ZEND_ACC_CONSTANTS_UPDATED)) { - return 1; - } + zend_class_entry *ce = zend_optimizer_get_class_entry_from_op1( + script, op_array, opline); + uint32_t forbidden_flags = + /* These flags will always cause an exception */ + ZEND_ACC_IMPLICIT_ABSTRACT_CLASS | ZEND_ACC_EXPLICIT_ABSTRACT_CLASS + | ZEND_ACC_INTERFACE | ZEND_ACC_TRAIT; + if (ce && !ce->parent && !ce->create_object && !ce->constructor && + !ce->destructor && !ce->__get && !ce->__set && + !(ce->ce_flags & forbidden_flags) && + (ce->ce_flags & ZEND_ACC_CONSTANTS_UPDATED)) { + return 1; } break; + } case ZEND_QM_ASSIGN: if (opline->op1_type == IS_CONST && Z_TYPE_P(CRT_CONSTANT(opline->op1)) == IS_ARRAY) { @@ -224,17 +223,16 @@ static bool is_local_def(zend_op_array *op_array, zend_ssa *ssa, int def, int va case ZEND_QM_ASSIGN: case ZEND_ASSIGN: return 1; - case ZEND_NEW: + case ZEND_NEW: { /* objects with destructors should escape */ - if (opline->op1_type == IS_CONST) { - zend_class_entry *ce = zend_optimizer_get_class_entry( - script, op_array, Z_STR_P(CRT_CONSTANT(opline->op1)+1)); - if (ce && !ce->create_object && !ce->constructor && - !ce->destructor && !ce->__get && !ce->__set && !ce->parent) { - return 1; - } + zend_class_entry *ce = zend_optimizer_get_class_entry_from_op1( + script, op_array, opline); + if (ce && !ce->create_object && !ce->constructor && + !ce->destructor && !ce->__get && !ce->__set && !ce->parent) { + return 1; } break; + } } } else if (op->op1_def == var) { switch (opline->opcode) { diff --git a/Zend/Optimizer/pass1.c b/Zend/Optimizer/pass1.c index 1aad01cb2f1a9..7cc7ae912a795 100644 --- a/Zend/Optimizer/pass1.c +++ b/Zend/Optimizer/pass1.c @@ -174,23 +174,8 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (opline->op2_type == IS_CONST && Z_TYPE(ZEND_OP2_LITERAL(opline)) == IS_STRING) { - zend_class_entry *ce = NULL; - - if (opline->op1_type == IS_CONST && - Z_TYPE(ZEND_OP1_LITERAL(opline)) == IS_STRING) { - /* for A::B */ - ce = zend_optimizer_get_class_entry( - ctx->script, op_array, Z_STR(op_array->literals[opline->op1.constant + 1])); - if (!ce) { - break; - } - } else if (op_array->scope && - opline->op1_type == IS_UNUSED && - (opline->op1.num & ZEND_FETCH_CLASS_MASK) == ZEND_FETCH_CLASS_SELF) { - /* for self::B */ - ce = op_array->scope; - 
} - + zend_class_entry *ce = zend_optimizer_get_class_entry_from_op1( + ctx->script, op_array, opline); if (ce) { zend_class_constant *cc; zval *c, t; diff --git a/Zend/Optimizer/zend_inference.c b/Zend/Optimizer/zend_inference.c index c7df495c0ae09..80b01a6db26ac 100644 --- a/Zend/Optimizer/zend_inference.c +++ b/Zend/Optimizer/zend_inference.c @@ -3027,8 +3027,8 @@ static zend_always_inline zend_result _zend_update_type_info( break; case ZEND_NEW: tmp = MAY_BE_RC1|MAY_BE_RCN|MAY_BE_OBJECT; - if (opline->op1_type == IS_CONST && - (ce = zend_optimizer_get_class_entry(script, op_array, Z_STR_P(CRT_CONSTANT(opline->op1)+1))) != NULL) { + ce = zend_optimizer_get_class_entry_from_op1(script, op_array, opline); + if (ce) { UPDATE_SSA_OBJ_TYPE(ce, 0, ssa_op->result_def); } else if ((t1 & MAY_BE_CLASS) && ssa_op->op1_use >= 0 && ssa_var_info[ssa_op->op1_use].ce) { UPDATE_SSA_OBJ_TYPE(ssa_var_info[ssa_op->op1_use].ce, ssa_var_info[ssa_op->op1_use].is_instanceof, ssa_op->result_def); diff --git a/Zend/Optimizer/zend_optimizer.c b/Zend/Optimizer/zend_optimizer.c index 1abeeefeacaaf..f8aee5148cbeb 100644 --- a/Zend/Optimizer/zend_optimizer.c +++ b/Zend/Optimizer/zend_optimizer.c @@ -713,8 +713,8 @@ zend_class_entry *zend_optimizer_get_class_entry( return NULL; } -static zend_class_entry *get_class_entry_from_op1( - zend_script *script, zend_op_array *op_array, zend_op *opline) { +zend_class_entry *zend_optimizer_get_class_entry_from_op1( + const zend_script *script, const zend_op_array *op_array, const zend_op *opline) { if (opline->op1_type == IS_CONST) { zval *op1 = CRT_CONSTANT(opline->op1); if (Z_TYPE_P(op1) == IS_STRING) { @@ -770,7 +770,7 @@ zend_function *zend_optimizer_get_called_func( break; case ZEND_INIT_STATIC_METHOD_CALL: if (opline->op2_type == IS_CONST && Z_TYPE_P(CRT_CONSTANT(opline->op2)) == IS_STRING) { - zend_class_entry *ce = get_class_entry_from_op1( + zend_class_entry *ce = zend_optimizer_get_class_entry_from_op1( script, op_array, opline); if (ce) { zend_string *func_name = Z_STR_P(CRT_CONSTANT(opline->op2) + 1); @@ -812,7 +812,7 @@ zend_function *zend_optimizer_get_called_func( break; case ZEND_NEW: { - zend_class_entry *ce = get_class_entry_from_op1( + zend_class_entry *ce = zend_optimizer_get_class_entry_from_op1( script, op_array, opline); if (ce && ce->type == ZEND_USER_CLASS) { return ce->constructor; diff --git a/Zend/Optimizer/zend_optimizer_internal.h b/Zend/Optimizer/zend_optimizer_internal.h index 550b911052a6b..0116e1506000d 100644 --- a/Zend/Optimizer/zend_optimizer_internal.h +++ b/Zend/Optimizer/zend_optimizer_internal.h @@ -98,6 +98,8 @@ bool zend_optimizer_replace_by_const(zend_op_array *op_array, zend_op *zend_optimizer_get_loop_var_def(const zend_op_array *op_array, zend_op *free_opline); zend_class_entry *zend_optimizer_get_class_entry( const zend_script *script, const zend_op_array *op_array, zend_string *lcname); +zend_class_entry *zend_optimizer_get_class_entry_from_op1( + const zend_script *script, const zend_op_array *op_array, const zend_op *opline); void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx); void zend_optimizer_pass3(zend_op_array *op_array, zend_optimizer_ctx *ctx); From 924e875651ca77d6d3a07f32ed26fc9b70df0b01 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sun, 26 Dec 2021 09:48:06 +0100 Subject: [PATCH 16/17] Minor code cleanup in pass1 Move literal destruction into helper and use a common result variable to make code more compact. 
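A toy model of the cleanup's effect (invented types; the real helper operates on zvals and oplines): once the helper releases constant operands itself, each call site reduces to a single call with one shared result variable instead of repeating dtor calls and the replace-or-assign branch at every switch arm.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy "literal" that owns heap memory, like a zval holding a string. */
    typedef struct { char *str; } literal;
    static void literal_dtor(literal *l) { free(l->str); l->str = NULL; }

    typedef struct { literal op1, op2; long folded; int has_fold; } toy_op;

    /* After the cleanup, the helper frees constant operands itself, so a
     * call site is one line instead of dtor+dtor+replace everywhere. */
    static void replace_by_const_or_qm_assign(toy_op *op, long result)
    {
        if (op->op1.str) literal_dtor(&op->op1);
        if (op->op2.str) literal_dtor(&op->op2);
        op->folded = result;
        op->has_fold = 1;
    }

    int main(void)
    {
        toy_op op = { { strdup("1") }, { strdup("2") }, 0, 0 };
        long result = 3;  /* one shared result, like the single zval in pass1 */
        replace_by_const_or_qm_assign(&op, result);
        printf("%ld %d\n", op.folded, op.op1.str == NULL); /* prints: 3 1 */
        return 0;
    }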
--- Zend/Optimizer/pass1.c | 124 +++++++++++++++-------------------------- 1 file changed, 45 insertions(+), 79 deletions(-) diff --git a/Zend/Optimizer/pass1.c b/Zend/Optimizer/pass1.c index 7cc7ae912a795..4477a0270a393 100644 --- a/Zend/Optimizer/pass1.c +++ b/Zend/Optimizer/pass1.c @@ -36,6 +36,12 @@ #include "zend_vm.h" static void replace_by_const_or_qm_assign(zend_op_array *op_array, zend_op *opline, zval *result) { + if (opline->op1_type == IS_CONST) { + literal_dtor(&ZEND_OP1_LITERAL(opline)); + } + if (opline->op2_type == IS_CONST) { + literal_dtor(&ZEND_OP2_LITERAL(opline)); + } if (zend_optimizer_replace_by_const(op_array, opline + 1, opline->result_type, opline->result.var, result)) { MAKE_NOP(opline); } else { @@ -52,6 +58,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) zend_op *end = opline + op_array->last; bool collect_constants = (ZEND_OPTIMIZER_PASS_15 & ctx->optimization_level)? (op_array == &ctx->script->main_op_array) : 0; + zval result; while (opline < end) { switch (opline->opcode) { @@ -85,16 +92,9 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) case ZEND_SPACESHIP: case ZEND_CASE: case ZEND_CASE_STRICT: - if (opline->op1_type == IS_CONST && - opline->op2_type == IS_CONST) { - /* binary operation with constant operands */ - zval result; - - if (zend_optimizer_eval_binary_op(&result, opline->opcode, &ZEND_OP1_LITERAL(opline), &ZEND_OP2_LITERAL(opline)) == SUCCESS) { - literal_dtor(&ZEND_OP1_LITERAL(opline)); - literal_dtor(&ZEND_OP2_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &result); - } + if (opline->op1_type == IS_CONST && opline->op2_type == IS_CONST && + zend_optimizer_eval_binary_op(&result, opline->opcode, &ZEND_OP1_LITERAL(opline), &ZEND_OP2_LITERAL(opline)) == SUCCESS) { + replace_by_const_or_qm_assign(op_array, opline, &result); } break; @@ -106,28 +106,17 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) break; case ZEND_CAST: - if (opline->op1_type == IS_CONST) { - /* cast of constant operand */ - zval result; - - if (zend_optimizer_eval_cast(&result, opline->extended_value, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { - literal_dtor(&ZEND_OP1_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &result); - break; - } + if (opline->op1_type == IS_CONST && + zend_optimizer_eval_cast(&result, opline->extended_value, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { + replace_by_const_or_qm_assign(op_array, opline, &result); } break; case ZEND_BW_NOT: case ZEND_BOOL_NOT: - if (opline->op1_type == IS_CONST) { - /* unary operation on constant operand */ - zval result; - - if (zend_optimizer_eval_unary_op(&result, opline->opcode, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { - literal_dtor(&ZEND_OP1_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &result); - } + if (opline->op1_type == IS_CONST && + zend_optimizer_eval_unary_op(&result, opline->opcode, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { + replace_by_const_or_qm_assign(op_array, opline, &result); } break; @@ -155,18 +144,15 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (opline->op2_type == IS_CONST && Z_TYPE(ZEND_OP2_LITERAL(opline)) == IS_STRING) { /* substitute persistent constants */ - zval c; - - if (!zend_optimizer_get_persistent_constant(Z_STR(ZEND_OP2_LITERAL(opline)), &c, 1)) { - if (!ctx->constants || !zend_optimizer_get_collected_constant(ctx->constants, &ZEND_OP2_LITERAL(opline), &c)) { + if 
(!zend_optimizer_get_persistent_constant(Z_STR(ZEND_OP2_LITERAL(opline)), &result, 1)) { + if (!ctx->constants || !zend_optimizer_get_collected_constant(ctx->constants, &ZEND_OP2_LITERAL(opline), &result)) { break; } } - if (Z_TYPE(c) == IS_CONSTANT_AST) { + if (Z_TYPE(result) == IS_CONSTANT_AST) { break; } - literal_dtor(&ZEND_OP2_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &c); + replace_by_const_or_qm_assign(op_array, opline, &result); } break; @@ -177,29 +163,22 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) zend_class_entry *ce = zend_optimizer_get_class_entry_from_op1( ctx->script, op_array, opline); if (ce) { - zend_class_constant *cc; - zval *c, t; - - if ((cc = zend_hash_find_ptr(&ce->constants_table, - Z_STR(ZEND_OP2_LITERAL(opline)))) != NULL && - (ZEND_CLASS_CONST_FLAGS(cc) & ZEND_ACC_PPP_MASK) == ZEND_ACC_PUBLIC) { - c = &cc->value; + zend_class_constant *cc = zend_hash_find_ptr( + &ce->constants_table, Z_STR(ZEND_OP2_LITERAL(opline))); + if (cc && (ZEND_CLASS_CONST_FLAGS(cc) & ZEND_ACC_PPP_MASK) == ZEND_ACC_PUBLIC) { + zval *c = &cc->value; if (Z_TYPE_P(c) == IS_CONSTANT_AST) { zend_ast *ast = Z_ASTVAL_P(c); if (ast->kind != ZEND_AST_CONSTANT - || !zend_optimizer_get_persistent_constant(zend_ast_get_constant_name(ast), &t, 1) - || Z_TYPE(t) == IS_CONSTANT_AST) { + || !zend_optimizer_get_persistent_constant(zend_ast_get_constant_name(ast), &result, 1) + || Z_TYPE(result) == IS_CONSTANT_AST) { break; } } else { - ZVAL_COPY_OR_DUP(&t, c); + ZVAL_COPY_OR_DUP(&result, c); } - if (opline->op1_type == IS_CONST) { - literal_dtor(&ZEND_OP1_LITERAL(opline)); - } - literal_dtor(&ZEND_OP2_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &t); + replace_by_const_or_qm_assign(op_array, opline, &result); } } } @@ -294,18 +273,16 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) && func->module->handle == NULL #endif ) { - zval t; - ZVAL_TRUE(&t); + ZVAL_TRUE(&result); literal_dtor(&ZEND_OP2_LITERAL(init_opline)); MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - replace_by_const_or_qm_assign(op_array, opline, &t); + replace_by_const_or_qm_assign(op_array, opline, &result); } zend_string_release_ex(lc_name, 0); break; } else if (zend_string_equals_literal(Z_STR(ZEND_OP2_LITERAL(init_opline)), "extension_loaded")) { - zval t; zend_string *lc_name = zend_string_tolower( Z_STR(ZEND_OP1_LITERAL(send1_opline))); zend_module_entry *m = zend_hash_find_ptr(&module_registry, @@ -316,7 +293,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) if (PG(enable_dl)) { break; } else { - ZVAL_FALSE(&t); + ZVAL_FALSE(&result); } } else { if (m->type == MODULE_PERSISTENT @@ -324,7 +301,7 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) && m->handle == NULL #endif ) { - ZVAL_TRUE(&t); + ZVAL_TRUE(&result); } else { break; } @@ -334,17 +311,15 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - replace_by_const_or_qm_assign(op_array, opline, &t); + replace_by_const_or_qm_assign(op_array, opline, &result); break; } else if (zend_string_equals_literal(Z_STR(ZEND_OP2_LITERAL(init_opline)), "constant")) { - zval t; - - if (zend_optimizer_get_persistent_constant(Z_STR(ZEND_OP1_LITERAL(send1_opline)), &t, 1)) { + if (zend_optimizer_get_persistent_constant(Z_STR(ZEND_OP1_LITERAL(send1_opline)), 
&result, 1)) { literal_dtor(&ZEND_OP2_LITERAL(init_opline)); MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - replace_by_const_or_qm_assign(op_array, opline, &t); + replace_by_const_or_qm_assign(op_array, opline, &result); } break; /* dirname(IS_CONST/IS_STRING) -> IS_CONST/IS_STRING */ @@ -353,14 +328,12 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) zend_string *dirname = zend_string_init(Z_STRVAL(ZEND_OP1_LITERAL(send1_opline)), Z_STRLEN(ZEND_OP1_LITERAL(send1_opline)), 0); ZSTR_LEN(dirname) = zend_dirname(ZSTR_VAL(dirname), ZSTR_LEN(dirname)); if (IS_ABSOLUTE_PATH(ZSTR_VAL(dirname), ZSTR_LEN(dirname))) { - zval t; - - ZVAL_STR(&t, dirname); + ZVAL_STR(&result, dirname); literal_dtor(&ZEND_OP2_LITERAL(init_opline)); MAKE_NOP(init_opline); literal_dtor(&ZEND_OP1_LITERAL(send1_opline)); MAKE_NOP(send1_opline); - replace_by_const_or_qm_assign(op_array, opline, &t); + replace_by_const_or_qm_assign(op_array, opline, &result); } else { zend_string_release_ex(dirname, 0); } @@ -372,25 +345,18 @@ void zend_optimizer_pass1(zend_op_array *op_array, zend_optimizer_ctx *ctx) break; } case ZEND_STRLEN: - if (opline->op1_type == IS_CONST) { - zval t; - - if (zend_optimizer_eval_strlen(&t, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { - literal_dtor(&ZEND_OP1_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &t); - } + if (opline->op1_type == IS_CONST && + zend_optimizer_eval_strlen(&result, &ZEND_OP1_LITERAL(opline)) == SUCCESS) { + replace_by_const_or_qm_assign(op_array, opline, &result); } break; case ZEND_DEFINED: - { - zval c; - if (!zend_optimizer_get_persistent_constant(Z_STR(ZEND_OP1_LITERAL(opline)), &c, 0)) { - break; - } - ZVAL_TRUE(&c); - literal_dtor(&ZEND_OP1_LITERAL(opline)); - replace_by_const_or_qm_assign(op_array, opline, &c); + if (!zend_optimizer_get_persistent_constant(Z_STR(ZEND_OP1_LITERAL(opline)), &result, 0)) { + break; } + ZVAL_TRUE(&result); + literal_dtor(&ZEND_OP1_LITERAL(opline)); + replace_by_const_or_qm_assign(op_array, opline, &result); break; case ZEND_DECLARE_CONST: if (collect_constants && From c4334fc616e65962bf2f4253d4a6c14ccaa48981 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sun, 26 Dec 2021 09:54:25 +0100 Subject: [PATCH 17/17] Remove special chr/count handling in sccp function evaluation These can be handled by the generic code. Worth noting that count will usually go through ZEND_COUNT, and chr on constants is evaluated in the compiler, so these are not particularly compile-time sensitive either. --- Zend/Optimizer/sccp.c | 18 +----------------- ext/standard/basic_functions.stub.php | 1 + ext/standard/basic_functions_arginfo.h | 4 ++-- 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/Zend/Optimizer/sccp.c b/Zend/Optimizer/sccp.c index 5f3b7639ccdc7..df323b9000075 100644 --- a/Zend/Optimizer/sccp.c +++ b/Zend/Optimizer/sccp.c @@ -787,23 +787,7 @@ static inline zend_result ct_eval_func_call( if (num_args == 1) { /* Handle a few functions for which we manually implement evaluation here. 
*/ - if (zend_string_equals_literal(name, "chr")) { - zend_long c; - if (Z_TYPE_P(args[0]) != IS_LONG) { - return FAILURE; - } - - c = Z_LVAL_P(args[0]) & 0xff; - ZVAL_CHAR(result, c); - return SUCCESS; - } else if (zend_string_equals_literal(name, "count")) { - if (Z_TYPE_P(args[0]) != IS_ARRAY) { - return FAILURE; - } - - ZVAL_LONG(result, zend_hash_num_elements(Z_ARRVAL_P(args[0]))); - return SUCCESS; - } else if (zend_string_equals_literal(name, "ini_get")) { + if (zend_string_equals_literal(name, "ini_get")) { zend_ini_entry *ini_entry; if (Z_TYPE_P(args[0]) != IS_STRING) { diff --git a/ext/standard/basic_functions.stub.php b/ext/standard/basic_functions.stub.php index e5483cf23b644..5a6e611e8778a 100755 --- a/ext/standard/basic_functions.stub.php +++ b/ext/standard/basic_functions.stub.php @@ -82,6 +82,7 @@ function krsort(array &$array, int $flags = SORT_REGULAR): bool {} /** @return true */ function ksort(array &$array, int $flags = SORT_REGULAR): bool {} +/** @compile-time-eval */ function count(Countable|array $value, int $mode = COUNT_NORMAL): int {} /** @alias count */ diff --git a/ext/standard/basic_functions_arginfo.h b/ext/standard/basic_functions_arginfo.h index 1f37885813c09..850c3f7a3051a 100644 --- a/ext/standard/basic_functions_arginfo.h +++ b/ext/standard/basic_functions_arginfo.h @@ -1,5 +1,5 @@ /* This is a generated file, edit the .stub.php file instead. - * Stub hash: cbba5dd593bba640750378c7d668b9e4ea6c979d */ + * Stub hash: ed5328c35c17c591847feba6cb00f5badb2b446c */ ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_set_time_limit, 0, 1, _IS_BOOL, 0) ZEND_ARG_TYPE_INFO(0, seconds, IS_LONG, 0) @@ -2870,7 +2870,7 @@ static const zend_function_entry ext_functions[] = { ZEND_FE(array_push, arginfo_array_push) ZEND_FE(krsort, arginfo_krsort) ZEND_FE(ksort, arginfo_ksort) - ZEND_FE(count, arginfo_count) + ZEND_SUPPORTS_COMPILE_TIME_EVAL_FE(count, arginfo_count) ZEND_FALIAS(sizeof, count, arginfo_sizeof) ZEND_FE(natsort, arginfo_natsort) ZEND_FE(natcasesort, arginfo_natcasesort)
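
For context, the observable effect can be sketched as follows (assumption:
opcache with SCCP enabled; as the message above notes, a plain count() call
normally compiles to ZEND_COUNT and is folded there, so the
@compile-time-eval flag mainly matters for the remaining ordinary call
paths):

<?php
// count() over a constant array is a constant-folding candidate, either
// via the ZEND_COUNT opcode or, for ordinary DO_ICALL forms, via the
// generic ct_eval_func_call() path now that the stub carries
// /** @compile-time-eval */. chr() on constant arguments is evaluated by
// the compiler itself, so neither needs a special case in sccp.c.
function f(): int {
    return count([1, 2, 3]); // expected to fold to int(3)
}
var_dump(f());
?>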