clang warns several times along the lines of:
lib/zstd/compress.c:1043:7: warning: bitwise and of boolean expressions; did you mean logical and? [-Wbool-operation-and]
if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
&&
Bitwise ANDs do not short-circuit, meaning that the ZSTD_read32 calls
will be evaluated even if the first condition is not true. This is not
always a problem, but it is not a standard way to write conditionals,
so replace the bitwise ANDs with logical ones to fix the warning and
make the code clearer.
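To make the difference concrete, here is a minimal standalone sketch
(illustration only, not part of the patch; read32() is a hypothetical
stand-in for ZSTD_read32()) that counts how often the second operand
actually runs:

  #include <stdio.h>

  static int reads; /* counts how often the "expensive" operand ran */

  static unsigned read32(const unsigned *p)
  {
      reads++;
      return *p;
  }

  int main(void)
  {
      unsigned buf[2] = { 42, 42 };
      unsigned offset_1 = 0; /* first condition is false */

      reads = 0;
      if ((offset_1 > 0) & (read32(&buf[0]) == read32(&buf[1])))
          ;
      printf("bitwise &:  %d reads\n", reads); /* 2: both sides ran */

      reads = 0;
      if ((offset_1 > 0) && (read32(&buf[0]) == read32(&buf[1])))
          ;
      printf("logical &&: %d reads\n", reads); /* 0: RHS skipped */

      return 0;
  }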
Signed-off-by: Nathan Chancellor <[email protected]>
---
lib/zstd/compress.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
index b080264ed3ad..0e515d1d4237 100644
--- a/lib/zstd/compress.c
+++ b/lib/zstd/compress.c
@@ -1040,7 +1040,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t sr
const BYTE *match = base + matchIndex;
hashTable[h] = curr; /* update hash table */
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
+ if ((offset_1 > 0) && (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
@@ -1072,7 +1072,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t sr
hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
/* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
+ while ((ip <= ilimit) && ((offset_2 > 0) && (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
/* store sequence */
size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
{
@@ -1291,7 +1291,7 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, siz
const BYTE *match = base + matchIndexS;
hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
+ if ((offset_1 > 0) && (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
@@ -1350,7 +1350,7 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, siz
hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
/* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
+ while ((ip <= ilimit) && ((offset_2 > 0) && (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
/* store sequence */
size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
{
@@ -1929,7 +1929,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t src
const BYTE *start = ip + 1;
/* check repCode */
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
+ if ((offset_1 > 0) && (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
/* repcode : we take it */
matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
if (depth == 0)
@@ -1953,7 +1953,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t src
if (depth >= 1)
while (ip < ilimit) {
ip++;
- if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
+ if ((offset) && ((offset_1 > 0) && (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
int const gain2 = (int)(mlRep * 3);
int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
@@ -1974,7 +1974,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t src
/* let's find an even better one */
if ((depth == 2) && (ip < ilimit)) {
ip++;
- if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
+ if ((offset) && ((offset_1 > 0) && (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
int const gain2 = (int)(ml2 * 4);
int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
@@ -2021,7 +2021,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t src
}
/* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
+ while ((ip <= ilimit) && ((offset_2 > 0) && (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
/* store sequence */
matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
offset = offset_2;
base-commit: ba31f97d43be41ca99ab72a6131d7c226306865f
--
2.33.0.rc2
You should CC Nick Terrell for ZSTD patches, added.
On Sat, Aug 14, 2021 at 05:41:54PM -0700, Nathan Chancellor wrote:
> clang warns several times along the lines of:
>
> lib/zstd/compress.c:1043:7: warning: bitwise and of boolean expressions; did you mean logical and? [-Wbool-operation-and]
>
> [ rest of patch snipped ]
On 8/16/2021 8:14 AM, David Sterba wrote:
> You should CC Nick Terrell for ZSTD patches, added.
Thanks for the info and adding him, I did not see an entry for lib/zstd/
in MAINTAINERS and there is no consistent person picking up patches
according to git.
Cheers,
Nathan
On Mon, Aug 16, 2021 at 09:53:53AM -0700, Nathan Chancellor wrote:
> On 8/16/2021 8:14 AM, David Sterba wrote:
> > You should CC Nick Terrell for ZSTD patches, added.
>
> Thanks for the info and adding him, I did not see an entry for lib/zstd/
> in MAINTAINERS and there is no consistent person picking up patches
> according to git.
It's in an intermediate state [1]. So far the compression algorithms
have been under the crypto subsystem because they're part of its API,
but ZSTD is a bigger beast, IMHO deserving an independent git tree and
merge flow, as I just noticed.
[1] https://lwn.net/ml/linux-kernel/[email protected]/
On Sat, 14 Aug 2021 17:41:54 -0700 Nathan Chancellor <[email protected]> wrote:
> clang warns several times along the lines of:
>
> lib/zstd/compress.c:1043:7: warning: bitwise and of boolean expressions; did you mean logical and? [-Wbool-operation-and]
> if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
> ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> &&
>
> Bitwise ANDs do not short circuit, meaning that the ZSTD_read32 calls
> will be evaluated even if the first condition is not true. This is not
> always a problem but it is not a standard way to do conditionals so
> replace the bitwise ANDs with logical ones to fix the warning and make
> the code clearer.
>
> ...
>
> --- a/lib/zstd/compress.c
> +++ b/lib/zstd/compress.c
> @@ -1040,7 +1040,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t sr
> const BYTE *match = base + matchIndex;
> hashTable[h] = curr; /* update hash table */
>
> - if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
> + if ((offset_1 > 0) && (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
yeah, this is a late night party trick which is sometimes used to
attempt to speed things up by avoiding a branch. It is perhaps
beneficial if the LHS is almost always true. I guess.
I'd prefer that the code not do this - it's silly, looks wrong and I
bet it's unmeasurable.
But I think this code is supposed to be kept in sync with an
out-of-tree upstream version so this change might be problematic.
Dunno, let's see what Nick thinks.
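As an aside, the trade-off behind the trick can be sketched in a few
lines (hypothetical helper functions, not ZSTD code): with '&&' the
compiler has to guard the second comparison behind a conditional
branch, while with '&' it is free to evaluate both comparisons
unconditionally and combine the two flag results, e.g. with setcc/and
on x86, leaving one branch to predict instead of two.

  /* Both functions compute the same truth value. Whether the '&'
   * form actually wins depends on the compiler and on how
   * predictable offset is; many compilers already emit branchless
   * code for both when the operands are cheap and side-effect
   * free. */
  int rep_and(unsigned offset, unsigned a, unsigned b)
  {
      return (offset > 0) & (a == b);   /* both tests always run */
  }

  int rep_andand(unsigned offset, unsigned a, unsigned b)
  {
      return (offset > 0) && (a == b);  /* (a == b) only if offset > 0 */
  }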
From: Andrew Morton
> Sent: 17 August 2021 02:06
>
> On Sat, 14 Aug 2021 17:41:54 -0700 Nathan Chancellor <[email protected]> wrote:
>
> > [ patch snipped ]
>
> yeah, this is a late night party trick which is sometimes used to
> attempt to speed things up by avoiding a branch. It is perhaps
> beneficial if the LHS is almost always true. I guess.
>
> I'd prefer that the code not do this - it's silly, looks wrong and I
> bet it's unmeasurable.
>
> But I think this code is supposed to be kept in sync with an
> out-of-tree upstream version so this change might be problematic.
Except that in this case you want the short-circuit.
The RH condition is slow and always true when offset_1 is zero.
OTOH subtracting offset_1 makes the lines look odd (out of context).
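Spelling that case out as a standalone sketch (read32() here is a
hypothetical stand-in for ZSTD_read32()):

  #include <string.h>

  typedef unsigned char BYTE;

  /* Unaligned 32-bit load, in the spirit of ZSTD_read32(). */
  static unsigned read32(const void *p)
  {
      unsigned v;
      memcpy(&v, p, sizeof(v));
      return v;
  }

  /* When offset_1 == 0, ip + 1 - offset_1 == ip + 1, so the two
   * reads alias and the comparison is trivially true. The '&' form
   * then reduces to the offset_1 > 0 test (0 & 1 == 0), so it is
   * functionally correct -- but the load and compare still execute,
   * which is exactly what '&&' would skip. */
  static int repcode_check(const BYTE *ip, unsigned offset_1)
  {
      return (offset_1 > 0) & (read32(ip + 1 - offset_1) == read32(ip + 1));
  }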
David