diff --git a/fix_remaining_conflicts.ps1 b/fix_remaining_conflicts.ps1 new file mode 100644 index 000000000000..d2928cad0577 --- /dev/null +++ b/fix_remaining_conflicts.ps1 @@ -0,0 +1,19 @@ +# Fix the remaining Git merge conflicts +$filePath = "hbase-common\src\main\java\org\apache\hadoop\hbase\CellComparatorImpl.java" +$content = Get-Content $filePath -Raw + +# Fix all remaining conflict patterns +$content = $content -replace '<<<<<<< HEAD\s*\n\s*private int compareBBKV\(final ByteBufferKeyValue left, final ByteBufferKeyValue right\) \{\s*\n=======\s*\n\s*private static int compareBBKV\(final ByteBufferKeyValue left, final ByteBufferKeyValue right\) \{\s*\n>>>>>>> rvv-optimization', 'private static int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue right) {' + +$content = $content -replace '<<<<<<< HEAD\s*\n\s*private int compareKVVsBBKV\(final KeyValue left, final ByteBufferKeyValue right\) \{\s*\n=======\s*\n\s*private static int compareKVVsBBKV\(final KeyValue left, final ByteBufferKeyValue right\) \{\s*\n>>>>>>> rvv-optimization', 'private static int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) {' + +# Fix method-call conflicts +$content = $content -replace '<<<<<<< HEAD\s*\n\s*diff = ByteBufferUtils\.compareTo\(left\.getRowByteBuffer\(\), left\.getRowPosition\(\), leftRowLength,\s*\n\s*right\.getRowByteBuffer\(\), right\.getRowPosition\(\), rightRowLength\);\s*\n=======\s*\n\s*diff = ByteBufferUtils\.compareToRvv\(left\.getRowByteBuffer\(\), left\.getRowPosition\(\), leftRowLength,\s*\n\s*right\.getRowByteBuffer\(\), right\.getRowPosition\(\), rightRowLength\);\s*\n>>>>>>> rvv-optimization', 'diff = ByteBufferUtils.compareToRvv(left.getRowByteBuffer(), left.getRowPosition(), leftRowLength, right.getRowByteBuffer(), right.getRowPosition(), rightRowLength);' + +# Fix other common conflict patterns +$content = $content -replace '<<<<<<< HEAD\s*\n\s*.*?\s*\n=======\s*\n\s*(.*?)\s*\n>>>>>>> rvv-optimization', '$1' + +# Save the file +Set-Content $filePath $content -Encoding UTF8 + +Write-Host "Conflict fixes complete!" diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 0e6a53ca7c47..ab51983098bb --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -1,4 +1,4 @@ -/* +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,25 +22,34 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.util.RVVByteBufferUtils; /** - * Compare two HBase cells. Do not use this method comparing -ROOT- or - * hbase:meta cells. Cells from these tables need a specialized comparator, one that - * takes account of the special formatting of the row where we have commas to delimit table from - * regionname, from row. See KeyValue for how it has a special comparator to do hbase:meta cells and + * Compare two HBase cells. Do not use this method comparing -ROOT- + * or + * hbase:meta cells. Cells from these tables need a specialized + * comparator, one that + * takes account of the special formatting of the row where we have commas to + * delimit table from + * regionname, from row. See KeyValue for how it has a special comparator to do + * hbase:meta cells and * yet another for -ROOT-. *

- * While using this comparator for {{@link #compareRows(Cell, Cell)} et al, the hbase:meta cells - * format should be taken into consideration, for which the instance of this comparator should be + * While using this comparator for {{@link #compareRows(Cell, Cell)} et al, the + * hbase:meta cells + * format should be taken into consideration, for which the instance of this + * comparator should be * used. In all other cases the static APIs in this comparator would be enough *

- * HOT methods. We spend a good portion of CPU comparing. Anything that makes the compare faster - * will likely manifest at the macro level. + * HOT methods. We spend a good portion of CPU comparing. Anything that makes + * the compare faster + * will likely manifest at the macro level. See also {@link BBKVComparator}. Use + * it when mostly + * {@link ByteBufferKeyValue}s. *

*/ -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UNKNOWN", - justification = "Findbugs doesn't like the way we are negating the result of" - + " a compare in below") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UNKNOWN", justification = "Findbugs doesn't like the way we are negating the result of" + + " a compare in below") @InterfaceAudience.Private @InterfaceStability.Evolving public class CellComparatorImpl implements CellComparator { @@ -48,7 +57,8 @@ public class CellComparatorImpl implements CellComparator { private static final long serialVersionUID = 8186411895799094989L; /** - * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion of + * Comparator for plain key/values; i.e. non-catalog table key/values. Works on + * Key portion of * KeyValue only. */ public static final CellComparatorImpl COMPARATOR = new CellComparatorImpl(); @@ -96,45 +106,43 @@ public int compare(final Cell l, final Cell r, boolean ignoreSequenceid) { return diff; } } - - if (ignoreSequenceid) { - return diff; - } - // Negate following comparisons so later edits show up first mvccVersion: later sorts first - return Long.compare(PrivateCellUtil.getSequenceId(r), PrivateCellUtil.getSequenceId(l)); + // Negate following comparisons so later edits show up first mvccVersion: later + // sorts first + return ignoreSequenceid ? diff : Long.compare(((KeyValue) r).getSequenceId(), ((KeyValue) l).getSequenceId()); } - private int compareKeyValues(final KeyValue left, final KeyValue right) { + private static int compareKeyValues(final KeyValue left, final KeyValue right) { int diff; // Compare Rows. Cache row length. int leftRowLength = left.getRowLength(); int rightRowLength = right.getRowLength(); diff = Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, - right.getRowArray(), right.getRowOffset(), rightRowLength); + right.getRowArray(), right.getRowOffset(), rightRowLength); if (diff != 0) { return diff; } - // If the column is not specified, the "minimum" key type appears as latest in the sorted - // order, regardless of the timestamp. This is used for specifying the last key/value in a - // given row, because there is no "lexicographically last column" (it would be infinitely + // If the column is not specified, the "minimum" key type appears as latest in + // the sorted + // order, regardless of the timestamp. This is used for specifying the last + // key/value in a + // given row, because there is no "lexicographically last column" (it would be + // infinitely // long). - // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // The "maximum" key type does not need this behavior. Copied from KeyValue. + // This is bad in // that // we can't do memcmp w/ special rules like this. // TODO: Is there a test for this behavior? int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. 
byte leftType = left.getTypeByte(leftKeyLength); - if ( - leftType == KeyValue.Type.Minimum.getCode() && leftFamilyLength + leftQualifierLength == 0 - ) { + if (leftType == KeyValue.Type.Minimum.getCode() && leftFamilyLength + leftQualifierLength == 0) { // left is "bigger", i.e. it appears later in the sorted order return 1; } @@ -142,32 +150,29 @@ private int compareKeyValues(final KeyValue left, final KeyValue right) { int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // No need of right row length below here. byte rightType = right.getTypeByte(rightKeyLength); - if ( - rightType == KeyValue.Type.Minimum.getCode() && rightFamilyLength + rightQualifierLength == 0 - ) { + if (rightType == KeyValue.Type.Minimum.getCode() && rightFamilyLength + rightQualifierLength == 0) { return -1; } // Compare families. int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); int rightFamilyPosition = right.getFamilyOffset(rightFamilyLengthPosition); - diff = compareFamilies(left, leftFamilyPosition, leftFamilyLength, right, rightFamilyPosition, - rightFamilyLength); + diff = Bytes.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, + right.getFamilyArray(), rightFamilyPosition, rightFamilyLength); if (diff != 0) { return diff; } // Compare qualifiers diff = Bytes.compareTo(left.getQualifierArray(), - left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, - right.getQualifierArray(), right.getQualifierOffset(rightFamilyPosition, rightFamilyLength), - rightQualifierLength); + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierArray(), right.getQualifierOffset(rightFamilyPosition, rightFamilyLength), + rightQualifierLength); if (diff != 0) { return diff; } @@ -187,37 +192,39 @@ private int compareKeyValues(final KeyValue left, final KeyValue right) { return (0xff & rightType) - (0xff & leftType); } - private int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue right) { + private static int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue right) { int diff; // Compare Rows. Cache row length. int leftRowLength = left.getRowLength(); int rightRowLength = right.getRowLength(); diff = ByteBufferUtils.compareTo(left.getRowByteBuffer(), left.getRowPosition(), leftRowLength, - right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + if (diff != 0) { return diff; } - // If the column is not specified, the "minimum" key type appears as latest in the sorted - // order, regardless of the timestamp. This is used for specifying the last key/value in a - // given row, because there is no "lexicographically last column" (it would be infinitely + // If the column is not specified, the "minimum" key type appears as latest in + // the sorted + // order, regardless of the timestamp. This is used for specifying the last + // key/value in a + // given row, because there is no "lexicographically last column" (it would be + // infinitely // long). - // The "maximum" key type does not need this behavior. Copied from KeyValue. 
This is bad in + // The "maximum" key type does not need this behavior. Copied from KeyValue. + // This is bad in // that // we can't do memcmp w/ special rules like this. // TODO: Is there a test for this behavior? int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. byte leftType = left.getTypeByte(leftKeyLength); - if ( - leftType == KeyValue.Type.Minimum.getCode() && leftFamilyLength + leftQualifierLength == 0 - ) { + if (leftType == KeyValue.Type.Minimum.getCode() && leftFamilyLength + leftQualifierLength == 0) { // left is "bigger", i.e. it appears later in the sorted order return 1; } @@ -225,32 +232,29 @@ private int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // No need of right row length below here. byte rightType = right.getTypeByte(rightKeyLength); - if ( - rightType == KeyValue.Type.Minimum.getCode() && rightFamilyLength + rightQualifierLength == 0 - ) { + if (rightType == KeyValue.Type.Minimum.getCode() && rightFamilyLength + rightQualifierLength == 0) { return -1; } // Compare families. int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); - diff = compareFamilies(left, leftFamilyPosition, leftFamilyLength, right, rightFamilyPosition, - rightFamilyLength); + diff = RVVByteBufferUtils.compareToRvv(left.getFamilyByteBuffer(), leftFamilyPosition, + leftFamilyLength, right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); if (diff != 0) { return diff; } // Compare qualifiers - diff = ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), - left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, - right.getQualifierByteBuffer(), - right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + diff = RVVByteBufferUtils.compareToRvv(left.getQualifierByteBuffer(), + left.getQualifierPosition(leftFamilyPosition, leftFamilyLength), leftQualifierLength, + right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); if (diff != 0) { return diff; } @@ -269,37 +273,38 @@ private int compareBBKV(final ByteBufferKeyValue left, final ByteBufferKeyValue return (0xff & rightType) - (0xff & leftType); } - private int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) { + private static int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) { int diff; // Compare Rows. Cache row length. 
int leftRowLength = left.getRowLength(); int rightRowLength = right.getRowLength(); - diff = ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, - right.getRowByteBuffer(), right.getRowPosition(), rightRowLength); + diff = ByteBufferUtils.compareTo(right.getRowByteBuffer(), right.getRowPosition(), rightRowLength, + left.getRowArray(), left.getRowOffset(), leftRowLength); if (diff != 0) { return diff; } - // If the column is not specified, the "minimum" key type appears as latest in the sorted - // order, regardless of the timestamp. This is used for specifying the last key/value in a - // given row, because there is no "lexicographically last column" (it would be infinitely + // If the column is not specified, the "minimum" key type appears as latest in + // the sorted + // order, regardless of the timestamp. This is used for specifying the last + // key/value in a + // given row, because there is no "lexicographically last column" (it would be + // infinitely // long). - // The "maximum" key type does not need this behavior. Copied from KeyValue. This is bad in + // The "maximum" key type does not need this behavior. Copied from KeyValue. + // This is bad in // that // we can't do memcmp w/ special rules like this. // TODO: Is there a test for this behavior? int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); int leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. byte leftType = left.getTypeByte(leftKeyLength); - if ( - leftType == KeyValue.Type.Minimum.getCode() && leftFamilyLength + leftQualifierLength == 0 - ) { + if (leftType == KeyValue.Type.Minimum.getCode() && leftFamilyLength + leftQualifierLength == 0) { // left is "bigger", i.e. it appears later in the sorted order return 1; } @@ -307,32 +312,29 @@ private int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); int rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // No need of right row length below here. byte rightType = right.getTypeByte(rightKeyLength); - if ( - rightType == KeyValue.Type.Minimum.getCode() && rightFamilyLength + rightQualifierLength == 0 - ) { + if (rightType == KeyValue.Type.Minimum.getCode() && rightFamilyLength + rightQualifierLength == 0) { return -1; } // Compare families. 
int leftFamilyPosition = left.getFamilyOffset(leftFamilyLengthPosition); int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); - diff = compareFamilies(left, leftFamilyPosition, leftFamilyLength, right, rightFamilyPosition, - rightFamilyLength); + diff = ByteBufferUtils.compareTo(right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength, + left.getFamilyArray(), leftFamilyPosition, leftFamilyLength); if (diff != 0) { return diff; } // Compare qualifiers - diff = ByteBufferUtils.compareTo(left.getQualifierArray(), - left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength, - right.getQualifierByteBuffer(), - right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength); + diff = ByteBufferUtils.compareTo(right.getQualifierByteBuffer(), + right.getQualifierPosition(rightFamilyPosition, rightFamilyLength), rightQualifierLength, + left.getQualifierArray(), + left.getQualifierOffset(leftFamilyPosition, leftFamilyLength), leftQualifierLength); if (diff != 0) { return diff; } @@ -353,7 +355,9 @@ private int compareKVVsBBKV(final KeyValue left, final ByteBufferKeyValue right) /** * Compares the family and qualifier part of the cell - * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise + * + * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 + * otherwise */ public final int compareColumns(final Cell left, final Cell right) { int diff = compareFamilies(left, right); @@ -364,7 +368,7 @@ public final int compareColumns(final Cell left, final Cell right) { } private int compareColumns(final Cell left, final int leftFamLen, final int leftQualLen, - final Cell right, final int rightFamLen, final int rightQualLen) { + final Cell right, final int rightFamLen, final int rightQualLen) { int diff = compareFamilies(left, leftFamLen, right, rightFamLen); if (diff != 0) { return diff; @@ -372,120 +376,96 @@ private int compareColumns(final Cell left, final int leftFamLen, final int left return compareQualifiers(left, leftQualLen, right, rightQualLen); } - /** - * This method will be overridden when we compare cells inner store to bypass family comparing. 
- */ - protected int compareFamilies(Cell left, int leftFamLen, Cell right, int rightFamLen) { + private int compareFamilies(Cell left, int leftFamLen, Cell right, int rightFamLen) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, - ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + return RVVByteBufferUtils.compareToRvv(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); } if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, right.getFamilyArray(), - right.getFamilyOffset(), rightFamLen); + ((ByteBufferExtendedCell) left).getFamilyPosition(), leftFamLen, right.getFamilyArray(), + right.getFamilyOffset(), rightFamLen); } if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but + // Notice how we flip the order of the compare here. We used to negate the + // return value but // see what FindBugs says // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // It suggest flipping the order to get same effect and 'safer'. return ByteBufferUtils.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, - ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), rightFamLen); } return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), leftFamLen, - right.getFamilyArray(), right.getFamilyOffset(), rightFamLen); + right.getFamilyArray(), right.getFamilyOffset(), rightFamLen); } private final int compareQualifiers(Cell left, int leftQualLen, Cell right, int rightQualLen) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, - ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + return RVVByteBufferUtils.compareToRvv(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); } if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, - right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + ((ByteBufferExtendedCell) left).getQualifierPosition(), leftQualLen, + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); } if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. 
We used to negate the return value but + // Notice how we flip the order of the compare here. We used to negate the + // return value but // see what FindBugs says // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // It suggest flipping the order to get same effect and 'safer'. - return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), - leftQualLen, ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen); + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), rightQualLen, + left.getQualifierArray(), left.getQualifierOffset(), leftQualLen); } return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), leftQualLen, - right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); + right.getQualifierArray(), right.getQualifierOffset(), rightQualLen); } /** * Compare the families of left and right cell - * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise + * + * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 + * otherwise */ @Override public final int compareFamilies(Cell left, Cell right) { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), - ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) right).getFamilyPosition(), right.getFamilyLength()); + return RVVByteBufferUtils.compareToRvv(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), + ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), right.getFamilyLength()); } if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), - right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); + ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), + right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but + // Notice how we flip the order of the compare here. We used to negate the + // return value but // see what FindBugs says // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // It suggest flipping the order to get same effect and 'safer'. 
- return ByteBufferUtils.compareTo(left.getFamilyArray(), left.getFamilyOffset(), - left.getFamilyLength(), ((ByteBufferExtendedCell) right).getFamilyByteBuffer(), - ((ByteBufferExtendedCell) right).getFamilyPosition(), right.getFamilyLength()); + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) right).getFamilyByteBuffer(), + ((ByteBufferExtendedCell) right).getFamilyPosition(), right.getFamilyLength(), + left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength()); } return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), - right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); - } - - /** - * This method will be overridden when we compare cells inner store to bypass family comparing. - */ - protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength, - KeyValue right, int rightFamilyPosition, int rightFamilyLength) { - return Bytes.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, - right.getFamilyArray(), rightFamilyPosition, rightFamilyLength); - } - - /** - * This method will be overridden when we compare cells inner store to bypass family comparing. - */ - protected int compareFamilies(ByteBufferKeyValue left, int leftFamilyPosition, - int leftFamilyLength, ByteBufferKeyValue right, int rightFamilyPosition, - int rightFamilyLength) { - return ByteBufferUtils.compareTo(left.getFamilyByteBuffer(), leftFamilyPosition, - leftFamilyLength, right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); - } - - /** - * This method will be overridden when we compare cells inner store to bypass family comparing. - */ - protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength, - ByteBufferKeyValue right, int rightFamilyPosition, int rightFamilyLength) { - return ByteBufferUtils.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength, - right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength); + right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); } static int compareQualifiers(KeyValue left, KeyValue right) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make + // NOTE: Same method is in CellComparatorImpl, also private, not shared, + // intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or + // there, make // sure done in both places. // Compare Rows. Cache row length. int leftRowLength = left.getRowLength(); @@ -494,16 +474,14 @@ static int compareQualifiers(KeyValue left, KeyValue right) { int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. 
int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // Compare families. int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); @@ -511,13 +489,15 @@ static int compareQualifiers(KeyValue left, KeyValue right) { // Compare qualifiers return Bytes.compareTo(left.getQualifierArray(), leftFamilyOffset + leftFamilyLength, - leftQualifierLength, right.getQualifierArray(), rightFamilyOffset + rightFamilyLength, - rightQualifierLength); + leftQualifierLength, right.getQualifierArray(), rightFamilyOffset + rightFamilyLength, + rightQualifierLength); } static int compareQualifiers(KeyValue left, ByteBufferKeyValue right) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make + // NOTE: Same method is in CellComparatorImpl, also private, not shared, + // intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or + // there, make // sure done in both places. // Compare Rows. Cache row length. int leftRowLength = left.getRowLength(); @@ -526,30 +506,30 @@ static int compareQualifiers(KeyValue left, ByteBufferKeyValue right) { int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // Compare families. int leftFamilyOffset = left.getFamilyOffset(leftFamilyLengthPosition); int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); // Compare qualifiers - return ByteBufferUtils.compareTo(left.getQualifierArray(), leftFamilyOffset + leftFamilyLength, - leftQualifierLength, right.getQualifierByteBuffer(), rightFamilyPosition + rightFamilyLength, - rightQualifierLength); + return ByteBufferUtils.compareTo(right.getQualifierByteBuffer(), rightFamilyPosition + rightFamilyLength, + rightQualifierLength, left.getQualifierArray(), leftFamilyOffset + leftFamilyLength, + leftQualifierLength); } static int compareQualifiers(ByteBufferKeyValue left, KeyValue right) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make + // NOTE: Same method is in CellComparatorImpl, also private, not shared, + // intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or + // there, make // sure done in both places. 
// Compare Rows. Cache row length. int leftRowLength = left.getRowLength(); @@ -558,16 +538,14 @@ static int compareQualifiers(ByteBufferKeyValue left, KeyValue right) { int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // Compare families. int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); @@ -575,13 +553,15 @@ static int compareQualifiers(ByteBufferKeyValue left, KeyValue right) { // Compare qualifiers return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), - leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierArray(), - rightFamilyOffset + rightFamilyLength, rightQualifierLength); + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierArray(), + rightFamilyOffset + rightFamilyLength, rightQualifierLength); } static int compareQualifiers(ByteBufferKeyValue left, ByteBufferKeyValue right) { - // NOTE: Same method is in CellComparatorImpl, also private, not shared, intentionally. Not - // sharing gets us a few percent more throughput in compares. If changes here or there, make + // NOTE: Same method is in CellComparatorImpl, also private, not shared, + // intentionally. Not + // sharing gets us a few percent more throughput in compares. If changes here or + // there, make // sure done in both places. // Compare Rows. Cache row length. int leftRowLength = left.getRowLength(); @@ -590,30 +570,30 @@ static int compareQualifiers(ByteBufferKeyValue left, ByteBufferKeyValue right) int leftFamilyLengthPosition = left.getFamilyLengthPosition(leftRowLength); byte leftFamilyLength = left.getFamilyLength(leftFamilyLengthPosition); int leftKeyLength = left.getKeyLength(); - int leftQualifierLength = - left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); + int leftQualifierLength = left.getQualifierLength(leftKeyLength, leftRowLength, leftFamilyLength); // No need of left row length below here. int rightFamilyLengthPosition = right.getFamilyLengthPosition(rightRowLength); byte rightFamilyLength = right.getFamilyLength(rightFamilyLengthPosition); int rightKeyLength = right.getKeyLength(); - int rightQualifierLength = - right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); + int rightQualifierLength = right.getQualifierLength(rightKeyLength, rightRowLength, rightFamilyLength); // Compare families. 
int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition); int rightFamilyPosition = right.getFamilyPosition(rightFamilyLengthPosition); // Compare qualifiers - return ByteBufferUtils.compareTo(left.getQualifierByteBuffer(), - leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), - rightFamilyPosition + rightFamilyLength, rightQualifierLength); + return RVVByteBufferUtils.compareToRvv(left.getQualifierByteBuffer(), + leftFamilyPosition + leftFamilyLength, leftQualifierLength, right.getQualifierByteBuffer(), + rightFamilyPosition + rightFamilyLength, rightQualifierLength); } /** * Compare the qualifiers part of the left and right cells. - * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise + * + * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 + * otherwise */ @Override public final int compareQualifiers(Cell left, Cell right) { @@ -627,37 +607,42 @@ public final int compareQualifiers(Cell left, Cell right) { return compareQualifiers((ByteBufferKeyValue) left, (KeyValue) right); } else { if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), - ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + return RVVByteBufferUtils.compareToRvv(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), + ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); } if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), - right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); + ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), + right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); } if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but + // Notice how we flip the order of the compare here. We used to negate the + // return value but // see what FindBugs says // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // It suggest flipping the order to get same effect and 'safer'. 
- return ByteBufferUtils.compareTo(left.getQualifierArray(), left.getQualifierOffset(), - left.getQualifierLength(), ((ByteBufferExtendedCell) right).getQualifierByteBuffer(), - ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength()); + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) right).getQualifierByteBuffer(), + ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength(), + left.getQualifierArray(), left.getQualifierOffset(), left.getQualifierLength()); } return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), - left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), - right.getQualifierLength()); + left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), + right.getQualifierLength()); } } /** - * Compares the rows of the left and right cell. For the hbase:meta case this method is overridden - * such that it can handle hbase:meta cells. The caller should ensure using the appropriate + * Compares the rows of the left and right cell. For the hbase:meta case this + * method is overridden + * such that it can handle hbase:meta cells. The caller should ensure using the + * appropriate * comparator for hbase:meta. - * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise + * + * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 + * otherwise */ @Override public int compareRows(final Cell left, final Cell right) { @@ -670,46 +655,52 @@ static int compareRows(final Cell left, int leftRowLength, final Cell right, int return 0; } if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) { - return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getRowByteBuffer(), - ((ByteBufferExtendedCell) left).getRowPosition(), leftRowLength, - ((ByteBufferExtendedCell) right).getRowByteBuffer(), - ((ByteBufferExtendedCell) right).getRowPosition(), rightRowLength); + return RVVByteBufferUtils.compareToRvv(((ByteBufferExtendedCell) left).getRowByteBuffer(), + ((ByteBufferExtendedCell) left).getRowPosition(), leftRowLength, + ((ByteBufferExtendedCell) right).getRowByteBuffer(), + ((ByteBufferExtendedCell) right).getRowPosition(), rightRowLength); } if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getRowByteBuffer(), - ((ByteBufferExtendedCell) left).getRowPosition(), leftRowLength, right.getRowArray(), - right.getRowOffset(), rightRowLength); + ((ByteBufferExtendedCell) left).getRowPosition(), leftRowLength, right.getRowArray(), + right.getRowOffset(), rightRowLength); } if (right instanceof ByteBufferExtendedCell) { - // Notice how we flip the order of the compare here. We used to negate the return value but + // Notice how we flip the order of the compare here. We used to negate the + // return value but // see what FindBugs says // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // It suggest flipping the order to get same effect and 'safer'. 
- return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, - ((ByteBufferExtendedCell) right).getRowByteBuffer(), - ((ByteBufferExtendedCell) right).getRowPosition(), rightRowLength); + return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) right).getRowByteBuffer(), + ((ByteBufferExtendedCell) right).getRowPosition(), rightRowLength, + left.getRowArray(), left.getRowOffset(), leftRowLength); } return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), leftRowLength, - right.getRowArray(), right.getRowOffset(), rightRowLength); + right.getRowArray(), right.getRowOffset(), rightRowLength); } /** - * Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This + * Compares the row part of the cell with a simple plain byte[] like the stopRow + * in Scan. This * should be used with context where for hbase:meta cells the - * {{@link MetaCellComparator#META_COMPARATOR} should be used the cell to be compared the kv - * serialized byte[] to be compared with the offset in the byte[] the length in the byte[] - * @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1 + * {{@link MetaCellComparator#META_COMPARATOR} should be used the cell to be + * compared the kv + * serialized byte[] to be compared with the offset in the byte[] the length in + * the byte[] + * + * @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than + * byte[], -1 * otherwise */ @Override public int compareRows(Cell left, byte[] right, int roffset, int rlength) { if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getRowByteBuffer(), - ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(), right, roffset, - rlength); + ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(), right, roffset, + rlength); } return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right, - roffset, rlength); + roffset, rlength); } @Override @@ -719,18 +710,17 @@ public final int compareWithoutRow(final Cell left, final Cell right) { // for specifying the last key/value in a given row, because there is no // "lexicographically last column" (it would be infinitely long). The // "maximum" key type does not need this behavior. - // Copied from KeyValue. This is bad in that we can't do memcmp w/ special rules like this. + // Copied from KeyValue. This is bad in that we can't do memcmp w/ special rules + // like this. int lFamLength = left.getFamilyLength(); int rFamLength = right.getFamilyLength(); int lQualLength = left.getQualifierLength(); int rQualLength = right.getQualifierLength(); - byte leftType = PrivateCellUtil.getTypeByte(left); - byte rightType = PrivateCellUtil.getTypeByte(right); - if (lFamLength + lQualLength == 0 && leftType == KeyValue.Type.Minimum.getCode()) { + if (lFamLength + lQualLength == 0 && ((KeyValue) left).getTypeByte() == KeyValue.Type.Minimum.getCode()) { // left is "bigger", i.e. it appears later in the sorted order return 1; } - if (rFamLength + rQualLength == 0 && rightType == KeyValue.Type.Minimum.getCode()) { + if (rFamLength + rQualLength == 0 && ((KeyValue) right).getTypeByte() == KeyValue.Type.Minimum.getCode()) { return -1; } if (lFamLength != rFamLength) { @@ -752,7 +742,7 @@ public final int compareWithoutRow(final Cell left, final Cell right) { // of higher numbers sort before those of lesser numbers. 
Maximum (255) // appears ahead of everything, and minimum (0) appears after // everything. - return (0xff & rightType) - (0xff & leftType); + return (0xff & ((KeyValue) right).getTypeByte()) - (0xff & ((KeyValue) left).getTypeByte()); } @Override @@ -772,8 +762,10 @@ public Comparator getSimpleComparator() { } /** - * Utility method that makes a guess at comparator to use based off passed tableName. Use in + * Utility method that makes a guess at comparator to use based off passed + * tableName. Use in * extreme when no comparator specified. + * * @return CellComparator to use going off the {@code tableName} passed. */ public static CellComparator getCellComparator(TableName tableName) { @@ -781,14 +773,17 @@ public static CellComparator getCellComparator(TableName tableName) { } /** - * Utility method that makes a guess at comparator to use based off passed tableName. Use in + * Utility method that makes a guess at comparator to use based off passed + * tableName. Use in * extreme when no comparator specified. + * * @return CellComparator to use going off the {@code tableName} passed. */ public static CellComparator getCellComparator(byte[] tableName) { - // FYI, TableName.toBytes does not create an array; just returns existing array pointer. + // FYI, TableName.toBytes does not create an array; just returns existing array + // pointer. return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) - ? MetaCellComparator.META_COMPARATOR - : CellComparatorImpl.COMPARATOR; + ? MetaCellComparator.META_COMPARATOR + : CellComparatorImpl.COMPARATOR; } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java index 7f6e87ebf911..fe1a057f38a2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java @@ -22,9 +22,9 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Compare two HBase cells inner store, skip compare family for better performance. Important!!! we - * should not make fake cell with fake family which length greater than zero inner store, otherwise - * this optimization cannot be used. + * Compare two HBase cells inner store, skip compare family for better performance. Important!!! + * We should not make fake cell with fake family which length greater than zero inner store, + * otherwise this optimization cannot be used. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving public class InnerStoreCellComparator extends CellComparatorImpl { @@ -33,32 +33,9 @@ public class InnerStoreCellComparator extends CellComparatorImpl { private static final long serialVersionUID = 8186411895799094989L; public static final InnerStoreCellComparator INNER_STORE_COMPARATOR = - new InnerStoreCellComparator(); + new InnerStoreCellComparator(); - @Override - protected int compareFamilies(Cell left, int leftFamilyLength, Cell right, - int rightFamilyLength) { - return leftFamilyLength - rightFamilyLength; - } - - @Override - protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength, - KeyValue right, int rightFamilyPosition, int rightFamilyLength) { - return leftFamilyLength - rightFamilyLength; - } - - @Override - protected int compareFamilies(ByteBufferKeyValue left, int leftFamilyPosition, - int leftFamilyLength, ByteBufferKeyValue right, int rightFamilyPosition, - int rightFamilyLength) { - return leftFamilyLength - rightFamilyLength; - } - - @Override - protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength, - ByteBufferKeyValue right, int rightFamilyPosition, int rightFamilyLength) { - return leftFamilyLength - rightFamilyLength; - } + // compareFamilies is no longer overridden here; the parent CellComparatorImpl now applies the RVV-optimized comparison logic /** * Utility method that makes a guess at comparator to use based off passed tableName. Use in @@ -76,7 +53,7 @@ public static CellComparator getInnerStoreCellComparator(TableName tableName) { */ public static CellComparator getInnerStoreCellComparator(byte[] tableName) { return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) - ? MetaCellComparator.META_COMPARATOR - : InnerStoreCellComparator.INNER_STORE_COMPARATOR; + ? MetaCellComparator.META_COMPARATOR + : InnerStoreCellComparator.INNER_STORE_COMPARATOR; } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RVVByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RVVByteBufferUtils.java new file mode 100644 index 000000000000..cc1475394a7c --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RVVByteBufferUtils.java @@ -0,0 +1,55 @@ +package org.apache.hadoop.hbase.util; + +import java.nio.ByteBuffer; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class RVVByteBufferUtils { + + static { + try { + System.loadLibrary("scan_rvv_jni"); + } catch (Throwable t) { + // ignore; use availability checks + } + } + + public static boolean available() { + return ScanRVV.available(); + } + + public static native int compareToRvv(ByteBuffer a, int aOffset, int aLen, + ByteBuffer b, int bOffset, int bLen); + + public static native int commonPrefixRvv(byte[] left, int leftOffset, + byte[] right, int rightOffset, + int maxLen); + + public static native int findCommonPrefixRvv(ByteBuffer a, int aOffset, int aLen, + ByteBuffer b, int bOffset, int bLen); + + public static native int findCommonPrefixRvv(ByteBuffer a, int aOffset, int aLen, + byte[] b, int bOffset, int bLen); + + public static byte[] readBytesRvv(ByteBuffer buf) { + int len = buf.remaining(); + byte[] dst = new byte[len]; + if (!available()) { + if (buf.hasArray()) { + System.arraycopy(buf.array(), buf.arrayOffset() + buf.position(), dst, 0, len); + } else { + int pos = buf.position(); + for (int i = 0; i < len; i++) { + dst[i] = buf.get(pos + i); + } + } + return dst; + } + if (buf.hasArray()) { + System.arraycopy(buf.array(), buf.arrayOffset() + buf.position(), dst, 0, len); + } else { +
ScanRVV.rvvMemcpy(dst, 0, buf, buf.position(), len); + } + return dst; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ScanRVV.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ScanRVV.java new file mode 100644 index 000000000000..6d43c1e09c38 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ScanRVV.java @@ -0,0 +1,130 @@ +package org.apache.hadoop.hbase.util; + +import java.nio.ByteBuffer; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Utility class for RISC-V RVV vectorized scan optimization + */ +@InterfaceAudience.Private +public class ScanRVV { + + private static boolean rvvEnabled = false; + + static { + try { + System.loadLibrary("scan_rvv_jni"); + rvvEnabled = true; + } catch (UnsatisfiedLinkError e) { + rvvEnabled = false; + } catch (Throwable t) { + rvvEnabled = false; + } + } + + public static native boolean isEnabled(); + + public static void setEnabled(boolean enabled) { + rvvEnabled = enabled; + } + + public static boolean available() { + if (!rvvEnabled) { + return false; + } + try { + return isEnabled(); + } catch (Throwable t) { + return false; + } + } + + public static native int compareCells(byte[] aKey, int aLen, byte[] bKey, int bLen); + + public static native int compareKeyForNextRow(byte[] indexedKey, int idxLen, byte[] curKey, int curLen); + + public static native int compareKeyForNextColumn(byte[] indexedKey, int idxLen, byte[] curKey, int curLen); + + public static native int memcmp(byte[] a, int offsetA, int lengthA, + byte[] b, int offsetB, int lengthB); + + public static native boolean prefixMatch(byte[] a, int offsetA, + byte[] b, int offsetB, + int prefixLen); + + public static int memcmp(byte[] a, byte[] b, int length) { + if (a == null || b == null) { + throw new IllegalArgumentException("Input arrays cannot be null"); + } + if (length < 0) { + throw new IllegalArgumentException("Length cannot be negative"); + } + return memcmp(a, 0, length, b, 0, length); + } + + public static boolean prefixMatch(byte[] a, byte[] b, int prefixLen) { + if (a == null || b == null) { + return false; + } + if (prefixLen <= 0 || prefixLen > a.length || prefixLen > b.length) { + return false; + } + return prefixMatch(a, 0, b, 0, prefixLen); + } + + public static native int rvvCommonPrefix(byte[] a, int offsetA, int lengthA, + byte[] b, int offsetB, int lengthB); + + public static native void rvvMemcpy(byte[] dst, int dstOffset, + byte[] src, int srcOffset, + int length); + + public static void rvvMemcpy(byte[] dst, int dstOffset, + ByteBuffer src, int srcOffset, + int length) { + if (dst == null || src == null) { + throw new IllegalArgumentException("Input parameters cannot be null"); + } + if (length < 0) { + throw new IllegalArgumentException("Length cannot be negative"); + } + + if (src.hasArray()) { + System.arraycopy(src.array(), src.arrayOffset() + srcOffset, dst, dstOffset, length); + } else { + byte[] tmp = new byte[length]; + src.position(srcOffset); + src.get(tmp); + rvvMemcpy(dst, dstOffset, tmp, 0, length); + } + } + + public static byte[] copyToArray(byte[] src, int offset, int length) { + if (src == null) { + throw new IllegalArgumentException("Source array cannot be null"); + } + if (offset < 0 || length < 0 || offset + length > src.length) { + throw new IllegalArgumentException("Invalid offset or length"); + } + + byte[] dst = new byte[length]; + ScanRVV.rvvMemcpy(dst, 0, src, offset, length); + return dst; + } + + public static byte[] copyToArray(ByteBuffer src) { + if (src == null) { + throw new 
IllegalArgumentException("Source ByteBuffer cannot be null"); + } + + int len = src.remaining(); + byte[] dst = new byte[len]; + if (src.hasArray()) { + System.arraycopy(src.array(), src.arrayOffset() + src.position(), dst, 0, len); + } else { + rvvMemcpy(dst, 0, src, src.position(), len); + } + return dst; + } +} diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java index b6ec92b4aa9e..151680e51d58 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java @@ -39,11 +39,13 @@ public class Lz4Compressor implements CanReinit, Compressor { protected boolean finish, finished; protected long bytesRead, bytesWritten; + private final boolean useNative = Lz4Native.available(); + Lz4Compressor(int bufferSize) { compressor = LZ4Factory.fastestInstance().fastCompressor(); this.bufferSize = bufferSize; - this.inBuf = ByteBuffer.allocate(bufferSize); - this.outBuf = ByteBuffer.allocate(bufferSize); + this.inBuf = ByteBuffer.allocateDirect(bufferSize); + this.outBuf = ByteBuffer.allocateDirect(bufferSize); this.outBuf.position(bufferSize); } @@ -61,37 +63,58 @@ public int compress(byte[] b, int off, int len) throws IOException { if (inBuf.position() > 0) { inBuf.flip(); int uncompressed = inBuf.remaining(); - int needed = maxCompressedLength(uncompressed); - // Can we decompress directly into the provided array? - ByteBuffer writeBuffer; - boolean direct = false; - if (len <= needed) { - writeBuffer = ByteBuffer.wrap(b, off, len); - direct = true; - } else { - // If we don't have enough capacity in our currently allocated output buffer, - // allocate a new one which does. 
+ + if (useNative && inBuf.isDirect() && outBuf.isDirect()) { + // 如果你现在用的是 heap ByteBuffer,改成 direct 分配(见 reset/reinit 部分) + int needed = Lz4Native.maxCompressedLength(uncompressed); if (outBuf.capacity() < needed) { - needed = CompressionUtil.roundInt2(needed); - outBuf = ByteBuffer.allocate(needed); + needed = org.apache.hadoop.hbase.io.compress.CompressionUtil.roundInt2(needed); + outBuf = ByteBuffer.allocateDirect(needed); } else { outBuf.clear(); } - writeBuffer = outBuf; - } - final int oldPos = writeBuffer.position(); - compressor.compress(inBuf, writeBuffer); - final int written = writeBuffer.position() - oldPos; - bytesWritten += written; - inBuf.clear(); - finished = true; - if (!direct) { - outBuf.flip(); + int written = Lz4Native.compressDirect(inBuf, inBuf.position(), uncompressed, + outBuf, outBuf.position(), outBuf.remaining()); + if (written < 0) + throw new IOException("LZ4 native compress failed: " + written); + bytesWritten += written; + inBuf.clear(); + finished = true; + outBuf.limit(outBuf.position() + written); int n = Math.min(written, len); outBuf.get(b, off, n); return n; } else { - return written; + // 走原 lz4-java 路径 + int needed = maxCompressedLength(uncompressed); + ByteBuffer writeBuffer; + boolean direct = false; + if (len <= needed) { + writeBuffer = ByteBuffer.wrap(b, off, len); + direct = true; + } else { + if (outBuf.capacity() < needed) { + needed = org.apache.hadoop.hbase.io.compress.CompressionUtil.roundInt2(needed); + outBuf = ByteBuffer.allocate(needed); + } else { + outBuf.clear(); + } + writeBuffer = outBuf; + } + final int oldPos = writeBuffer.position(); + compressor.compress(inBuf, writeBuffer); + final int written = writeBuffer.position() - oldPos; + bytesWritten += written; + inBuf.clear(); + finished = true; + if (!direct) { + outBuf.flip(); + int n = Math.min(written, len); + outBuf.get(b, off, n); + return n; + } else { + return written; + } } } else { finished = true; @@ -136,8 +159,8 @@ public void reinit(Configuration conf) { int newBufferSize = Lz4Codec.getBufferSize(conf); if (bufferSize != newBufferSize) { bufferSize = newBufferSize; - this.inBuf = ByteBuffer.allocate(bufferSize); - this.outBuf = ByteBuffer.allocate(bufferSize); + this.inBuf = ByteBuffer.allocateDirect(bufferSize); + this.outBuf = ByteBuffer.allocateDirect(bufferSize); } } reset(); @@ -162,7 +185,8 @@ public void setDictionary(byte[] b, int off, int len) { @Override public void setInput(byte[] b, int off, int len) { if (inBuf.remaining() < len) { - // Get a new buffer that can accomodate the accumulated input plus the additional + // Get a new buffer that can accomodate the accumulated input plus the + // additional // input that would cause a buffer overflow without reallocation. // This condition should be fortunately rare, because it is expensive. 
int needed = CompressionUtil.roundInt2(inBuf.capacity() + len); diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Native.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Native.java new file mode 100644 index 000000000000..3c193ba76acc --- /dev/null +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Native.java @@ -0,0 +1,27 @@ +package org.apache.hadoop.hbase.io.compress.lz4; + +import java.nio.ByteBuffer; + +final class Lz4Native { + static { + NativeLoader.load(); + } + + static boolean available() { + return NativeLoader.isLoaded(); + } + + static boolean isAvailable() { + return available(); + } + + static native int maxCompressedLength(int srcLen); + static native int compressDirect(ByteBuffer src, int srcOff, int srcLen, + ByteBuffer dst, int dstOff, int dstCap); + static native int decompressDirect(ByteBuffer src, int srcOff, int srcLen, + ByteBuffer dst, int dstOff, int dstCap); + + + private Lz4Native() {} +} + diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/NativeLoader.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/NativeLoader.java new file mode 100644 index 000000000000..f2efae61778d --- /dev/null +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/NativeLoader.java @@ -0,0 +1,39 @@ +package org.apache.hadoop.hbase.io.compress.lz4; + +import java.io.File; + +final class NativeLoader { + private static volatile boolean loaded = false; + + static synchronized boolean load() { + if (loaded) return true; + + try { + // 尝试使用环境变量 + String lib = System.getenv("HBASE_LZ4RVV_LIB"); + if (lib != null && !lib.isEmpty()) { + File f = new File(lib); + System.load(f.getAbsolutePath()); + } else { + // 使用相对路径加载 + String relativePath = "libhbase_lz4rvv.so"; + File f = new File(relativePath); + System.load(f.getAbsolutePath()); + } + loaded = true; + } catch (Throwable t) { + loaded = false; + System.err.println("[NativeLoader ERROR] Failed to load native library: " + t); + t.printStackTrace(); + } + + return loaded; + } + + static boolean isLoaded() { + return loaded; + } + + private NativeLoader() {} +} + diff --git a/hbase-compression/hbase-compression-lz4/src/native/Lz4Native.c b/hbase-compression/hbase-compression-lz4/src/native/Lz4Native.c new file mode 100644 index 000000000000..5f539cc0b593 --- /dev/null +++ b/hbase-compression/hbase-compression-lz4/src/native/Lz4Native.c @@ -0,0 +1,72 @@ +#include +#include "lz4.h" +#include +#include + +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_io_compress_lz4_Lz4Native_maxCompressedLength + (JNIEnv *env, jclass clazz, jint srcLen) { + return LZ4_compressBound(srcLen); +} + + +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_io_compress_lz4_Lz4Native_compressDirect + (JNIEnv *env, jclass clazz, + jobject src, jint srcOff, jint srcLen, + jobject dst, jint dstOff, jint dstCap) { + + // 取 Direct ByteBuffer 的底层地址 + char* srcPtr = (char*)(*env)->GetDirectBufferAddress(env, src); + char* dstPtr = (char*)(*env)->GetDirectBufferAddress(env, dst); + + if (srcPtr == NULL || dstPtr == NULL) { + return -1001; // LZ4_ERROR_NULL_PTR - DirectBuffer 获取失败 + } + + // 偏移量修正 + const char* srcAddr = srcPtr + srcOff; + char* dstAddr = dstPtr + dstOff; + + // 检查缓冲区边界 + if (srcOff < 0 || srcLen < 0 || dstOff < 0 || dstCap < 0) { + return 
-1002; // LZ4_ERROR_INVALID_PARAM - 参数无效 + } + + // 检查目标缓冲区是否足够大 + int maxCompressedSize = LZ4_compressBound(srcLen); + if (dstCap < maxCompressedSize) { + return -1003; // LZ4_ERROR_BUFFER_TOO_SMALL - 目标缓冲区太小 + } + + // 调用 LZ4 压缩 + int compressedSize = LZ4_compress_default(srcAddr, dstAddr, srcLen, dstCap); + + return compressedSize; // 返回压缩结果大小(失败时 <= 0) +} + +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_io_compress_lz4_Lz4Native_decompressDirect + (JNIEnv *env, jclass clazz, + jobject src, jint srcOff, jint srcLen, + jobject dst, jint dstOff, jint dstCap) { + + // 获取 Direct ByteBuffer 的底层地址 + char* srcPtr = (char*)(*env)->GetDirectBufferAddress(env, src); + char* dstPtr = (char*)(*env)->GetDirectBufferAddress(env, dst); + + if (srcPtr == NULL || dstPtr == NULL) { + return -1001; // LZ4_ERROR_NULL_PTR - DirectBuffer 获取失败 + } + + // 检查参数有效性 + if (srcOff < 0 || srcLen < 0 || dstOff < 0 || dstCap < 0) { + return -1002; // LZ4_ERROR_INVALID_PARAM - 参数无效 + } + + // 偏移修正 + const char* srcAddr = srcPtr + srcOff; + char* dstAddr = dstPtr + dstOff; + + // 调用 LZ4 解压 + int decompressedSize = LZ4_decompress_safe(srcAddr, dstAddr, srcLen, dstCap); + + return decompressedSize; // <0 表示解压失败 +} \ No newline at end of file diff --git a/hbase-compression/hbase-compression-lz4/src/native/lz4.c b/hbase-compression/hbase-compression-lz4/src/native/lz4.c new file mode 100644 index 000000000000..9bd0ed556bfe --- /dev/null +++ b/hbase-compression/hbase-compression-lz4/src/native/lz4.c @@ -0,0 +1,3250 @@ +/* + LZ4 - Fast LZ compression algorithm + Copyright (c) Yann Collet. All rights reserved. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ + +/*-************************************ +* Tuning parameters +**************************************/ +/* + * LZ4_HEAPMODE : + * Select how stateless compression functions like `LZ4_compress_default()` + * allocate memory for their hash table, + * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). 
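The JNI bindings above are thin wrappers over LZ4_compress_default() and LZ4_decompress_safe(), so the return conventions the Java layer checks (compress failure when the result is <= 0, decompress failure when it is < 0) can be exercised directly against liblz4. A standalone sketch, with arbitrary input and buffer sizes:

#include <stdio.h>
#include <string.h>
#include "lz4.h"

int main(void) {
    const char src[] = "hbase hbase hbase hbase hbase hbase hbase hbase";
    char compressed[128];
    char restored[128];

    /* Mirrors Lz4Native.maxCompressedLength(): worst-case output size for this input. */
    int bound = LZ4_compressBound((int) sizeof(src));
    int csize = LZ4_compress_default(src, compressed, (int) sizeof(src), bound);
    if (csize <= 0) { /* same "<= 0 means failure" check the Java caller performs on compressDirect */
        fprintf(stderr, "compress failed: %d\n", csize);
        return 1;
    }

    int dsize = LZ4_decompress_safe(compressed, restored, csize, (int) sizeof(restored));
    if (dsize < 0 || dsize != (int) sizeof(src) || memcmp(src, restored, sizeof(src)) != 0) {
        fprintf(stderr, "decompress failed: %d\n", dsize);
        return 1;
    }
    printf("round trip ok: %d -> %d -> %d bytes\n", (int) sizeof(src), csize, dsize);
    return 0;
}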
+ */ +#ifndef LZ4_HEAPMODE +# define LZ4_HEAPMODE 0 +#endif + +/* + * LZ4_ACCELERATION_DEFAULT : + * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 + */ +#define LZ4_ACCELERATION_DEFAULT 1 +/* + * LZ4_ACCELERATION_MAX : + * Any "acceleration" value higher than this threshold + * get treated as LZ4_ACCELERATION_MAX instead (fix #876) + */ +#define LZ4_ACCELERATION_MAX 65537 + + +/*-************************************ +* CPU Feature Detection +**************************************/ +/* LZ4_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets which assembly generation depends on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ +# if defined(__GNUC__) && \ + ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ + || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define LZ4_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER) +# define LZ4_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/* + * LZ4_FORCE_SW_BITCOUNT + * Define this parameter if your target system or compiler does not support hardware bit count + */ +#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */ +# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */ +# define LZ4_FORCE_SW_BITCOUNT +#endif + + + +/*-************************************ +* Dependency +**************************************/ +/* + * LZ4_SRC_INCLUDED: + * Amalgamation flag, whether lz4.c is included + */ +#ifndef LZ4_SRC_INCLUDED +# define LZ4_SRC_INCLUDED 1 +#endif + +#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ +#endif + +#ifndef LZ4_STATIC_LINKING_ONLY +# define LZ4_STATIC_LINKING_ONLY +#endif +#include "lz4.h" +/* see also "memory routines" below */ + + +/*-************************************ +* Compiler Options +**************************************/ +#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */ +# include /* only present in VS2005+ */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */ +# pragma warning(disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */ +# pragma warning(disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */ +# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */ +#endif /* _MSC_VER */ + +#ifndef 
LZ4_FORCE_INLINE +# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */ +# define LZ4_FORCE_INLINE static __forceinline +# else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# if defined (__GNUC__) || defined (__clang__) +# define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define LZ4_FORCE_INLINE static inline +# endif +# else +# define LZ4_FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +# endif /* _MSC_VER */ +#endif /* LZ4_FORCE_INLINE */ + +/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE + * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8, + * together with a simple 8-byte copy loop as a fall-back path. + * However, this optimization hurts the decompression speed by >30%, + * because the execution does not go to the optimized loop + * for typical compressible data, and all of the preamble checks + * before going to the fall-back path become useless overhead. + * This optimization happens only with the -O3 flag, and -O2 generates + * a simple 8-byte copy loop. + * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8 + * functions are annotated with __attribute__((optimize("O2"))), + * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute + * of LZ4_wildCopy8 does not affect the compression speed. + */ +#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__) +# define LZ4_FORCE_O2 __attribute__((optimize("O2"))) +# undef LZ4_FORCE_INLINE +# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline)) +#else +# define LZ4_FORCE_O2 +#endif + +#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) +# define expect(expr,value) (__builtin_expect ((expr),(value)) ) +#else +# define expect(expr,value) (expr) +#endif + +#ifndef likely +#define likely(expr) expect((expr) != 0, 1) +#endif +#ifndef unlikely +#define unlikely(expr) expect((expr) != 0, 0) +#endif + +/* Should the alignment test prove unreliable, for some reason, + * it can be disabled by setting LZ4_ALIGN_TEST to 0 */ +#ifndef LZ4_ALIGN_TEST /* can be externally provided */ +# define LZ4_ALIGN_TEST 1 +#endif + + +/*-************************************ +* Memory routines +**************************************/ + +/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION : + * Disable relatively high-level LZ4/HC functions that use dynamic memory + * allocation functions (malloc(), calloc(), free()). + * + * Note that this is a compile-time switch. And since it disables + * public/stable LZ4 v1 API functions, we don't recommend using this + * symbol to generate a library for distribution. + * + * The following public functions are removed when this symbol is defined. + * - lz4 : LZ4_createStream, LZ4_freeStream, + * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated) + * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC, + * LZ4_createHC (deprecated), LZ4_freeHC (deprecated) + * - lz4frame, lz4file : All LZ4F_* functions + */ +#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +# define ALLOC(s) lz4_error_memory_allocation_is_disabled +# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled +# define FREEMEM(p) lz4_error_memory_allocation_is_disabled +#elif defined(LZ4_USER_MEMORY_FUNCTIONS) +/* memory management functions can be customized by user project. 
+ * Below functions must exist somewhere in the Project + * and be available at link time */ +void* LZ4_malloc(size_t s); +void* LZ4_calloc(size_t n, size_t s); +void LZ4_free(void* p); +# define ALLOC(s) LZ4_malloc(s) +# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s) +# define FREEMEM(p) LZ4_free(p) +#else +# include /* malloc, calloc, free */ +# define ALLOC(s) malloc(s) +# define ALLOC_AND_ZERO(s) calloc(1,s) +# define FREEMEM(p) free(p) +#endif + +#if ! LZ4_FREESTANDING +# include /* memset, memcpy */ +#endif +#if !defined(LZ4_memset) +# define LZ4_memset(p,v,s) memset((p),(v),(s)) +#endif +#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s)) + + +/*-************************************ +* Common Constants +**************************************/ +#define MINMATCH 4 + +#define WILDCOPYLENGTH 8 +#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ +#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ +#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */ +#define FASTLOOP_SAFE_DISTANCE 64 +static const int LZ4_minLength = (MFLIMIT+1); + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define LZ4_DISTANCE_ABSOLUTE_MAX 65535 +#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */ +# error "LZ4_DISTANCE_MAX is too big : must be <= 65535" +#endif + +#define ML_BITS 4 +#define ML_MASK ((1U<=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + +#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */ + +#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) +# include + static int g_debuglog_enable = 1; +# define DEBUGLOG(l, ...) { \ + if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ + fprintf(stderr, __FILE__ " %i: ", __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } } +#else +# define DEBUGLOG(l, ...) {} /* disabled */ +#endif + +static int LZ4_isAligned(const void* ptr, size_t alignment) +{ + return ((size_t)ptr & (alignment -1)) == 0; +} + + +/*-************************************ +* Types +**************************************/ +#include +#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# include + typedef unsigned char BYTE; /*uint8_t not necessarily blessed to alias arbitrary type*/ + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef uintptr_t uptrval; +#else +# if UINT_MAX != 4294967295UL +# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4" +# endif + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; + typedef size_t uptrval; /* generally true, except OpenVMS-64 */ +#endif + +#if defined(__x86_64__) + typedef U64 reg_t; /* 64-bits in x32 mode */ +#else + typedef size_t reg_t; /* 32-bits in x32 mode */ +#endif + +typedef enum { + notLimited = 0, + limitedOutput = 1, + fillOutput = 2 +} limitedOutput_directive; + + +/*-************************************ +* Reading and writing into memory +**************************************/ + +/** + * LZ4 relies on memcpy with a constant size being inlined. 
In freestanding + * environments, the compiler can't assume the implementation of memcpy() is + * standard compliant, so it can't apply its specialized memcpy() inlining + * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze + * memcpy() as if it were standard compliant, so it can inline it in freestanding + * environments. This is needed when decompressing the Linux Kernel, for example. + */ +#if !defined(LZ4_memcpy) +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size) +# else +# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size) +# endif +#endif + +#if !defined(LZ4_memmove) +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4_memmove __builtin_memmove +# else +# define LZ4_memmove memmove +# endif +#endif + +static unsigned LZ4_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__)) +#elif defined(_MSC_VER) +#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop)) +#endif + +#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2) +/* lie to the compiler about data alignment; use with caution */ + +static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; } +static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; } +static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; } + +static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } +static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } + +#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16; +LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32; +LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST; + +static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; } +static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; } +static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; } + +static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; } +static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; } + +#else /* safe and portable access using memcpy() */ + +static U16 LZ4_read16(const void* memPtr) +{ + U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U32 LZ4_read32(const void* memPtr) +{ + U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; +} + +static reg_t LZ4_read_ARCH(const void* memPtr) +{ + reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; +} + +static void LZ4_write16(void* memPtr, U16 value) +{ + LZ4_memcpy(memPtr, &value, sizeof(value)); +} + +static void LZ4_write32(void* memPtr, U32 value) +{ + LZ4_memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* LZ4_FORCE_MEMORY_ACCESS */ + + +static U16 LZ4_readLE16(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read16(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)((U16)p[0] | (p[1]<<8)); + } +} + +#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT +static 
U32 LZ4_readLE32(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read32(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24); + } +} +#endif + +static void LZ4_writeLE16(void* memPtr, U16 value) +{ + if (LZ4_isLittleEndian()) { + LZ4_write16(memPtr, value); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE) value; + p[1] = (BYTE)(value>>8); + } +} + +/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ +LZ4_FORCE_INLINE +void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd) +{ + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; + + do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. */ +LZ4_FORCE_INLINE void +LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd) +{ + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; + + do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH + * - there is at least 12 bytes available to write after dstEnd */ +LZ4_FORCE_INLINE void +LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) +{ + BYTE v[8]; + + assert(dstEnd >= dstPtr + MINMATCH); + + switch(offset) { + case 1: + MEM_INIT(v, *srcPtr, 8); + break; + case 2: + LZ4_memcpy(v, srcPtr, 2); + LZ4_memcpy(&v[2], srcPtr, 2); +#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */ +# pragma warning(push) +# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */ +#endif + LZ4_memcpy(&v[4], v, 4); +#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */ +# pragma warning(pop) +#endif + break; + case 4: + LZ4_memcpy(v, srcPtr, 4); + LZ4_memcpy(&v[4], srcPtr, 4); + break; + default: + LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset); + return; + } + + LZ4_memcpy(dstPtr, v, 8); + dstPtr += 8; + while (dstPtr < dstEnd) { + LZ4_memcpy(dstPtr, v, 8); + dstPtr += 8; + } +} +#endif + + +/*-************************************ +* Common functions +**************************************/ +static unsigned LZ4_NbCommonBytes (reg_t val) +{ + assert(val != 0); + if (LZ4_isLittleEndian()) { + if (sizeof(val) == 8) { +# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT) +/*-************************************************************************************************* +* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11. +* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics +* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC. +****************************************************************************************************/ +# if defined(__clang__) && (__clang_major__ < 10) + /* Avoid undefined clang-cl intrinsics issue. + * See https://github.com/lz4/lz4/pull/1017 for details. 
*/ + return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3; +# else + /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */ + return (unsigned)_tzcnt_u64(val) >> 3; +# endif +# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64(&r, (U64)val); + return (unsigned)r >> 3; +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctzll((U64)val) >> 3; +# else + const U64 m = 0x0101010101010101ULL; + val ^= val - 1; + return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56); +# endif + } else /* 32 bits */ { +# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)r >> 3; +# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctz((U32)val) >> 3; +# else + const U32 m = 0x01010101; + return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; +# endif + } + } else /* Big Endian CPU */ { + if (sizeof(val)==8) { +# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clzll((U64)val) >> 3; +# else +#if 1 + /* this method is probably faster, + * but adds a 128 bytes lookup table */ + static const unsigned char ctz7_tab[128] = { + 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, + }; + U64 const mask = 0x0101010101010101ULL; + U64 const t = (((val >> 8) - mask) | val) & mask; + return ctz7_tab[(t * 0x0080402010080402ULL) >> 57]; +#else + /* this method doesn't consume memory space like the previous one, + * but it contains several branches, + * that may end up slowing execution */ + static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. + Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. + Note that this code path is never triggered in 32-bits mode. 
*/ + unsigned r; + if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +#endif +# endif + } else /* 32 bits */ { +# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clz((U32)val) >> 3; +# else + val >>= 8; + val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) | + (val + 0x00FF0000)) >> 24; + return (unsigned)val ^ 3; +# endif + } + } +} + + +#define STEPSIZE sizeof(reg_t) +LZ4_FORCE_INLINE +unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) +{ + const BYTE* const pStart = pIn; + + if (likely(pIn < pInLimit-(STEPSIZE-1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { + pIn+=STEPSIZE; pMatch+=STEPSIZE; + } else { + return LZ4_NbCommonBytes(diff); + } } + + while (likely(pIn < pInLimit-(STEPSIZE-1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; } + pIn += LZ4_NbCommonBytes(diff); + return (unsigned)(pIn - pStart); + } + + if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn compression run slower on incompressible data */ + + +/*-************************************ +* Local Structures and types +**************************************/ +typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; + +/** + * This enum distinguishes several different modes of accessing previous + * content in the stream. + * + * - noDict : There is no preceding content. + * - withPrefix64k : Table entries up to ctx->dictSize before the current blob + * blob being compressed are valid and refer to the preceding + * content (of length ctx->dictSize), which is available + * contiguously preceding in memory the content currently + * being compressed. + * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere + * else in memory, starting at ctx->dictionary with length + * ctx->dictSize. + * - usingDictCtx : Everything concerning the preceding content is + * in a separate context, pointed to by ctx->dictCtx. + * ctx->dictionary, ctx->dictSize, and table entries + * in the current context that refer to positions + * preceding the beginning of the current compression are + * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx + * ->dictSize describe the location and size of the preceding + * content, and matches are found by looking in the ctx + * ->dictCtx->hashTable. 
+ */ +typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + + +/*-************************************ +* Local Utils +**************************************/ +int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } +const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } +int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } +int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); } + + +/*-**************************************** +* Internal Definitions, used only in Tests +*******************************************/ +#if defined (__cplusplus) +extern "C" { +#endif + +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize); + +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize); +int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, + int compressedSize, int targetOutputSize, int dstCapacity, + const void* dictStart, size_t dictSize); +#if defined (__cplusplus) +} +#endif + +/*-****************************** +* Compression functions +********************************/ +LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType) +{ + if (tableType == byU16) + return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); + else + return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); +} + +LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) +{ + const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; + if (LZ4_isLittleEndian()) { + const U64 prime5bytes = 889523592379ULL; + return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); + } else { + const U64 prime8bytes = 11400714785074694791ULL; + return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); + } +} + +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) +{ + if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); + +#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT + return LZ4_hash4(LZ4_readLE32(p), tableType); +#else + return LZ4_hash4(LZ4_read32(p), tableType); +#endif +} + +LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: { /* illegal! */ assert(0); return; } + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; } + } +} + +LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: /* fallthrough */ + case byPtr: { /* illegal! 
*/ assert(0); return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } + case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } + } +} + +/* LZ4_putPosition*() : only used in byPtr mode */ +LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, + void* tableBase, tableType_t const tableType) +{ + const BYTE** const hashTable = (const BYTE**)tableBase; + assert(tableType == byPtr); (void)tableType; + hashTable[h] = p; +} + +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType) +{ + U32 const h = LZ4_hashPosition(p, tableType); + LZ4_putPositionOnHash(p, h, tableBase, tableType); +} + +/* LZ4_getIndexOnHash() : + * Index of match position registered in hash table. + * hash position must be calculated by using base+index, or dictBase+index. + * Assumption 1 : only valid if tableType == byU32 or byU16. + * Assumption 2 : h is presumed valid (within limits of hash table) + */ +LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); + if (tableType == byU32) { + const U32* const hashTable = (const U32*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-2))); + return hashTable[h]; + } + if (tableType == byU16) { + const U16* const hashTable = (const U16*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-1))); + return hashTable[h]; + } + assert(0); return 0; /* forbidden case */ +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + assert(tableType == byPtr); (void)tableType; + { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } +} + +LZ4_FORCE_INLINE const BYTE* +LZ4_getPosition(const BYTE* p, + const void* tableBase, tableType_t tableType) +{ + U32 const h = LZ4_hashPosition(p, tableType); + return LZ4_getPositionOnHash(h, tableBase, tableType); +} + +LZ4_FORCE_INLINE void +LZ4_prepareTable(LZ4_stream_t_internal* const cctx, + const int inputSize, + const tableType_t tableType) { + /* If the table hasn't been used, it's guaranteed to be zeroed out, and is + * therefore safe to use no matter what mode we're in. Otherwise, we figure + * out if it's safe to leave as is or whether it needs to be reset. + */ + if ((tableType_t)cctx->tableType != clearedTable) { + assert(inputSize >= 0); + if ((tableType_t)cctx->tableType != tableType + || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) + || ((tableType == byU32) && cctx->currentOffset > 1 GB) + || tableType == byPtr + || inputSize >= 4 KB) + { + DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", (void*)cctx); + MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); + cctx->currentOffset = 0; + cctx->tableType = (U32)clearedTable; + } else { + DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); + } + } + + /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, + * is faster than compressing without a gap. + * However, compressing with currentOffset == 0 is faster still, + * so we preserve that case. + */ + if (cctx->currentOffset != 0 && tableType == byU32) { + DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); + cctx->currentOffset += 64 KB; + } + + /* Finally, clear history */ + cctx->dictCtx = NULL; + cctx->dictionary = NULL; + cctx->dictSize = 0; +} + +/** LZ4_compress_generic_validated() : + * inlined, to ensure branches are decided at compilation time. 
+ * The following conditions are presumed already validated: + * - source != NULL + * - inputSize > 0 + */ +LZ4_FORCE_INLINE int LZ4_compress_generic_validated( + LZ4_stream_t_internal* const cctx, + const char* const source, + char* const dest, + const int inputSize, + int* inputConsumed, /* only written when outputDirective == fillOutput */ + const int maxOutputSize, + const limitedOutput_directive outputDirective, + const tableType_t tableType, + const dict_directive dictDirective, + const dictIssue_directive dictIssue, + const int acceleration) +{ + int result; + const BYTE* ip = (const BYTE*)source; + + U32 const startIndex = cctx->currentOffset; + const BYTE* base = (const BYTE*)source - startIndex; + const BYTE* lowLimit; + + const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; + const BYTE* const dictionary = + dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; + const U32 dictSize = + dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; + const U32 dictDelta = + (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */ + + int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); + U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ + const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary; + const BYTE* anchor = (const BYTE*) source; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; + const BYTE* const matchlimit = iend - LASTLITERALS; + + /* the dictCtx currentOffset is indexed on the start of the dictionary, + * while a dictionary in the current context precedes the currentOffset */ + const BYTE* dictBase = (dictionary == NULL) ? NULL : + (dictDirective == usingDictCtx) ? + dictionary + dictSize - dictCtx->currentOffset : + dictionary + dictSize - startIndex; + + BYTE* op = (BYTE*) dest; + BYTE* const olimit = op + maxOutputSize; + + U32 offset = 0; + U32 forwardH; + + DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType); + assert(ip != NULL); + if (tableType == byU16) assert(inputSize= 1); + + lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); + + /* Update context state */ + if (dictDirective == usingDictCtx) { + /* Subsequent linked blocks can't use the dictionary. */ + /* Instead, they use the block we just compressed. 
*/ + cctx->dictCtx = NULL; + cctx->dictSize = (U32)inputSize; + } else { + cctx->dictSize += (U32)inputSize; + } + cctx->currentOffset += (U32)inputSize; + cctx->tableType = (U32)tableType; + + if (inputSizehashTable, byPtr); + } else { + LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType); + } } + ip++; forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for ( ; ; ) { + const BYTE* match; + BYTE* token; + const BYTE* filledIp; + + /* Find a match */ + if (tableType == byPtr) { + const BYTE* forwardIp = ip; + int step = 1; + int searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); + + match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType); + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType); + + } while ( (match+LZ4_DISTANCE_MAX < ip) + || (LZ4_read32(match) != LZ4_read32(ip)) ); + + } else { /* byU32, byU16 */ + + const BYTE* forwardIp = ip; + int step = 1; + int searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + U32 const current = (U32)(forwardIp - base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex <= current); + assert(forwardIp - base < (ptrdiff_t)(2 GB - 1)); + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); + + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + assert(tableType == byU32); + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + matchIndex += dictDelta; /* make dictCtx index comparable with current context */ + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; + } + } else if (dictDirective == usingExtDict) { + if (matchIndex < startIndex) { + DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex); + assert(startIndex - matchIndex >= MINMATCH); + assert(dictBase); + match = dictBase + matchIndex; + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; + } + } else { /* single continuous memory segment */ + match = base + matchIndex; + } + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + + DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex); + if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */ + assert(matchIndex < current); + if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) + && (matchIndex+LZ4_DISTANCE_MAX < current)) { + continue; + } /* too far */ + assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ + + if (LZ4_read32(match) == LZ4_read32(ip)) { + if (maybe_extMem) offset = current - matchIndex; + break; /* match found */ + } + + } while(1); + } + + /* Catch up */ + filledIp = ip; + assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */ + if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) { + do { ip--; match--; } while (((ip > anchor) & (match > 
lowLimit)) && (unlikely(ip[-1] == match[-1]))); + } + + /* Encode Literals */ + { unsigned const litLength = (unsigned)(ip - anchor); + token = op++; + if ((outputDirective == limitedOutput) && /* Check output buffer overflow */ + (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) { + return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ + } + if ((outputDirective == fillOutput) && + (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) { + op--; + goto _last_literals; + } + if (litLength >= RUN_MASK) { + unsigned len = litLength - RUN_MASK; + *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255; + *op++ = (BYTE)len; + } + else *token = (BYTE)(litLength< olimit)) { + /* the match was too close to the end, rewind and go to last literals */ + op = token; + goto _last_literals; + } + + /* Encode Offset */ + if (maybe_extMem) { /* static test */ + DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source)); + assert(offset <= LZ4_DISTANCE_MAX && offset > 0); + LZ4_writeLE16(op, (U16)offset); op+=2; + } else { + DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match)); + assert(ip-match <= LZ4_DISTANCE_MAX); + LZ4_writeLE16(op, (U16)(ip - match)); op+=2; + } + + /* Encode MatchLength */ + { unsigned matchCode; + + if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx) + && (lowLimit==dictionary) /* match within extDict */ ) { + const BYTE* limit = ip + (dictEnd-match); + assert(dictEnd > match); + if (limit > matchlimit) limit = matchlimit; + matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); + ip += (size_t)matchCode + MINMATCH; + if (ip==limit) { + unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit); + matchCode += more; + ip += more; + } + DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH); + } else { + matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); + ip += (size_t)matchCode + MINMATCH; + DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH); + } + + if ((outputDirective) && /* Check output buffer overflow */ + (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) { + if (outputDirective == fillOutput) { + /* Match description too long : reduce it */ + U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255; + ip -= matchCode - newMatchCode; + assert(newMatchCode < matchCode); + matchCode = newMatchCode; + if (unlikely(ip <= filledIp)) { + /* We have already filled up to filledIp so if ip ends up less than filledIp + * we have positions in the hash table beyond the current position. This is + * a problem if we reuse the hash table. So we have to remove these positions + * from the hash table. + */ + const BYTE* ptr; + DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip)); + for (ptr = ip; ptr <= filledIp; ++ptr) { + U32 const h = LZ4_hashPosition(ptr, tableType); + LZ4_clearHash(h, cctx->hashTable, tableType); + } + } + } else { + assert(outputDirective == limitedOutput); + return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */ + } + } + if (matchCode >= ML_MASK) { + *token += ML_MASK; + matchCode -= ML_MASK; + LZ4_write32(op, 0xFFFFFFFF); + while (matchCode >= 4*255) { + op+=4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4*255; + } + op += matchCode / 255; + *op++ = (BYTE)(matchCode % 255); + } else + *token += (BYTE)(matchCode); + } + /* Ensure we have enough space for the last literals. */ + assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit)); + + anchor = ip; + + /* Test end of chunk */ + if (ip >= mflimitPlusOne) break; + + /* Fill table */ + { U32 const h = LZ4_hashPosition(ip-2, tableType); + if (tableType == byPtr) { + LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr); + } else { + U32 const idx = (U32)((ip-2) - base); + LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType); + } } + + /* Test next position */ + if (tableType == byPtr) { + + match = LZ4_getPosition(ip, cctx->hashTable, tableType); + LZ4_putPosition(ip, cctx->hashTable, tableType); + if ( (match+LZ4_DISTANCE_MAX >= ip) + && (LZ4_read32(match) == LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + } else { /* byU32, byU16 */ + + U32 const h = LZ4_hashPosition(ip, tableType); + U32 const current = (U32)(ip-base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex < current); + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + assert(tableType == byU32); + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + matchIndex += dictDelta; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + assert(dictBase); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else { /* single memory segment */ + match = base + matchIndex; + } + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + assert(matchIndex < current); + if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) + && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current)) + && (LZ4_read32(match) == LZ4_read32(ip)) ) { + token=op++; + *token=0; + if (maybe_extMem) offset = current - matchIndex; + DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", + (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); + goto _next_match; + } + } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + + } + +_last_literals: + /* Encode Last Literals */ + { size_t lastRun = (size_t)(iend - anchor); + if ( (outputDirective) && /* Check output buffer overflow */ + (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { + if (outputDirective == fillOutput) { + /* adapt lastRun to fill 'dst' */ + assert(olimit >= op); + lastRun = (size_t)(olimit-op) - 1/*token*/; + lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/ + } else { + assert(outputDirective == limitedOutput); + return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */ + } + } + DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun); + if (lastRun >= RUN_MASK) { + size_t accumulator = lastRun - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRun< 0); + DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result); + return result; +} + +#if defined(__riscv) && defined(__riscv_vector) +#include + +static inline void LZ4_wildCopy_rvv(BYTE* dst, const BYTE* src, size_t len) { + size_t offset = 0; + while (offset < len) { + size_t vl = __riscv_vsetvl_e8m1(len - offset); + vuint8m1_t vec = __riscv_vle8_v_u8m1(src + offset, vl); + __riscv_vse8_v_u8m1(dst + offset, vec, vl); + offset += vl; + } +} + +static inline unsigned LZ4_count_rvv(const BYTE* ip, const BYTE* match, const BYTE* limit) { + unsigned len = 0; + size_t remaining = (size_t)(limit - ip); + while (remaining > 0) { + size_t vl = __riscv_vsetvl_e8m1(remaining); + vuint8m1_t v_ip = __riscv_vle8_v_u8m1(ip, vl); + vuint8m1_t v_match = __riscv_vle8_v_u8m1(match, vl); + vbool8_t mask = __riscv_vmsne_vv_u8m1_b8(v_ip, v_match, vl); + int first_diff = __riscv_vfirst_m_b8(mask, vl); + if (first_diff != -1) { + len += (unsigned)first_diff; + return len; + } + len += (unsigned)vl; + ip += vl; + match += vl; + remaining -= vl; + } + return len; +} + +/* RVV 版本字面量拷贝 */ +static inline void LZ4_memcpy_rvv(BYTE* dst, const BYTE* src, size_t len) { + size_t remaining = len; // 剩余待拷贝字节数 + // 循环处理所有字节,使用向量化加速 + while (remaining > 0) { + size_t vl = __riscv_vsetvl_e8m1(remaining); + vuint8m1_t vec = __riscv_vle8_v_u8m1(src, vl); + __riscv_vse8_v_u8m1(dst, vec, vl); + src += vl; + dst += vl; + remaining -= vl; + } + } + + +#endif + +int LZ4_compress_generic_validated_rvv( + LZ4_stream_t_internal* const cctx, + const char* const source, + char* const dest, + const int inputSize, + int* inputConsumed, + const int maxOutputSize, + const limitedOutput_directive outputDirective, + const tableType_t tableType, + const dict_directive dictDirective, + const dictIssue_directive dictIssue, + const int acceleration +) { + int result; + const BYTE* ip = (const BYTE*)source; + + U32 const startIndex = cctx->currentOffset; + const BYTE* base = (const BYTE*)source - startIndex; + const BYTE* lowLimit; + + const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; + const BYTE* const dictionary = + dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; + const U32 dictSize = + dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; + const U32 dictDelta = + (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */ + + int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); + U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ + const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary; + const BYTE* anchor = (const BYTE*) source; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; + const BYTE* const matchlimit = iend - LASTLITERALS; + + /* the dictCtx currentOffset is indexed on the start of the dictionary, + * while a dictionary in the current context precedes the currentOffset */ + const BYTE* dictBase = (dictionary == NULL) ? 
NULL : + (dictDirective == usingDictCtx) ? + dictionary + dictSize - dictCtx->currentOffset : + dictionary + dictSize - startIndex; + + BYTE* op = (BYTE*) dest; + BYTE* const olimit = op + maxOutputSize; + + U32 offset = 0; + U32 forwardH; + + DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType); + assert(ip != NULL); + if (tableType == byU16) assert(inputSize= 1); + + lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); + + /* Update context state */ + if (dictDirective == usingDictCtx) { + /* Subsequent linked blocks can't use the dictionary. */ + /* Instead, they use the block we just compressed. */ + cctx->dictCtx = NULL; + cctx->dictSize = (U32)inputSize; + } else { + cctx->dictSize += (U32)inputSize; + } + cctx->currentOffset += (U32)inputSize; + cctx->tableType = (U32)tableType; + + if (inputSizehashTable, byPtr); + } else { + LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType); + } } + ip++; forwardH = LZ4_hashPosition(ip, tableType); + + // 主循环 + for ( ; ; ) { + const BYTE* match; + BYTE* token; + const BYTE* filledIp; + + /* Find a match */ + if (tableType == byPtr) { + // byPtr 查找匹配逻辑保持原样 + const BYTE* forwardIp = ip; + int step = 1; + int searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); + + match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType); + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType); + + } while ( (match+LZ4_DISTANCE_MAX < ip) + || (LZ4_read32(match) != LZ4_read32(ip)) ); + + } else { /* byU32, byU16 */ + const BYTE* forwardIp = ip; + int step = 1; + int searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + U32 const current = (U32)(forwardIp - base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex <= current); + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); + + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + matchIndex += dictDelta; + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = source; + } + } else if (dictDirective == usingExtDict) { + if (matchIndex < startIndex) { + match = dictBase + matchIndex; + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = source; + } + } else { + match = base + matchIndex; + } + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + + if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue; + if ((tableType != byU16 || LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX) + && (matchIndex+LZ4_DISTANCE_MAX < current)) continue; + if (LZ4_read32(match) == LZ4_read32(ip)) { + if (maybe_extMem) offset = current - matchIndex; + break; + } + } while(1); + } + + /* Catch up */ + filledIp = ip; + if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) { + do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1]))); + } + + /* Encode Literals */ + { + unsigned const litLength = (unsigned)(ip - 
anchor);
+ token = op++;
+
+ if ((outputDirective == limitedOutput) &&
+ (unlikely(op + litLength + 2 + 1 + LASTLITERALS + (litLength/255) > olimit))) {
+ return 0;
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 + litLength + 2 + 1 + MFLIMIT - MINMATCH > olimit))) {
+ op--;
+ goto _last_literals;
+ }
+
+ if (litLength >= RUN_MASK) {
+ unsigned len = litLength - RUN_MASK;
+ *token = (RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ } else {
+ *token = (BYTE)(litLength<<ML_BITS);
+ }
+
+ /* Copy Literals */
+#if defined(__riscv) && defined(__riscv_vector)
+ LZ4_wildCopy_rvv(op, anchor, litLength);
+#else
+ LZ4_wildCopy8(op, anchor, op + litLength);
+#endif
+ op += litLength;
+ }
+
+_next_match:
+ if ((outputDirective == fillOutput) &&
+ (op + 2 + 1 + MFLIMIT - MINMATCH > olimit)) {
+ op = token;
+ goto _last_literals;
+ }
+
+ /* Encode Offset */
+ if (maybe_extMem) {
+ LZ4_writeLE16(op, (U16)offset); op+=2;
+ } else {
+ LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+ }
+
+ /* Encode MatchLength */
+ {
+ unsigned matchCode;
+ if ((dictDirective==usingExtDict || dictDirective==usingDictCtx) &&
+ (lowLimit==dictionary)) {
+ const BYTE* limit = ip + (dictEnd - match);
+ if (limit > matchlimit) limit = matchlimit;
+#if defined(__riscv) && defined(__riscv_vector)
+ matchCode = LZ4_count_rvv(ip+MINMATCH, match+MINMATCH, limit);
+#else
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+#endif
+ ip += matchCode + MINMATCH;
+ if (ip==limit) {
+#if defined(__riscv) && defined(__riscv_vector)
+ unsigned more = LZ4_count_rvv(limit, (const BYTE*)source, matchlimit);
+#else
+ unsigned more = LZ4_count(limit, (const BYTE*)source, matchlimit);
+#endif
+ matchCode += more;
+ ip += more;
+ }
+ } else {
+#if defined(__riscv) && defined(__riscv_vector)
+ matchCode = LZ4_count_rvv(ip+MINMATCH, match+MINMATCH, matchlimit);
+#else
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+#endif
+ ip += matchCode + MINMATCH;
+ }
+
+ if ((outputDirective) &&
+ (unlikely(op + 1 + LASTLITERALS + (matchCode+240)/255 > olimit))) {
+ if (outputDirective == fillOutput) {
+ U32 newMatchCode = 15 - 1 + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ matchCode = newMatchCode;
+ } else {
+ return 0;
+ }
+ }
+
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+ while (matchCode >= 4*255) {
+ op+=4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4*255;
+ }
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else {
+ *token += (BYTE)(matchCode);
+ }
+ }
+
+ anchor = ip;
+ if (ip >= mflimitPlusOne) break;
+
+ /* Fill hash table */
+ {
+ U32 const h = LZ4_hashPosition(ip-2, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);
+ } else {
+ U32 const idx = (U32)((ip-2) - base);
+ LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
+ }
+ }
+
+ /* Test next position for match */
+ if (tableType == byPtr) {
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType);
+ LZ4_putPosition(ip, cctx->hashTable, tableType);
+ if ((match+LZ4_DISTANCE_MAX >= ip) && (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++; *token=0; goto _next_match;
+ }
+ } else {
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const current = (U32)(ip-base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ if (dictDirective==usingDictCtx && matchIndex < startIndex) {
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ matchIndex += dictDelta;
+ } else if (dictDirective==usingExtDict && matchIndex < startIndex) {
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ if (((dictIssue==dictSmall)?(matchIndex>=prefixIdxLimit):1) &&
+ (((tableType==byU16)&&(LZ4_DISTANCE_MAX==LZ4_DISTANCE_ABSOLUTE_MAX))?1:(matchIndex+LZ4_DISTANCE_MAX>=current)) &&
+
(LZ4_read32(match)==LZ4_read32(ip))) { + token=op++; *token=0; + if (maybe_extMem) offset=current - matchIndex; + goto _next_match; + } + } + + forwardH = LZ4_hashPosition(++ip, tableType); + } +_last_literals: + { + size_t lastRun = (size_t)(iend - anchor); + if ((outputDirective) && + (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { + if (outputDirective == fillOutput) { + assert(olimit >= op); + lastRun = (size_t)(olimit-op) - 1; + lastRun -= (lastRun + 256 - RUN_MASK) / 256; + } else { + assert(outputDirective == limitedOutput); + return 0; + } + } + + if (lastRun >= RUN_MASK) { + size_t accumulator = lastRun - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for (; accumulator >= 255; accumulator -= 255) *op++ = 255; + *op++ = (BYTE)accumulator; + } else { + *op++ = (BYTE)(lastRun << ML_BITS); + } + + /* RVV向量化拷贝 */ +#if defined(__riscv) && defined(__riscv_vector) + LZ4_memcpy_rvv(op, anchor, lastRun); +#else + memcpy(op, anchor, lastRun); +#endif + ip = anchor + lastRun; + op += lastRun; + } + + + if (outputDirective == fillOutput) { + *inputConsumed = (int)(((const char*)ip) - source); + } + result = (int)(((char*)op) - dest); + assert(result > 0); + DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result); + return result; + +} + + +LZ4_FORCE_INLINE int LZ4_compress_generic( + LZ4_stream_t_internal* const cctx, + const char* const src, + char* const dst, + const int srcSize, + int *inputConsumed, /* only written when outputDirective == fillOutput */ + const int dstCapacity, + const limitedOutput_directive outputDirective, + const tableType_t tableType, + const dict_directive dictDirective, + const dictIssue_directive dictIssue, + const int acceleration) +{ + DEBUGLOG(5, "LZ4_compress_generic (RVV): srcSize=%i, dstCapacity=%i", + srcSize, dstCapacity); + + if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */ + if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */ + if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */ + DEBUGLOG(5, "Generating an empty block"); + assert(outputDirective == notLimited || dstCapacity >= 1); + assert(dst != NULL); + dst[0] = 0; + if (outputDirective == fillOutput) { + assert (inputConsumed != NULL); + *inputConsumed = 0; + } + return 1; + } + assert(src != NULL); + + return LZ4_compress_generic_validated_rvv( + cctx, src, dst, srcSize, inputConsumed, + dstCapacity, outputDirective, + tableType, dictDirective, dictIssue, acceleration + ); + +} + + +int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +{ + LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse; + assert(ctx != NULL); + if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; + if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; + if (maxOutputSize >= LZ4_compressBound(inputSize)) { + if (inputSize < LZ4_64Klimit) { + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + if (inputSize < LZ4_64Klimit) { + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } +} + +/** + * LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of + * "correctly initialized"). + */ +int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) +{ + LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse; + if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; + if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; + assert(ctx != NULL); + + if (dstCapacity >= LZ4_compressBound(srcSize)) { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } +} + + +int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration) +{ + int result; +#if (LZ4_HEAPMODE) + LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctxPtr == NULL) return 0; +#else + LZ4_stream_t ctx; + LZ4_stream_t* const ctxPtr = &ctx; +#endif + result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration); + +#if (LZ4_HEAPMODE) + FREEMEM(ctxPtr); +#endif + return result; +} + + +int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1); +} + + +/* Note!: This function leaves the stream in an unclean/broken state! + * It is not safe to subsequently use the same state with a _fastReset() or + * _continue() call without resetting it. */ +static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration) +{ + void* const s = LZ4_initStream(state, sizeof (*state)); + assert(s != NULL); (void)s; + + if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ + return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration); + } else { + if (*srcSizePtr < LZ4_64Klimit) { + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration); + } else { + tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32; + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration); + } } +} + +int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration) +{ + int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration); + /* clean the state on exit */ + LZ4_initStream(state, sizeof (LZ4_stream_t)); + return r; +} + + +int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) +{ +#if (LZ4_HEAPMODE) + LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctx == NULL) return 0; +#else + LZ4_stream_t ctxBody; + LZ4_stream_t* const ctx = &ctxBody; +#endif + + int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1); + +#if (LZ4_HEAPMODE) + FREEMEM(ctx); +#endif + return result; +} + + + +/*-****************************** +* Streaming functions +********************************/ + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_stream_t* LZ4_createStream(void) +{ + LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); + LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal)); + DEBUGLOG(4, "LZ4_createStream %p", (void*)lz4s); + if (lz4s == NULL) return NULL; + LZ4_initStream(lz4s, sizeof(*lz4s)); + return lz4s; +} +#endif + +static size_t LZ4_stream_t_alignment(void) +{ +#if LZ4_ALIGN_TEST + typedef struct { char c; LZ4_stream_t t; } t_a; + return sizeof(t_a) - sizeof(LZ4_stream_t); +#else + return 1; /* effectively disabled */ +#endif +} + +LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) +{ + DEBUGLOG(5, "LZ4_initStream"); + if (buffer == NULL) { return NULL; } + if (size < sizeof(LZ4_stream_t)) { return NULL; } + if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL; + MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal)); + return (LZ4_stream_t*)buffer; +} + +/* resetStream is now deprecated, + * prefer initStream() which is more general */ +void LZ4_resetStream (LZ4_stream_t* LZ4_stream) +{ + DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", (void*)LZ4_stream); + MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal)); +} + +void LZ4_resetStream_fast(LZ4_stream_t* ctx) { + LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); +} + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +int LZ4_freeStream (LZ4_stream_t* LZ4_stream) +{ + if (!LZ4_stream) return 0; /* support free on NULL */ + DEBUGLOG(5, "LZ4_freeStream %p", (void*)LZ4_stream); + FREEMEM(LZ4_stream); + return (0); +} +#endif + + +typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e; +#define HASH_UNIT sizeof(reg_t) +int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict, + const char* dictionary, int dictSize, + LoadDict_mode_e _ld) +{ + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + const tableType_t tableType = byU32; + const BYTE* p = (const BYTE*)dictionary; + const BYTE* const dictEnd = p + dictSize; + U32 idx32; + + DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, (void*)dictionary, (void*)LZ4_dict); + + /* It's necessary to reset the context, + * and not just continue it with prepareTable() + * to avoid any risk of generating overflowing matchIndex + * when compressing using this dictionary */ + LZ4_resetStream(LZ4_dict); + + /* We always increment 
the offset by 64 KB, since, if the dict is longer, + * we truncate it to the last 64k, and if it's shorter, we still want to + * advance by a whole window length so we can provide the guarantee that + * there are only valid offsets in the window, which allows an optimization + * in LZ4_compress_fast_continue() where it uses noDictIssue even when the + * dictionary isn't a full 64k. */ + dict->currentOffset += 64 KB; + + if (dictSize < (int)HASH_UNIT) { + return 0; + } + + if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->tableType = (U32)tableType; + idx32 = dict->currentOffset - dict->dictSize; + + while (p <= dictEnd-HASH_UNIT) { + U32 const h = LZ4_hashPosition(p, tableType); + /* Note: overwriting => favors positions end of dictionary */ + LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); + p+=3; idx32+=3; + } + + if (_ld == _ld_slow) { + /* Fill hash table with additional references, to improve compression capability */ + p = dict->dictionary; + idx32 = dict->currentOffset - dict->dictSize; + while (p <= dictEnd-HASH_UNIT) { + U32 const h = LZ4_hashPosition(p, tableType); + U32 const limit = dict->currentOffset - 64 KB; + if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) { + /* Note: not overwriting => favors positions beginning of dictionary */ + LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); + } + p++; idx32++; + } + } + + return (int)dict->dictSize; +} + +int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast); +} + +int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow); +} + +void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) +{ + const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL : + &(dictionaryStream->internal_donotuse); + + DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", + (void*)workingStream, (void*)dictionaryStream, + dictCtx != NULL ? dictCtx->dictSize : 0); + + if (dictCtx != NULL) { + /* If the current offset is zero, we will never look in the + * external dictionary context, since there is no value a table + * entry can take that indicate a miss. In that case, we need + * to bump the offset to something non-zero. + */ + if (workingStream->internal_donotuse.currentOffset == 0) { + workingStream->internal_donotuse.currentOffset = 64 KB; + } + + /* Don't actually attach an empty dictionary. 
+ */ + if (dictCtx->dictSize == 0) { + dictCtx = NULL; + } + } + workingStream->internal_donotuse.dictCtx = dictCtx; +} + + +static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize) +{ + assert(nextSize >= 0); + if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */ + /* rescale hash table */ + U32 const delta = LZ4_dict->currentOffset - 64 KB; + const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; + int i; + DEBUGLOG(4, "LZ4_renormDictT"); + for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0; + else LZ4_dict->hashTable[i] -= delta; + } + LZ4_dict->currentOffset = 64 KB; + if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; + LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; + } +} + + +int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, + const char* source, char* dest, + int inputSize, int maxOutputSize, + int acceleration) +{ + const tableType_t tableType = byU32; + LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse; + const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL; + + DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize); + + LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */ + if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; + if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; + + /* invalidate tiny dictionaries */ + if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */ + && (dictEnd != source) /* prefix mode */ + && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */ + && (streamPtr->dictCtx == NULL) /* usingDictCtx */ + ) { + DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, (void*)streamPtr->dictionary); + /* remove dictionary existence from history, to employ faster prefix mode */ + streamPtr->dictSize = 0; + streamPtr->dictionary = (const BYTE*)source; + dictEnd = source; + } + + /* Check overlapping input/dictionary space */ + { const char* const sourceEnd = source + inputSize; + if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) { + streamPtr->dictSize = (U32)(dictEnd - sourceEnd); + if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; + if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; + streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize; + } + } + + /* prefix mode : source data follows dictionary */ + if (dictEnd == source) { + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); + else + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); + } + + /* external dictionary mode */ + { int result; + if (streamPtr->dictCtx) { + /* We depend here on the fact that dictCtx'es (produced by + * LZ4_loadDict) guarantee that their tables contain no references + * to offsets between dictCtx->currentOffset - 64 KB and + * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe + * to use noDictIssue even when the dict isn't a full 64 KB. 
+ */ + if (inputSize > 4 KB) { + /* For compressing large blobs, it is faster to pay the setup + * cost to copy the dictionary's tables into the active context, + * so that the compression loop is only looking into one table. + */ + LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr)); + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); + } + } else { /* small data <= 4 KB */ + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } + } + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + return result; + } +} + + +/* Hidden debug function, to force-test external dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) +{ + LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse; + int result; + + LZ4_renormDictT(streamPtr, srcSize); + + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); + } + + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)srcSize; + + return result; +} + + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at its memory location, + * save it into a safer place (char* safeBuffer). + * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable, + * one can therefore call LZ4_compress_fast_continue() right after. + * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + */ +int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) +{ + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + + DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, (void*)safeBuffer); + + if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */ + if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } + + if (safeBuffer == NULL) assert(dictSize == 0); + if (dictSize > 0) { + const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; + assert(dict->dictionary); + LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize); + } + + dict->dictionary = (const BYTE*)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} + + + +/*-******************************* + * Decompression functions + ********************************/ + +typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; + +#undef MIN +#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) ) + + +/* variant for decompress_unsafe() + * does not know end of input + * presumes input is well formed + * note : will consume at least one byte */ +static size_t read_long_length_no_check(const BYTE** pp) +{ + size_t b, l = 0; + do { b = **pp; (*pp)++; l += b; } while (b==255); + DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1) + return l; +} + +/* core decoder variant for LZ4_decompress_fast*() + * for legacy support only : these entry points are deprecated. + * - Presumes input is correctly formed (no defense vs malformed inputs) + * - Does not know input size (presume input buffer is "large enough") + * - Decompress a full block (only) + * @return : nb of bytes read from input. + * Note : this variant is not optimized for speed, just for maintenance. + * the goal is to remove support of decompress_fast*() variants by v2.0 +**/ +LZ4_FORCE_INLINE int +LZ4_decompress_unsafe_generic( + const BYTE* const istart, + BYTE* const ostart, + int decompressedSize, + + size_t prefixSize, + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note: =0 if dictStart==NULL */ + ) +{ + const BYTE* ip = istart; + BYTE* op = (BYTE*)ostart; + BYTE* const oend = ostart + decompressedSize; + const BYTE* const prefixStart = ostart - prefixSize; + + DEBUGLOG(5, "LZ4_decompress_unsafe_generic"); + if (dictStart == NULL) assert(dictSize == 0); + + while (1) { + /* start new sequence */ + unsigned token = *ip++; + + /* literals */ + { size_t ll = token >> ML_BITS; + if (ll==15) { + /* long literal length */ + ll += read_long_length_no_check(&ip); + } + if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */ + LZ4_memmove(op, ip, ll); /* support in-place decompression */ + op += ll; + ip += ll; + if ((size_t)(oend-op) < MFLIMIT) { + if (op==oend) break; /* end of block */ + DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op); + /* incorrect end of block : + * last match must start at least MFLIMIT==12 bytes before end of output block */ + return -1; + } } + + /* match */ + { size_t ml = token & 15; + size_t const offset = LZ4_readLE16(ip); + ip+=2; + + if (ml==15) { + /* long literal length */ + ml += read_long_length_no_check(&ip); + } + ml += MINMATCH; + + if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */ + + { const BYTE* match = op - offset; + + /* out of range */ + if (offset > (size_t)(op - prefixStart) + dictSize) { + DEBUGLOG(6, "offset out of range"); + return -1; + } + + /* check special case : extDict */ + if (offset > (size_t)(op - prefixStart)) { + /* extDict scenario */ + const BYTE* const dictEnd = dictStart + dictSize; + const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart)); + size_t const extml = (size_t)(dictEnd - extMatch); + if (extml > ml) { + /* match entirely within extDict */ + LZ4_memmove(op, extMatch, ml); + op += ml; + ml = 0; + } else { + /* match split between extDict & prefix */ + LZ4_memmove(op, extMatch, extml); + op += extml; + ml -= extml; + } + match = prefixStart; + } + + /* match copy - slow variant, supporting overlap copy */ + { size_t u; + for (u=0; u= ipmax before start of loop. Returns initial_error if so. + * @error (output) - error code. Must be set to 0 before call. 
+**/ +typedef size_t Rvl_t; +static const Rvl_t rvl_error = (Rvl_t)(-1); +LZ4_FORCE_INLINE Rvl_t +read_variable_length(const BYTE** ip, const BYTE* ilimit, + int initial_check) +{ + Rvl_t s, length = 0; + assert(ip != NULL); + assert(*ip != NULL); + assert(ilimit != NULL); + if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */ + return rvl_error; + } + s = **ip; + (*ip)++; + length += s; + if (unlikely((*ip) > ilimit)) { /* read limit reached */ + return rvl_error; + } + /* accumulator overflow detection (32-bit mode only) */ + if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { + return rvl_error; + } + if (likely(s != 255)) return length; + do { + s = **ip; + (*ip)++; + length += s; + if (unlikely((*ip) > ilimit)) { /* read limit reached */ + return rvl_error; + } + /* accumulator overflow detection (32-bit mode only) */ + if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { + return rvl_error; + } + } while (s == 255); + + return length; +} + +/*! LZ4_decompress_generic() : + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. + * Note that it is important for performance that this function really get inlined, + * in order to remove useless branches during compilation optimization. + */ +LZ4_FORCE_INLINE int +LZ4_decompress_generic( + const char* const src, + char* const dst, + int srcSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ + + earlyEnd_directive partialDecoding, /* full, partial */ + dict_directive dict, /* noDict, withPrefix64k, usingExtDict */ + const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note : = 0 if noDict */ + ) +{ + if ((src == NULL) || (outputSize < 0)) { return -1; } + + { const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + + BYTE* op = (BYTE*) dst; + BYTE* const oend = op + outputSize; + BYTE* cpy; + + const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize; + + const int checkOffset = (dictSize < (int)(64 KB)); + + + /* Set up the "end" pointers for the shortcut. */ + const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/; + const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/; + + const BYTE* match; + size_t offset; + unsigned token; + size_t length; + + + DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize); + + /* Special cases */ + assert(lowPrefix <= op); + if (unlikely(outputSize==0)) { + /* Empty output buffer */ + if (partialDecoding) return 0; + return ((srcSize==1) && (*ip==0)) ? 0 : -1; + } + if (unlikely(srcSize==0)) { return -1; } + + /* LZ4_FAST_DEC_LOOP: + * designed for modern OoO performance cpus, + * where copying reliably 32-bytes is preferable to an unpredictable branch. + * note : fast loop may show a regression for some client arm chips. 
*/ +#if LZ4_FAST_DEC_LOOP + if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { + DEBUGLOG(6, "move to safe decode loop"); + goto safe_decode; + } + + /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */ + DEBUGLOG(6, "using fast decode loop"); + while (1) { + /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ + assert(oend - op >= FASTLOOP_SAFE_DISTANCE); + assert(ip < iend); + token = *ip++; + length = token >> ML_BITS; /* literal length */ + DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); + + /* decode literal length */ + if (length == RUN_MASK) { + size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); + if (addl == rvl_error) { + DEBUGLOG(6, "error reading long literal length"); + goto _output_error; + } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + + /* copy literals */ + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } + LZ4_wildCopy32(op, ip, op+length); + ip += length; op += length; + } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) { + /* We don't need to check oend, since we check it once for each loop below */ + DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); + /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */ + LZ4_memcpy(op, ip, 16); + ip += length; op += length; + } else { + goto safe_literal_copy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset); + match = op - offset; + assert(match <= op); /* overflow check */ + + /* get matchlength */ + length = token & ML_MASK; + DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH); + + if (length == ML_MASK) { + size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); + if (addl == rvl_error) { + DEBUGLOG(5, "error reading long match length"); + goto _output_error; + } + length += addl; + length += MINMATCH; + DEBUGLOG(7, " long match length == %u", (unsigned)length); + if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ + if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + goto safe_match_copy; + } + } else { + length += MINMATCH; + if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length); + goto safe_match_copy; + } + + /* Fastpath check: skip LZ4_wildCopy32 when true */ + if ((dict == withPrefix64k) || (match >= lowPrefix)) { + if (offset >= 8) { + assert(match >= lowPrefix); + assert(match <= op); + assert(op + 18 <= oend); + + LZ4_memcpy(op, match, 8); + LZ4_memcpy(op+8, match+8, 8); + LZ4_memcpy(op+16, match+16, 2); + op += length; + continue; + } } } + + if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) { + DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match); + goto _output_error; + } + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + assert(dictEnd != NULL); + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) { + DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd"); + length = MIN(length, 
(size_t)(oend-op)); + } else { + DEBUGLOG(6, "end-of-block condition violated") + goto _output_error; + } } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + LZ4_memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) { *op++ = *copyFrom++; } + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + + /* copy match within block */ + cpy = op + length; + + assert((op <= oend) && (oend-op >= 32)); + if (unlikely(offset<16)) { + LZ4_memcpy_using_offset(op, match, cpy, offset); + } else { + LZ4_wildCopy32(op, match, cpy); + } + + op = cpy; /* wildcopy correction */ + } + safe_decode: +#endif + + /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ + DEBUGLOG(6, "using safe decode loop"); + while (1) { + assert(ip < iend); + token = *ip++; + length = token >> ML_BITS; /* literal length */ + DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); + + /* A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there is enough space, + * enter the shortcut and copy 16 bytes on behalf of the literals + * (in the fast mode, only 8 bytes can be safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes in a similar + * manner; but we ensure that there's enough space in the output for + * those 18 bytes earlier, upon entering the shortcut (in other words, + * there is a combined check for both stages). + */ + if ( (length != RUN_MASK) + /* strictly "less than" on input, to re-enter the loop with at least one byte */ + && likely((ip < shortiend) & (op <= shortoend)) ) { + /* Copy the literals */ + LZ4_memcpy(op, ip, 16); + op += length; ip += length; + + /* The second stage: prepare for match copying, decode full info. + * If it doesn't work out, the info won't be wasted. */ + length = token & ML_MASK; /* match length */ + DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4); + offset = LZ4_readLE16(ip); ip += 2; + match = op - offset; + assert(match <= op); /* check overflow */ + + /* Do not deal with overlapping matches. */ + if ( (length != ML_MASK) + && (offset >= 8) + && (dict==withPrefix64k || match >= lowPrefix) ) { + /* Copy the match. */ + LZ4_memcpy(op + 0, match + 0, 8); + LZ4_memcpy(op + 8, match + 8, 8); + LZ4_memcpy(op +16, match +16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* The second stage didn't work out, but the info is ready. + * Propel it right to the point of match copying. 
*/ + goto _copy_match; + } + + /* decode literal length */ + if (length == RUN_MASK) { + size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); + if (addl == rvl_error) { goto _output_error; } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + } + +#if LZ4_FAST_DEC_LOOP + safe_literal_copy: +#endif + /* copy literals */ + cpy = op+length; + + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) { + /* We've either hit the input parsing restriction or the output parsing restriction. + * In the normal scenario, decoding a full block, it must be the last sequence, + * otherwise it's an error (invalid input or dimensions). + * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow. + */ + if (partialDecoding) { + /* Since we are partial decoding we may be in this block because of the output parsing + * restriction, which is not valid since the output buffer is allowed to be undersized. + */ + DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end") + DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length); + DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op)); + DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip)); + /* Finishing in the middle of a literals segment, + * due to lack of input. + */ + if (ip+length > iend) { + length = (size_t)(iend-ip); + cpy = op + length; + } + /* Finishing in the middle of a literals segment, + * due to lack of output space. + */ + if (cpy > oend) { + cpy = oend; + assert(op<=oend); + length = (size_t)(oend-op); + } + } else { + /* We must be on the last sequence (or invalid) because of the parsing limitations + * so check that we exactly consume the input and don't overrun the output buffer. + */ + if ((ip+length != iend) || (cpy > oend)) { + DEBUGLOG(5, "should have been last run of literals") + DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", (void*)ip, (int)length, (void*)(ip+length), (void*)iend); + DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", (void*)cpy, (void*)(oend-MFLIMIT)); + DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize); + goto _output_error; + } + } + LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */ + ip += length; + op += length; + /* Necessarily EOF when !partialDecoding. + * When partialDecoding, it is EOF if we've either + * filled the output buffer or + * can't proceed with reading an offset for following match. 
+ */ + if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) { + break; + } + } else { + LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */ + ip += length; op = cpy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + match = op - offset; + + /* get matchlength */ + length = token & ML_MASK; + DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); + + _copy_match: + if (length == ML_MASK) { + size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); + if (addl == rvl_error) { goto _output_error; } + length += addl; + if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ + } + length += MINMATCH; + +#if LZ4_FAST_DEC_LOOP + safe_match_copy: +#endif + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + assert(dictEnd != NULL); + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) length = MIN(length, (size_t)(oend-op)); + else goto _output_error; /* doesn't respect parsing restriction */ + } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + LZ4_memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + assert(match >= lowPrefix); + + /* copy match within block */ + cpy = op + length; + + /* partialDecoding : may end anywhere within the block */ + assert(op<=oend); + if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + size_t const mlen = MIN(length, (size_t)(oend-op)); + const BYTE* const matchEnd = match + mlen; + BYTE* const copyEnd = op + mlen; + if (matchEnd > op) { /* overlap copy */ + while (op < copyEnd) { *op++ = *match++; } + } else { + LZ4_memcpy(op, match, mlen); + } + op = copyEnd; + if (op == oend) { break; } + continue; + } + + if (unlikely(offset<8)) { + LZ4_write32(op, 0); /* silence msan warning when offset==0 */ + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += inc32table[offset]; + LZ4_memcpy(op+4, match, 4); + match -= dec64table[offset]; + } else { + LZ4_memcpy(op, match, 8); + match += 8; + } + op += 8; + + if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); + if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ + if (op < oCopyLimit) { + LZ4_wildCopy8(op, match, oCopyLimit); + match += oCopyLimit - op; + op = oCopyLimit; + } + while (op < cpy) { *op++ = *match++; } + } else { + LZ4_memcpy(op, match, 8); + if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } + } + op = cpy; /* wildcopy correction */ + } + + /* end of decoding */ + DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst)); + return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ + + /* Overflow error 
detected */ + _output_error: + return (int) (-(((const char*)ip)-src))-1; + } +} + + +/*===== Instantiate the API decoding functions. =====*/ + +LZ4_FORCE_O2 +int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, + decode_full_block, noDict, + (BYTE*)dest, NULL, 0); +} + +LZ4_FORCE_O2 +int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, + partial_decode, + noDict, (BYTE*)dst, NULL, 0); +} + +LZ4_FORCE_O2 +int LZ4_decompress_fast(const char* source, char* dest, int originalSize) +{ + DEBUGLOG(5, "LZ4_decompress_fast"); + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + 0, NULL, 0); +} + +/*===== Instantiate a few more decoding cases, used more than once. =====*/ + +LZ4_FORCE_O2 /* Exported, an obsolete API function. */ +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, + partial_decode, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +/* Another obsolete API function, paired with the previous one. 
*/ +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +{ + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + 64 KB, NULL, 0); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, noDict, + (BYTE*)dest-prefixSize, NULL, 0); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, + size_t prefixSize) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, + partial_decode, noDict, + (BYTE*)dest-prefixSize, NULL, 0); +} + +LZ4_FORCE_O2 +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize) +{ + DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict"); + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_O2 +int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, + int compressedSize, int targetOutputSize, int dstCapacity, + const void* dictStart, size_t dictSize) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, + partial_decode, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_O2 +static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, + const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + 0, (const BYTE*)dictStart, dictSize); +} + +/* The "double dictionary" mode, for use with e.g. ring buffers: the first part + * of the dictionary is passed as prefix, and the second via dictStart + dictSize. + * These routines are used only once, in LZ4_decompress_*_continue(). + */ +LZ4_FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + decode_full_block, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} + +/*===== streaming decompression functions =====*/ + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4_streamDecode_t* LZ4_createStreamDecode(void) +{ + LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal)); + return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); +} + +int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) +{ + if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ + FREEMEM(LZ4_stream); + return 0; +} +#endif + +/*! LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). 
+ * @return : 1 if OK, 0 if error + */ +int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + lz4sd->prefixSize = (size_t)dictSize; + if (dictSize) { + assert(dictionary != NULL); + lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; + } else { + lz4sd->prefixEnd = (const BYTE*) dictionary; + } + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/*! LZ4_decoderRingBufferSize() : + * when setting a ring buffer for streaming decompression (optional scenario), + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * Note : in a ring buffer scenario, + * blocks are presumed decompressed next to each other. + * When not enough space remains for next block (remainingSize < maxBlockSize), + * decoding resumes from beginning of ring buffer. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +int LZ4_decoderRingBufferSize(int maxBlockSize) +{ + if (maxBlockSize < 0) return 0; + if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; + if (maxBlockSize < 16) maxBlockSize = 16; + return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); +} + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in "streaming" mode. + Previously decoded blocks must still be available at the memory position where they were decoded. + If it's not possible, save the relevant part of decoded data into a safe buffer, + and indicate where it stands using LZ4_setStreamDecode() +*/ +LZ4_FORCE_O2 +int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixSize == 0) { + /* The first call, no dictionary yet. */ + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + /* They're rolling the current segment. */ + if (lz4sd->prefixSize >= 64 KB - 1) + result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + else if (lz4sd->extDictSize == 0) + result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize); + else + result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += (size_t)result; + lz4sd->prefixEnd += result; + } else { + /* The buffer wraps around, or they're switching to another buffer. 
*/ + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } + + return result; +} + +LZ4_FORCE_O2 int +LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, + const char* source, char* dest, int originalSize) +{ + LZ4_streamDecode_t_internal* const lz4sd = + (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse); + int result; + + DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize); + assert(originalSize >= 0); + + if (lz4sd->prefixSize == 0) { + DEBUGLOG(5, "first invocation : no prefix nor extDict"); + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_fast(source, dest, originalSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + DEBUGLOG(5, "continue using existing prefix"); + result = LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += (size_t)originalSize; + lz4sd->prefixEnd += originalSize; + } else { + DEBUGLOG(5, "prefix becomes extDict"); + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_fast_extDict(source, dest, originalSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } + + return result; +} + + +/* +Advanced decoding functions : +*_usingDict() : + These decoding functions work the same as "_continue" ones, + the dictionary must be explicitly provided within parameters +*/ + +int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 KB - 1) { + return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize); +} + +int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 KB - 1) { + return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize); +} + 
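+/* Usage sketch (illustrative only, not part of the upstream LZ4 sources):
+ * the _usingDict() entry points above take the dictionary explicitly, so a
+ * caller does not need to keep an LZ4_streamDecode_t around. The helper
+ * name below is hypothetical; it assumes `dict`/`dictSize` reference the
+ * same dictionary that was supplied at compression time and that `dst`
+ * holds at least `dstCapacity` bytes:
+ *
+ *   int decode_block_with_dict(const char* src, int srcSize,
+ *                              char* dst, int dstCapacity,
+ *                              const char* dict, int dictSize)
+ *   {
+ *       // returns the decompressed size, or a negative value on error
+ *       return LZ4_decompress_safe_usingDict(src, dst, srcSize, dstCapacity,
+ *                                            dict, dictSize);
+ *   }
+ */
+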
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) +{ + if (dictSize==0 || dictStart+dictSize == dest) + return LZ4_decompress_unsafe_generic( + (const BYTE*)source, (BYTE*)dest, originalSize, + (size_t)dictSize, NULL, 0); + assert(dictSize >= 0); + return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); +} + + +/*=************************************************* +* Obsolete Functions +***************************************************/ +/* obsolete compression functions */ +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_default(source, dest, inputSize, maxOutputSize); +} +int LZ4_compress(const char* src, char* dest, int srcSize) +{ + return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); +} +int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); +} +int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); +} +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); +} +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) +{ + return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); +} + +/* +These decompression functions are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. +- LZ4_uncompress is totally equivalent to LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe +*/ +int LZ4_uncompress (const char* source, char* dest, int outputSize) +{ + return LZ4_decompress_fast(source, dest, outputSize); +} +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) +{ + return LZ4_decompress_safe(source, dest, isize, maxOutputSize); +} + +/* Obsolete Streaming functions */ + +int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); } + +int LZ4_resetStreamState(void* state, char* inputBuffer) +{ + (void)inputBuffer; + LZ4_resetStream((LZ4_stream_t*)state); + return 0; +} + +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +void* LZ4_create (char* inputBuffer) +{ + (void)inputBuffer; + return LZ4_createStream(); +} +#endif + +char* LZ4_slideInputBuffer (void* state) +{ + /* avoid const char * -> char * conversion warning */ + return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; +} + +#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/hbase-compression/hbase-compression-lz4/src/native/lz4.h b/hbase-compression/hbase-compression-lz4/src/native/lz4.h new file mode 100644 index 000000000000..eb84831e0a00 --- /dev/null +++ b/hbase-compression/hbase-compression-lz4/src/native/lz4.h @@ -0,0 +1,886 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Header File + * Copyright (c) Yann Collet. All rights reserved. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef LZ4_H_2983827168210 +#define LZ4_H_2983827168210 + +/* --- Dependency --- */ +#include <stddef.h> /* size_t */ + + +/** + Introduction + + LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, + scalable with multi-cores CPU. It features an extremely fast decoder, with speed in + multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. + + The LZ4 compression library provides in-memory compression and decompression functions. + It gives full buffer control to user. + Compression can be done in: + - a single step (described as Simple Functions) + - a single step, reusing a context (described in Advanced Functions) + - unbounded multiple steps (described as Streaming compression) + + lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md). + Decompressing such a compressed block requires additional metadata. + Exact metadata depends on exact decompression function. + For the typical case of LZ4_decompress_safe(), + metadata includes block's compressed size, and maximum bound of decompressed size. + Each application is free to encode and pass such metadata in whichever way it wants. + + lz4.h only handle blocks, it can not generate Frames. + + Blocks are different from Frames (doc/lz4_Frame_format.md). + Frames bundle both blocks and metadata in a specified manner. + Embedding metadata is required for compressed data to be self-contained and portable. + Frame format is delivered through a companion API, declared in lz4frame.h. + The `lz4` CLI can only manage frames. +*/ + +/*^*************************************************************** +* Export parameters +*****************************************************************/ +/* +* LZ4_DLL_EXPORT : +* Enable exporting of functions when building a Windows DLL +* LZ4LIB_VISIBILITY : +* Control library symbols visibility. 
+*/ +#ifndef LZ4LIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define LZ4LIB_VISIBILITY +# endif +#endif +#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) +# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY +#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) +# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define LZ4LIB_API LZ4LIB_VISIBILITY +#endif + +/*! LZ4_FREESTANDING : + * When this macro is set to 1, it enables "freestanding mode" that is + * suitable for typical freestanding environment which doesn't support + * standard C library. + * + * - LZ4_FREESTANDING is a compile-time switch. + * - It requires the following macros to be defined: + * LZ4_memcpy, LZ4_memmove, LZ4_memset. + * - It only enables LZ4/HC functions which don't use heap. + * All LZ4F_* functions are not supported. + * - See tests/freestanding.c to check its basic setup. + */ +#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1) +# define LZ4_HEAPMODE 0 +# define LZ4HC_HEAPMODE 0 +# define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1 +# if !defined(LZ4_memcpy) +# error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'." +# endif +# if !defined(LZ4_memset) +# error "LZ4_FREESTANDING requires macro 'LZ4_memset'." +# endif +# if !defined(LZ4_memmove) +# error "LZ4_FREESTANDING requires macro 'LZ4_memmove'." +# endif +#elif ! defined(LZ4_FREESTANDING) +# define LZ4_FREESTANDING 0 +#endif + + +/*------ Version ------*/ +#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ +#define LZ4_VERSION_MINOR 10 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ + +#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) + +#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +#define LZ4_QUOTE(str) #str +#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) +#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */ + +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */ + + +/*-************************************ +* Tuning memory usage +**************************************/ +/*! + * LZ4_MEMORY_USAGE : + * Can be selected at compile time, by setting LZ4_MEMORY_USAGE. + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB) + * Increasing memory usage improves compression ratio, generally at the cost of speed. + * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. + * Default value is 14, for 16KB, which nicely fits into most L1 caches. + */ +#ifndef LZ4_MEMORY_USAGE +# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT +#endif + +/* These are absolute limits, they should not be changed by users */ +#define LZ4_MEMORY_USAGE_MIN 10 +#define LZ4_MEMORY_USAGE_DEFAULT 14 +#define LZ4_MEMORY_USAGE_MAX 20 + +#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN) +# error "LZ4_MEMORY_USAGE is too small !" +#endif + +#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX) +# error "LZ4_MEMORY_USAGE is too large !" 
+#endif + +/*-************************************ +* Simple Functions +**************************************/ +/*! LZ4_compress_default() : + * Compresses 'srcSize' bytes from buffer 'src' + * into already allocated 'dst' buffer of size 'dstCapacity'. + * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). + * It also runs faster, so it's a recommended setting. + * If the function cannot compress 'src' into a more limited 'dst' budget, + * compression stops *immediately*, and the function result is zero. + * In which case, 'dst' content is undefined (invalid). + * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. + * dstCapacity : size of buffer 'dst' (which must be already allocated) + * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) + * or 0 if compression fails + * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). + */ +LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); + +/*! LZ4_decompress_safe() : + * @compressedSize : is the exact complete size of the compressed block. + * @dstCapacity : is the size of destination buffer (which must be already allocated), + * presumed an upper bound of decompressed size. + * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + * If destination buffer is not large enough, decoding will stop and output an error code (negative value). + * If the source stream is detected malformed, the function will stop decoding and return a negative result. + * Note 1 : This function is protected against malicious data packets : + * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, + * even if the compressed block is maliciously modified to order the decoder to do these actions. + * In such case, the decoder stops immediately, and considers the compressed block malformed. + * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. + * The implementation is free to send / store / derive this information in whichever way is most beneficial. + * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. + */ +LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); + + +/*-************************************ +* Advanced Functions +**************************************/ +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) + +/*! LZ4_compressBound() : + Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) + This function is primarily useful for memory allocation purposes (destination buffer size). + Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). + Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) + inputSize : max supported value is LZ4_MAX_INPUT_SIZE + return : maximum output size in a "worst case" scenario + or 0, if input size is incorrect (too large or negative) +*/ +LZ4LIB_API int LZ4_compressBound(int inputSize); + +/*! 
LZ4_compress_fast() : + Same as LZ4_compress_default(), but allows selection of "acceleration" factor. + The larger the acceleration value, the faster the algorithm, but also the lesser the compression. + It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. + An acceleration value of "1" is the same as regular LZ4_compress_default() + Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). + Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c). +*/ +LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + + +/*! LZ4_compress_fast_extState() : + * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. + * Use LZ4_sizeofState() to know how much memory must be allocated, + * and allocate it on 8-bytes boundaries (using `malloc()` typically). + * Then, provide this buffer as `void* state` to compression function. + */ +LZ4LIB_API int LZ4_sizeofState(void); +LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_compress_destSize() : + * Reverse the logic : compresses as much data as possible from 'src' buffer + * into already allocated buffer 'dst', of size >= 'dstCapacity'. + * This function either compresses the entire 'src' content into 'dst' if it's large enough, + * or fill 'dst' buffer completely with as much data as possible from 'src'. + * note: acceleration parameter is fixed to "default". + * + * *srcSizePtr : in+out parameter. Initially contains size of input. + * Will be modified to indicate how many bytes where read from 'src' to fill 'dst'. + * New value is necessarily <= input value. + * @return : Nb bytes written into 'dst' (necessarily <= dstCapacity) + * or 0 if compression fails. + * + * Note : 'targetDstSize' must be >= 1, because it's the smallest valid lz4 payload. + * + * Note 2:from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+): + * the produced compressed content could, in rare circumstances, + * require to be decompressed into a destination buffer + * larger by at least 1 byte than decompressesSize. + * If an application uses `LZ4_compress_destSize()`, + * it's highly recommended to update liblz4 to v1.9.2 or better. + * If this can't be done or ensured, + * the receiving decompression function should provide + * a dstCapacity which is > decompressedSize, by at least 1 byte. + * See https://github.com/lz4/lz4/issues/859 for details + */ +LZ4LIB_API int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize); + +/*! LZ4_decompress_safe_partial() : + * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', + * into destination buffer 'dst' of size 'dstCapacity'. + * Up to 'targetOutputSize' bytes will be decoded. + * The function stops decoding on reaching this objective. + * This can be useful to boost performance + * whenever only the beginning of a block is required. + * + * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize) + * If source stream is detected malformed, function returns a negative result. + * + * Note 1 : @return can be < targetOutputSize, if compressed block contains less data. + * + * Note 2 : targetOutputSize must be <= dstCapacity + * + * Note 3 : this function effectively stops decoding on reaching targetOutputSize, + * so dstCapacity is kind of redundant. 
+ * This is because in older versions of this function, + * decoding operation would still write complete sequences. + * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize, + * it could write more bytes, though only up to dstCapacity. + * Some "margin" used to be required for this operation to work properly. + * Thankfully, this is no longer necessary. + * The function nonetheless keeps the same signature, in an effort to preserve API compatibility. + * + * Note 4 : If srcSize is the exact size of the block, + * then targetOutputSize can be any value, + * including larger than the block's decompressed size. + * The function will, at most, generate block's decompressed size. + * + * Note 5 : If srcSize is _larger_ than block's compressed size, + * then targetOutputSize **MUST** be <= block's decompressed size. + * Otherwise, *silent corruption will occur*. + */ +LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); + + +/*-********************************************* +* Streaming Compression Functions +***********************************************/ +typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ + +/*! + Note about RC_INVOKED + + - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio). + https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros + + - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars) + and reports warning "RC4011: identifier truncated". + + - To eliminate the warning, we surround long preprocessor symbol with + "#if !defined(RC_INVOKED) ... #endif" block that means + "skip this block when rc.exe is trying to read it". +*/ +#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); +LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); +#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ +#endif + +/*! LZ4_resetStream_fast() : v1.9.0+ + * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks + * (e.g., LZ4_compress_fast_continue()). + * + * An LZ4_stream_t must be initialized once before usage. + * This is automatically done when created by LZ4_createStream(). + * However, should the LZ4_stream_t be simply declared on stack (for example), + * it's necessary to initialize it first, using LZ4_initStream(). + * + * After init, start any new stream with LZ4_resetStream_fast(). + * A same LZ4_stream_t can be re-used multiple times consecutively + * and compress multiple streams, + * provided that it starts each new stream with LZ4_resetStream_fast(). + * + * LZ4_resetStream_fast() is much faster than LZ4_initStream(), + * but is not compatible with memory regions containing garbage data. + * + * Note: it's only useful to call LZ4_resetStream_fast() + * in the context of streaming compression. + * The *extState* functions perform their own resets. + * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. + */ +LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); + +/*! LZ4_loadDict() : + * Use this function to reference a static dictionary into LZ4_stream_t. + * The dictionary must remain available during compression. + * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. 
+ * The same dictionary will have to be loaded on decompression side for successful decoding. + * Dictionary are useful for better compression of small data (KB range). + * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic. + * When in doubt, employ the Zstandard's Dictionary Builder. + * Loading a size of 0 is allowed, and is the same as reset. + * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded) + */ +LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_loadDictSlow() : v1.10.0+ + * Same as LZ4_loadDict(), + * but uses a bit more cpu to reference the dictionary content more thoroughly. + * This is expected to slightly improve compression ratio. + * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions. + * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded) + */ +LZ4LIB_API int LZ4_loadDictSlow(LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_attach_dictionary() : stable since v1.10.0 + * + * This allows efficient re-use of a static dictionary multiple times. + * + * Rather than re-loading the dictionary buffer into a working context before + * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a + * working LZ4_stream_t, this function introduces a no-copy setup mechanism, + * in which the working stream references @dictionaryStream in-place. + * + * Several assumptions are made about the state of @dictionaryStream. + * Currently, only states which have been prepared by LZ4_loadDict() or + * LZ4_loadDictSlow() should be expected to work. + * + * Alternatively, the provided @dictionaryStream may be NULL, + * in which case any existing dictionary stream is unset. + * + * If a dictionary is provided, it replaces any pre-existing stream history. + * The dictionary contents are the only history that can be referenced and + * logically immediately precede the data compressed in the first subsequent + * compression call. + * + * The dictionary will only remain attached to the working stream through the + * first compression call, at the end of which it is cleared. + * @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged + * through the completion of the compression session. + * + * Note: there is no equivalent LZ4_attach_*() method on the decompression side + * because there is no initialization cost, hence no need to share the cost across multiple sessions. + * To decompress LZ4 blocks using dictionary, attached or not, + * just employ the regular LZ4_setStreamDecode() for streaming, + * or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression. + */ +LZ4LIB_API void +LZ4_attach_dictionary(LZ4_stream_t* workingStream, + const LZ4_stream_t* dictionaryStream); + +/*! LZ4_compress_fast_continue() : + * Compress 'src' content using data from previously compressed blocks, for better compression ratio. + * 'dst' buffer must be already allocated. + * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. + * + * @return : size of compressed block + * or 0 if there is an error (typically, cannot fit into 'dst'). + * + * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. + * Each block has precise boundaries. + * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. 
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. + * + * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! + * + * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. + * Make sure that buffers are separated, by at least one byte. + * This construction ensures that each block only depends on previous block. + * + * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * + * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. + */ +LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_saveDict() : + * If last 64KB data cannot be guaranteed to remain available at its current memory location, + * save it into a safer place (char* safeBuffer). + * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), + * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. + * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. + */ +LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); + + +/*-********************************************** +* Streaming Decompression Functions +* Bufferless synchronous API +************************************************/ +typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ + +/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : + * creation / destruction of streaming decompression tracking context. + * A tracking context can be re-used multiple times. + */ +#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ +#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) +LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); +LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); +#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ +#endif + +/*! LZ4_setStreamDecode() : + * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. + * Use this function to start decompression of a new stream of blocks. + * A dictionary can optionally be set. Use NULL or size 0 for a reset order. + * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. + * @return : 1 if OK, 0 if error + */ +LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); + +/*! LZ4_decoderRingBufferSize() : v1.8.2+ + * Note : in a ring buffer scenario (optional), + * blocks are presumed decompressed next to each other + * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), + * at which stage it resumes from beginning of ring buffer. + * When setting such a ring buffer for streaming decompression, + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); +#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ + +/*! 
LZ4_decompress_safe_continue() : + * This decoding function allows decompression of consecutive blocks in "streaming" mode. + * The difference with the usual independent blocks is that + * new blocks are allowed to find references into former blocks. + * A block is an unsplittable entity, and must be presented entirely to the decompression function. + * LZ4_decompress_safe_continue() only accepts one block at a time. + * It's modeled after `LZ4_decompress_safe()` and behaves similarly. + * + * @LZ4_streamDecode : decompression state, tracking the position in memory of past data + * @compressedSize : exact complete size of one compressed block. + * @dstCapacity : size of destination buffer (which must be already allocated), + * must be an upper bound of decompressed size. + * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + * If destination buffer is not large enough, decoding will stop and output an error code (negative value). + * If the source stream is detected malformed, the function will stop decoding and return a negative result. + * + * The last 64KB of previously decoded data *must* remain available and unmodified + * at the memory position where they were previously decoded. + * If less than 64KB of data has been decoded, all the data must be present. + * + * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : + * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). + * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. + * In which case, encoding and decoding buffers do not need to be synchronized. + * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. + * - Synchronized mode : + * Decompression buffer size is _exactly_ the same as compression buffer size, + * and follows exactly same update rule (block boundaries at same positions), + * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), + * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). + * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including small ones ( < 64 KB). + * + * Whenever these conditions are not possible, + * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, + * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. +*/ +LZ4LIB_API int +LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, + const char* src, char* dst, + int srcSize, int dstCapacity); + + +/*! LZ4_decompress_safe_usingDict() : + * Works the same as + * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue() + * However, it's stateless: it doesn't need any LZ4_streamDecode_t state. + * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. + */ +LZ4LIB_API int +LZ4_decompress_safe_usingDict(const char* src, char* dst, + int srcSize, int dstCapacity, + const char* dictStart, int dictSize); + +/*! 
LZ4_decompress_safe_partial_usingDict() : + * Behaves the same as LZ4_decompress_safe_partial() + * with the added ability to specify a memory segment for past data. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. + */ +LZ4LIB_API int +LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, + int compressedSize, + int targetOutputSize, int maxOutputSize, + const char* dictStart, int dictSize); + +#endif /* LZ4_H_2983827168210 */ + + +/*^************************************* + * !!!!!! STATIC LINKING ONLY !!!!!! + ***************************************/ + +/*-**************************************************************************** + * Experimental section + * + * Symbols declared in this section must be considered unstable. Their + * signatures or semantics may change, or they may be removed altogether in the + * future. They are therefore only safe to depend on when the caller is + * statically linked against the library. + * + * To protect against unsafe usage, not only are the declarations guarded, + * the definitions are hidden by default + * when building LZ4 as a shared/dynamic library. + * + * In order to access these declarations, + * define LZ4_STATIC_LINKING_ONLY in your application + * before including LZ4's headers. + * + * In order to make their implementations accessible dynamically, you must + * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. + ******************************************************************************/ + +#ifdef LZ4_STATIC_LINKING_ONLY + +#ifndef LZ4_STATIC_3504398509 +#define LZ4_STATIC_3504398509 + +#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS +# define LZ4LIB_STATIC_API LZ4LIB_API +#else +# define LZ4LIB_STATIC_API +#endif + + +/*! LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. + * It is only safe to call if the state buffer is known to be correctly initialized already + * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). + * From a high level, the difference is that + * this function initializes the provided state with a call to something like LZ4_resetStream_fast() + * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). + */ +LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_compress_destSize_extState() : introduced in v1.10.0 + * Same as LZ4_compress_destSize(), but using an externally allocated state. + * Also: exposes @acceleration + */ +int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration); + +/*! In-place compression and decompression + * + * It's possible to have input and output sharing the same buffer, + * for highly constrained memory environments. + * In both cases, it requires input to lay at the end of the buffer, + * and decompression to start at beginning of the buffer. + * Buffer size must feature some margin, hence be larger than final size. + * + * |<------------------------buffer--------------------------------->| + * |<-----------compressed data--------->| + * |<-----------decompressed size------------------>| + * |<----margin---->| + * + * This technique is more useful for decompression, + * since decompressed size is typically larger, + * and margin is short. 
+ * + * In-place decompression will work inside any buffer + * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). + * This presumes that decompressedSize > compressedSize. + * Otherwise, it means compression actually expanded data, + * and it would be more efficient to store such data with a flag indicating it's not compressed. + * This can happen when data is not compressible (already compressed, or encrypted). + * + * For in-place compression, margin is larger, as it must be able to cope with both + * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, + * and data expansion, which can happen when input is not compressible. + * As a consequence, buffer size requirements are much higher, + * and memory savings offered by in-place compression are more limited. + * + * There are ways to limit this cost for compression : + * - Reduce history size, by modifying LZ4_DISTANCE_MAX. + * Note that it is a compile-time constant, so all compressions will apply this limit. + * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, + * so it's a reasonable trick when inputs are known to be small. + * - Require the compressor to deliver a "maximum compressed size". + * This is the `dstCapacity` parameter in `LZ4_compress*()`. + * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, + * in which case, the return code will be 0 (zero). + * The caller must be ready for these cases to happen, + * and typically design a backup scheme to send data uncompressed. + * The combination of both techniques can significantly reduce + * the amount of margin required for in-place compression. + * + * In-place compression can work in any buffer + * which size is >= (maxCompressedSize) + * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. + * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, + * so it's possible to reduce memory requirements by playing with them. + */ + +#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) +#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */ + +#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ +# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ +#endif + +#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ +#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ + +#endif /* LZ4_STATIC_3504398509 */ +#endif /* LZ4_STATIC_LINKING_ONLY */ + + + +#ifndef LZ4_H_98237428734687 +#define LZ4_H_98237428734687 + +/*-************************************************************ + * Private Definitions + ************************************************************** + * Do not use these definitions directly. + * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. + * Accessing members will expose user code to API and/or ABI break in future versions of the library. 
+ **************************************************************/ +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ + +#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# include <stdint.h> + typedef int8_t LZ4_i8; + typedef unsigned char LZ4_byte; + typedef uint16_t LZ4_u16; + typedef uint32_t LZ4_u32; +#else + typedef signed char LZ4_i8; + typedef unsigned char LZ4_byte; + typedef unsigned short LZ4_u16; + typedef unsigned int LZ4_u32; +#endif + +/*! LZ4_stream_t : + * Never ever use below internal definitions directly ! + * These definitions are not API/ABI safe, and may change in future versions. + * If you need static allocation, declare or allocate an LZ4_stream_t object. +**/ + +typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; +struct LZ4_stream_t_internal { + LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]; + const LZ4_byte* dictionary; + const LZ4_stream_t_internal* dictCtx; + LZ4_u32 currentOffset; + LZ4_u32 tableType; + LZ4_u32 dictSize; + /* Implicit padding to ensure structure is aligned */ +}; + +#define LZ4_STREAM_MINSIZE ((1UL << (LZ4_MEMORY_USAGE)) + 32) /* static size, for inter-version compatibility */ +union LZ4_stream_u { + char minStateSize[LZ4_STREAM_MINSIZE]; + LZ4_stream_t_internal internal_donotuse; +}; /* previously typedef'd to LZ4_stream_t */ + + +/*! LZ4_initStream() : v1.9.0+ + * An LZ4_stream_t structure must be initialized at least once. + * This is automatically done when invoking LZ4_createStream(), + * but it's not when the structure is simply declared on stack (for example). + * + * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t. + * It can also initialize any arbitrary buffer of sufficient size, + * and will @return a pointer of proper type upon initialization. + * + * Note : initialization fails if size and alignment conditions are not respected. + * In which case, the function will @return NULL. + * Note2: An LZ4_stream_t structure guarantees correct alignment and size. + * Note3: Before v1.9.0, use LZ4_resetStream() instead +**/ +LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* stateBuffer, size_t size); + + +/*! LZ4_streamDecode_t : + * Never ever use below internal definitions directly ! + * These definitions are not API/ABI safe, and may change in future versions. + * If you need static allocation, declare or allocate an LZ4_streamDecode_t object. +**/ +typedef struct { + const LZ4_byte* externalDict; + const LZ4_byte* prefixEnd; + size_t extDictSize; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#define LZ4_STREAMDECODE_MINSIZE 32 +union LZ4_streamDecode_u { + char minStateSize[LZ4_STREAMDECODE_MINSIZE]; + LZ4_streamDecode_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_streamDecode_t */ + + + +/*-************************************ +* Obsolete Functions +**************************************/ + +/*! Deprecation warnings + * + * Deprecated functions make the compiler generate a warning when invoked. + * This is meant to invite users to update their source code. + * Should deprecation warnings be a problem, it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc + * or _CRT_SECURE_NO_WARNINGS in Visual. + * + * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS + * before including the header file. 
+ */ +#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define LZ4_DEPRECATED(message) [[deprecated(message)]] +# elif defined(_MSC_VER) +# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) +# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45)) +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31) +# define LZ4_DEPRECATED(message) __attribute__((deprecated)) +# else +# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler") +# define LZ4_DEPRECATED(message) /* disabled */ +# endif +#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ + +/*! Obsolete compression functions (since v1.7.3) */ +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/*! Obsolete decompression functions (since v1.8.0) */ +LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); + +/* Obsolete streaming functions (since v1.7.0) + * degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. They have been preserved as well as + * possible: using them will still produce a correct output. However, they don't + * actually retain any history between compression calls. The compression ratio + * achieved will therefore be no better than compressing each chunk + * independently. + */ +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); +LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); + +/*! 
Obsolete streaming decoding functions (since v1.7.0) */ +LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); + +/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) : + * These functions used to be faster than LZ4_decompress_safe(), + * but this is no longer the case. They are now slower. + * This is because LZ4_decompress_fast() doesn't know the input size, + * and therefore must progress more cautiously into the input buffer to not read beyond the end of block. + * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. + * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. + * + * The last remaining LZ4_decompress_fast() specificity is that + * it can decompress a block without knowing its compressed size. + * Such functionality can be achieved in a more secure manner + * by employing LZ4_decompress_safe_partial(). + * + * Parameters: + * originalSize : is the uncompressed size to regenerate. + * `dst` must be already allocated, its size must be >= 'originalSize' bytes. + * @return : number of bytes read from source buffer (== compressed size). + * The function expects to finish at block's end exactly. + * If the source stream is detected malformed, the function stops decoding and returns a negative result. + * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. + * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. + * Also, since match offsets are not validated, match reads from 'src' may underflow too. + * These issues never happen if input (compressed) data is correct. + * But they may happen if input data is invalid (error or intentional tampering). + * As a consequence, use these functions in trusted environments with trusted data **only**. + */ +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial() instead") +LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider migrating towards LZ4_decompress_safe_continue() instead. " + "Note that the contract will change (requires block's compressed size, instead of decompressed size)") +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial_usingDict() instead") +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); + +/*! LZ4_resetStream() : + * An LZ4_stream_t structure must be initialized at least once. + * This is done with LZ4_initStream(), or LZ4_resetStream(). + * Consider switching to LZ4_initStream(), + * invoking LZ4_resetStream() will trigger deprecation warnings in the future. 
+ */ +LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); + + +#endif /* LZ4_H_98237428734687 */ + + +#if defined (__cplusplus) +} +#endif \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index 78860950be7b..56de6efd65e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -20,10 +20,8 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.ArrayDeque; -import java.util.Objects; +import java.util.LinkedList; import java.util.Queue; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; @@ -31,6 +29,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilterChunk; +import org.apache.hadoop.hbase.util.BloomFilterRvvNative; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; @@ -40,12 +39,14 @@ import org.slf4j.LoggerFactory; /** - * Adds methods required for writing a compound Bloom filter to the data section of an - * {@link org.apache.hadoop.hbase.io.hfile.HFile} to the {@link CompoundBloomFilter} class. + * Adds methods required for writing a compound Bloom filter to the data section + * of an + * {@link org.apache.hadoop.hbase.io.hfile.HFile} to the + * {@link CompoundBloomFilter} class. */ @InterfaceAudience.Private public class CompoundBloomFilterWriter extends CompoundBloomFilterBase - implements BloomFilterWriter, InlineBlockWriter { + implements BloomFilterWriter, InlineBlockWriter { private static final Logger LOG = LoggerFactory.getLogger(CompoundBloomFilterWriter.class); @@ -70,26 +71,29 @@ private static class ReadyChunk { BloomFilterChunk chunk; } - private Queue<ReadyChunk> readyChunks = new ArrayDeque<>(); + private Queue<ReadyChunk> readyChunks = new LinkedList<>(); /** The first key in the current Bloom filter chunk. */ private byte[] firstKeyInChunk = null; - private HFileBlockIndex.BlockIndexWriter bloomBlockIndexWriter = - new HFileBlockIndex.BlockIndexWriter(); + private HFileBlockIndex.BlockIndexWriter bloomBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(); /** Whether to cache-on-write compound Bloom filter chunks */ private boolean cacheOnWrite; private BloomType bloomType; + /** RVV native call handle used for batched Bloom bit operations */ + private final BloomFilterRvvNative rvvNative = new BloomFilterRvvNative(); /** - * each chunk's size in bytes. The real chunk size might be different as required by the fold - * factor. target false positive rate hash function type to use maximum degree of folding allowed + * each chunk's size in bytes. The real chunk size might be different as + * required by the fold + * factor. 
target false positive rate hash function type to use maximum degree + * of folding allowed * the bloom type */ public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType, - int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) { + int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) { chunkByteSize = BloomFilterUtil.computeFoldableByteSize(chunkByteSizeHint * 8L, maxFold); this.errorRate = errorRate; @@ -108,17 +112,22 @@ public boolean shouldWriteBlock(boolean closing) { /** * Enqueue the current chunk if it is ready to be written out. - * @param closing true if we are closing the file, so we do not expect new keys to show up + * + * @param closing true if we are closing the file, so we do not expect new keys + * to show up */ private void enqueueReadyChunk(boolean closing) { if (chunk == null || (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { return; } + // Added: flush the remaining buffered hash bits to prevent data loss + chunk.flushHashLocBuffer(); + if (firstKeyInChunk == null) { throw new NullPointerException( - "Trying to enqueue a chunk, " + "but first key is null: closing=" + closing + ", keyCount=" - + chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); + "Trying to enqueue a chunk, " + "but first key is null: closing=" + closing + ", keyCount=" + + chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); } ReadyChunk readyChunk = new ReadyChunk(); @@ -134,8 +143,8 @@ private void enqueueReadyChunk(boolean closing) { if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) { LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" + prevMaxKeys - + " max keys, " + prevByteSize + " bytes] to [" + chunk.getMaxKeys() + " max keys, " - + chunk.getByteSize() + " bytes]"); + + " max keys, " + prevByteSize + " bytes] to [" + chunk.getMaxKeys() + " max keys, " + + chunk.getByteSize() + " bytes]"); } totalMaxKeys += chunk.getMaxKeys(); @@ -148,26 +157,26 @@ private void enqueueReadyChunk(boolean closing) { @Override public void append(ExtendedCell cell) throws IOException { - Objects.requireNonNull(cell); + if (cell == null) + throw new NullPointerException(); enqueueReadyChunk(false); if (chunk == null) { if (firstKeyInChunk != null) { throw new IllegalStateException( - "First key in chunk already set: " + Bytes.toStringBinary(firstKeyInChunk)); + "First key in chunk already set: " + Bytes.toStringBinary(firstKeyInChunk)); } - // This will be done only once per chunk if (bloomType == BloomType.ROWCOL) { firstKeyInChunk = PrivateCellUtil - .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); + .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); } else { firstKeyInChunk = CellUtil.copyRow(cell); } allocateNewChunk(); } - chunk.add(cell); + chunk.add(cell); // Bloom bits were originally added here one at a time this.prevCell = cell; ++totalKeyCount; } @@ -180,7 +189,7 @@ public void beforeShipped() throws IOException { } @Override - public Cell getPrevCell() { + public ExtendedCell getPrevCell() { return this.prevCell; } @@ -233,8 +242,10 @@ public void readFields(DataInput in) throws IOException { } /** - * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, although - * the two metadata formats do not have to be consistent. This does have to be consistent with + * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for + * simplicity, although + * the two metadata formats do not have to be consistent. 
This does have to be + * consistent with * how * {@link CompoundBloomFilter#CompoundBloomFilter(DataInput, org.apache.hadoop.hbase.io.hfile.HFile.Reader, BloomFilterMetrics)} * reads fields. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java index e9eeba2b6f4b..83bfdd62ac19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java @@ -87,7 +87,6 @@ protected boolean seekAsDirection(ExtendedCell kv) throws IOException { return backwardSeek(kv); } - @Override protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) throws IOException { // Check that the heap gives us KVs in an increasing order for same row and diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 76f3e15c90c8..4b73287b61ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -22,29 +22,24 @@ import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; -import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantLock; -import java.util.function.IntConsumer; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.PrivateConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.PrivateConstants; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.conf.ConfigKey; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.ipc.RpcCall; -import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler; @@ -60,16 +55,19 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** - * Scanner scans both the memstore and the Store. Coalesce KeyValue stream into List<KeyValue> + * Scanner scans both the memstore and the Store. Coalesce KeyValue stream into + * List<KeyValue> * for a single row. *

- * The implementation is not thread safe. So there will be no race between next and close. The only - * exception is updateReaders, it will be called in the memstore flush thread to indicate that there + * The implementation is not thread safe. So there will be no race between next + * and close. The only + * exception is updateReaders, it will be called in the memstore flush thread to + * indicate that there * is a flush. */ @InterfaceAudience.Private public class StoreScanner extends NonReversedNonLazyKeyValueScanner - implements KeyValueScanner, InternalScanner, ChangedReadersObserver { + implements KeyValueScanner, InternalScanner, ChangedReadersObserver { private static final Logger LOG = LoggerFactory.getLogger(StoreScanner.class); // In unit tests, the store could be null protected final HStore store; @@ -109,7 +107,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner private final List scannersForDelayedClose = new ArrayList<>(); /** - * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not KVs skipped via + * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but + * not KVs skipped via * seeking to next row/column. TODO: estimate them? */ private long kvsScanned = 0; @@ -120,19 +119,19 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** We don't ever expect to change this, the constant is just for clarity. */ static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true; - public static final String STORESCANNER_PARALLEL_SEEK_ENABLE = - "hbase.storescanner.parallel.seek.enable"; + public static final String STORESCANNER_PARALLEL_SEEK_ENABLE = "hbase.storescanner.parallel.seek.enable"; /** Used during unit testing to ensure that lazy seek does save seek ops */ private static boolean lazySeekEnabledGlobally = LAZY_SEEK_ENABLED_BY_DEFAULT; /** - * The number of cells scanned in between timeout checks. Specifying a larger value means that - * timeout checks will occur less frequently. Specifying a small value will lead to more frequent + * The number of cells scanned in between timeout checks. Specifying a larger + * value means that + * timeout checks will occur less frequently. Specifying a small value will lead + * to more frequent * timeout checks. */ - public static final String HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK = - ConfigKey.LONG("hbase.cells.scanned.per.heartbeat.check"); + public static final String HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK = "hbase.cells.scanned.per.heartbeat.check"; /** * Default value of {@link #HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK}. @@ -140,24 +139,28 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner public static final long DEFAULT_HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK = 10000; /** - * If the read type is Scan.ReadType.DEFAULT, we will start with pread, and if the kvs we scanned - * reaches this limit, we will reopen the scanner with stream. The default value is 4 times of - * block size for this store. If configured with a value <0, for all scans with ReadType DEFAULT, + * If the read type is Scan.ReadType.DEFAULT, we will start with pread, and if + * the kvs we scanned + * reaches this limit, we will reopen the scanner with stream. The default value + * is 4 times of + * block size for this store. If configured with a value <0, for all scans with + * ReadType DEFAULT, * we will open scanner with stream mode itself. 
*/ - public static final String STORESCANNER_PREAD_MAX_BYTES = - ConfigKey.LONG("hbase.storescanner.pread.max.bytes"); + public static final String STORESCANNER_PREAD_MAX_BYTES = "hbase.storescanner.pread.max.bytes"; private final Scan.ReadType readType; // A flag whether use pread for scan - // it maybe changed if we use Scan.ReadType.DEFAULT and we have read lots of data. + // it maybe changed if we use Scan.ReadType.DEFAULT and we have read lots of + // data. private boolean scanUsePread; // Indicates whether there was flush during the course of the scan private volatile boolean flushed = false; // generally we get one file from a flush private final List flushedstoreFileScanners = new ArrayList<>(1); - // Since CompactingMemstore is now default, we get three memstore scanners from a flush + // Since CompactingMemstore is now default, we get three memstore scanners from + // a flush private final List memStoreScannersAfterFlush = new ArrayList<>(3); // The current list of scanners final List currentScanners = new ArrayList<>(); @@ -175,7 +178,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** An internal constructor. */ private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, long readPt, - boolean cacheBlocks, ScanType scanType) { + boolean cacheBlocks, ScanType scanType) { this.readPt = readPt; this.store = store; this.cacheBlocks = cacheBlocks; @@ -192,14 +195,15 @@ private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, // for multi-row (non-"get") scans because this is not done in // StoreFile.passesBloomFilter(Scan, SortedSet). this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) && (store == null - || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); + || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); this.maxRowSize = scanInfo.getTableMaxRowSize(); this.preadMaxBytes = scanInfo.getPreadMaxBytes(); if (get) { this.readType = Scan.ReadType.PREAD; this.scanUsePread = true; } else if (scanType != ScanType.USER_SCAN) { - // For compaction scanners never use Pread as already we have stream based scanners on the + // For compaction scanners never use Pread as already we have stream based + // scanners on the // store files to be compacted this.readType = Scan.ReadType.STREAM; this.scanUsePread = false; @@ -215,12 +219,14 @@ private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, } else { this.readType = scan.getReadType(); } - // Always start with pread unless user specific stream. Will change to stream later if + // Always start with pread unless user specific stream. Will change to stream + // later if // readType is default if the scan keeps running for a long time. this.scanUsePread = this.readType != Scan.ReadType.STREAM; } this.cellsPerHeartbeatCheck = scanInfo.getCellsPerTimeoutCheck(); - // Parallel seeking is on if the config allows and more there is more than one store file. + // Parallel seeking is on if the config allows and more there is more than one + // store file. 
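// (Editorial descriptive note, grounded in this class: store is null in some unit tests, and
// parallel seek additionally requires a live RegionServerServices instance; both conditions
// are checked just below before parallel seeking is turned on.)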
if (store != null && store.getStorefilesCount() > 1) { RegionServerServices rsService = store.getHRegion().getRegionServerServices(); if (rsService != null && scanInfo.isParallelSeekEnabled()) { @@ -235,27 +241,31 @@ private void addCurrentScanners(List scanners) { } private static boolean isOnlyLatestVersionScan(Scan scan) { - // No need to check for Scan#getMaxVersions because live version files generated by store file - // writer retains max versions specified in ColumnFamilyDescriptor for the given CF + // No need to check for Scan#getMaxVersions because live version files generated + // by store file + // writer retains max versions specified in ColumnFamilyDescriptor for the given + // CF return !scan.isRaw() && scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP; } /** - * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are not in a + * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are + * not in a * compaction. + * * @param store who we scan * @param scan the spec * @param columns which columns we are scanning */ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, - long readPt) throws IOException { + long readPt) throws IOException { this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt, scan.getCacheBlocks(), - ScanType.USER_SCAN); + ScanType.USER_SCAN); if (columns != null && scan.isRaw()) { throw new DoNotRetryIOException("Cannot specify any column for a raw scan"); } matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, - store.getCoprocessorHost()); + store.getCoprocessorHost()); store.addChangedReaderObserver(this); @@ -263,16 +273,16 @@ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet * Opens a scanner across specified StoreFiles/MemStoreSegments. + * * @param store who we scan * @param scanners ancillary scanners - * @param smallestReadPoint the readPoint that we should use for tracking versions + * @param smallestReadPoint the readPoint that we should use for tracking + * versions */ public StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException { + ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException { this(store, scanInfo, scanners, scanType, smallestReadPoint, earliestPutTs, null, null); } @@ -311,27 +324,30 @@ public StoreScanner(HStore store, ScanInfo scanInfo, List * Opens a scanner across specified StoreFiles. + * * @param store who we scan * @param scanners ancillary scanners - * @param smallestReadPoint the readPoint that we should use for tracking versions - * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW. - * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. + * @param smallestReadPoint the readPoint that we should use for tracking + * versions + * @param dropDeletesFromRow The inclusive left bound of the range; can be + * EMPTY_START_ROW. + * @param dropDeletesToRow The exclusive right bound of the range; can be + * EMPTY_END_ROW. 
*/ public StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) - throws IOException { + long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) + throws IOException { this(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, - earliestPutTs, dropDeletesFromRow, dropDeletesToRow); + earliestPutTs, dropDeletesFromRow, dropDeletesToRow); } private StoreScanner(HStore store, ScanInfo scanInfo, List scanners, - ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, - byte[] dropDeletesToRow) throws IOException { + ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, + byte[] dropDeletesToRow) throws IOException { this(store, SCAN_FOR_COMPACTION, scanInfo, 0, - store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType); + store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType); assert scanType != ScanType.USER_SCAN; - matcher = - CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs, + matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); // Filter the list of scanners using Bloom filters, time range, TTL, etc. @@ -345,58 +361,57 @@ private StoreScanner(HStore store, ScanInfo scanInfo, List scanners) - throws IOException { + throws IOException { // Seek all scanners to the initial key seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled); addCurrentScanners(scanners); resetKVHeap(scanners, comparator); } - // For mob compaction only as we do not have a Store instance when doing mob compaction. + // For mob compaction only as we do not have a Store instance when doing mob + // compaction. public StoreScanner(ScanInfo scanInfo, ScanType scanType, - List scanners) throws IOException { + List scanners) throws IOException { this(null, SCAN_FOR_COMPACTION, scanInfo, 0, Long.MAX_VALUE, false, scanType); assert scanType != ScanType.USER_SCAN; this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, 0L, - oldestUnexpiredTS, now, null, null, null); + oldestUnexpiredTS, now, null, null, null); seekAllScanner(scanInfo, scanners); } // Used to instantiate a scanner for user scan in test StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, - List scanners, ScanType scanType) throws IOException { + List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store this(null, scan, scanInfo, columns != null ? 
columns.size() : 0, 0L, scan.getCacheBlocks(), - scanType); - if (scanType == ScanType.USER_SCAN) { - this.matcher = - UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); - } else { - this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, - PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); - } + ScanType.USER_SCAN); + this.matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); seekAllScanner(scanInfo, scanners); } // Used to instantiate a scanner for user scan in test StoreScanner(Scan scan, ScanInfo scanInfo, NavigableSet columns, - List scanners) throws IOException { + List scanners, ScanType scanType) throws IOException { // 0 is passed as readpoint because the test bypasses Store this(null, scan, scanInfo, columns != null ? columns.size() : 0, 0L, scan.getCacheBlocks(), - ScanType.USER_SCAN); - this.matcher = - UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); + scanType); + if (scanType == ScanType.USER_SCAN) { + this.matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null); + } else { + this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, + PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); + } seekAllScanner(scanInfo, scanners); } // Used to instantiate a scanner for compaction in test StoreScanner(ScanInfo scanInfo, int maxVersions, ScanType scanType, - List scanners) throws IOException { + List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store this(null, maxVersions > 0 ? new Scan().readVersions(maxVersions) : SCAN_FOR_COMPACTION, - scanInfo, 0, 0L, false, scanType); + scanInfo, 0, 0L, false, scanType); this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, - PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); + PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); seekAllScanner(scanInfo, scanners); } @@ -406,11 +421,12 @@ boolean isScanUsePread() { /** * Seek the specified scanners with the given key + * * @param isLazy true if using lazy seek * @param isParallelSeek true if using parallel seek */ protected void seekScanners(List scanners, ExtendedCell seekKey, - boolean isLazy, boolean isParallelSeek) throws IOException { + boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the next matching Row). 
// Always check bloom filter to optimize the top row seek for delete @@ -425,7 +441,7 @@ protected void seekScanners(List scanners, ExtendedCe for (KeyValueScanner scanner : scanners) { if (matcher.isUserScan() && totalScannersSoughtBytes >= maxRowSize) { throw new RowTooBigException( - "Max row size allowed: " + maxRowSize + ", but row is bigger than that"); + "Max row size allowed: " + maxRowSize + ", but row is bigger than that"); } scanner.seek(seekKey); Cell c = scanner.peek(); @@ -440,13 +456,13 @@ protected void seekScanners(List scanners, ExtendedCe } protected void resetKVHeap(List scanners, CellComparator comparator) - throws IOException { + throws IOException { // Combine all seeked scanners with a heap heap = newKVHeap(scanners, comparator); } protected KeyValueHeap newKVHeap(List scanners, - CellComparator comparator) throws IOException { + CellComparator comparator) throws IOException { return new KeyValueHeap(scanners, comparator); } @@ -456,7 +472,7 @@ protected KeyValueHeap newKVHeap(List scanners, * Will be overridden by testcase so declared as protected. */ protected List selectScannersFrom(HStore store, - List allScanners) { + List allScanners) { boolean memOnly; boolean filesOnly; if (scan instanceof InternalScan) { @@ -553,11 +569,11 @@ public boolean seek(ExtendedCell key) throws IOException { /** * Get the next row of values from this Store. + * * @return true if there are more rows, false if scanner is done */ @Override - public boolean next(List outResult, ScannerContext scannerContext) - throws IOException { + public boolean next(List outResult, ScannerContext scannerContext) throws IOException { if (scannerContext == null) { throw new IllegalArgumentException("Scanner context cannot be null"); } @@ -565,7 +581,8 @@ public boolean next(List outResult, ScannerContext scanner return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } - // if the heap was left null, then the scanners had previously run out anyways, close and + // if the heap was left null, then the scanners had previously run out anyways, + // close and // return. if (this.heap == null) { // By this time partial close should happened because already heap is null @@ -582,8 +599,10 @@ public boolean next(List outResult, ScannerContext scanner // only call setRow if the row changes; avoids confusing the query matcher // if scanning intra-row - // If no limits exists in the scope LimitScope.Between_Cells then we are sure we are changing - // rows. Else it is possible we are still traversing the same row so we must perform the row + // If no limits exists in the scope LimitScope.Between_Cells then we are sure we + // are changing + // rows. Else it is possible we are still traversing the same row so we must + // perform the row // comparison. if (!scannerContext.hasAnyLimit(LimitScope.BETWEEN_CELLS) || matcher.currentRow() == null) { this.countPerRow = 0; @@ -591,34 +610,23 @@ public boolean next(List outResult, ScannerContext scanner } // Clear progress away unless invoker has indicated it should be kept. - if (!scannerContext.getKeepProgress() && !scannerContext.getSkippingRow()) { + if (!scannerContext.getKeepProgress()) { scannerContext.clearProgress(); } - Optional rpcCall = - matcher.isUserScan() ? 
RpcServer.getCurrentCall() : Optional.empty(); - // re-useable closure to avoid allocations - IntConsumer recordBlockSize = blockSize -> { - if (rpcCall.isPresent()) { - rpcCall.get().incrementBlockBytesScanned(blockSize); - } - scannerContext.incrementBlockProgress(blockSize); - }; - int count = 0; long totalBytesRead = 0; - // track the cells for metrics only if it is a user read request. boolean onlyFromMemstore = matcher.isUserScan(); try { LOOP: do { - // Update and check the time limit based on the configured value of cellsPerTimeoutCheck - // Or if the preadMaxBytes is reached and we may want to return so we can switch to stream + // Update and check the time limit based on the configured value of + // cellsPerTimeoutCheck + // Or if the preadMaxBytes is reached and we may want to return so we can switch + // to stream // in // the shipped method below. - if ( - kvsScanned % cellsPerHeartbeatCheck == 0 - || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes) - ) { + if (kvsScanned % cellsPerHeartbeatCheck == 0 + || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes)) { if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { return scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues(); } @@ -631,22 +639,24 @@ public boolean next(List outResult, ScannerContext scanner int cellSize = PrivateCellUtil.estimatedSerializedSizeOf(cell); bytesRead += cellSize; if (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes) { - // return immediately if we want to switch from pread to stream. We need this because we + // return immediately if we want to switch from pread to stream. We need this + // because we // can - // only switch in the shipped method, if user use a filter to filter out everything and + // only switch in the shipped method, if user use a filter to filter out + // everything and // rpc - // timeout is very large then the shipped method will never be called until the whole scan + // timeout is very large then the shipped method will never be called until the + // whole scan // is finished, but at that time we have already scan all the data... // See HBASE-20457 for more details. - // And there is still a scenario that can not be handled. If we have a very large row, + // And there is still a scenario that can not be handled. If we have a very + // large row, // which - // have millions of qualifiers, and filter.filterRow is used, then even if we set the flag + // have millions of qualifiers, and filter.filterRow is used, then even if we + // set the flag // here, we still need to scan all the qualifiers before returning... scannerContext.returnImmediately(); } - - heap.recordBlockSize(recordBlockSize); - prevCell = cell; scannerContext.setLastPeekedCell(cell); topChanged = false; @@ -657,17 +667,7 @@ public boolean next(List outResult, ScannerContext scanner case INCLUDE_AND_SEEK_NEXT_COL: Filter f = matcher.getFilter(); if (f != null) { - Cell transformedCell = f.transformCell(cell); - // fast path, most filters just return the same cell instance - if (transformedCell != cell) { - if (transformedCell instanceof ExtendedCell) { - cell = (ExtendedCell) transformedCell; - } else { - throw new DoNotRetryIOException("Incorrect filter implementation, " - + "the Cell returned by transformCell is not an ExtendedCell. 
Filter class: " - + f.getClass().getName()); - } - } + cell = (ExtendedCell) f.transformCell(cell); } this.countPerRow++; @@ -681,7 +681,8 @@ public boolean next(List outResult, ScannerContext scanner totalBytesRead += cellSize; /** - * Increment the metric if all the cells are from memstore. If not we will account it + * Increment the metric if all the cells are from memstore. If not we will + * account it * for mixed reads */ onlyFromMemstore = onlyFromMemstore && heap.isLatestCellFromMemstore(); @@ -691,10 +692,10 @@ public boolean next(List outResult, ScannerContext scanner if (matcher.isUserScan() && totalBytesRead > maxRowSize) { String message = "Max row size allowed: " + maxRowSize - + ", but the row is bigger than that, the row info: " - + CellUtil.toString(cell, false) + ", already have process row cells = " - + outResult.size() + ", it belong to region = " - + store.getHRegion().getRegionInfo().getRegionNameAsString(); + + ", but the row is bigger than that, the row info: " + + CellUtil.toString(cell, false) + ", already have process row cells = " + + outResult.size() + ", it belong to region = " + + store.getHRegion().getRegionInfo().getRegionNameAsString(); LOG.warn(message); throw new RowTooBigException(message); } @@ -778,9 +779,7 @@ public boolean next(List outResult, ScannerContext scanner ExtendedCell nextKV = matcher.getNextKeyHint(cell); if (nextKV != null) { int difference = comparator.compare(nextKV, cell); - if ( - ((!scan.isReversed() && difference > 0) || (scan.isReversed() && difference < 0)) - ) { + if (((!scan.isReversed() && difference > 0) || (scan.isReversed() && difference < 0))) { seekAsDirection(nextKV); NextState stateAfterSeekByHint = needToReturn(); if (stateAfterSeekByHint != null) { @@ -795,13 +794,6 @@ public boolean next(List outResult, ScannerContext scanner default: throw new RuntimeException("UNEXPECTED"); } - - // One last chance to break due to size limit. The INCLUDE* cases above already check - // limit and continue. For the various filtered cases, we need to check because block - // size limit may have been exceeded even if we don't add cells to result list. - if (scannerContext.checkSizeLimit(LimitScope.BETWEEN_CELLS)) { - return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); - } } while ((cell = this.heap.peek()) != null); if (count > 0) { @@ -834,12 +826,18 @@ private void updateMetricsStore(boolean memstoreRead) { } /** - * If the top cell won't be flushed into disk, the new top cell may be changed after - * #reopenAfterFlush. Because the older top cell only exist in the memstore scanner but the - * memstore scanner is replaced by hfile scanner after #reopenAfterFlush. If the row of top cell - * is changed, we should return the current cells. Otherwise, we may return the cells across + * If the top cell won't be flushed into disk, the new top cell may be changed + * after + * #reopenAfterFlush. Because the older top cell only exist in the memstore + * scanner but the + * memstore scanner is replaced by hfile scanner after #reopenAfterFlush. If the + * row of top cell + * is changed, we should return the current cells. Otherwise, we may return the + * cells across * different rows. - * @return null is the top cell doesn't change. Otherwise, the NextState to return + * + * @return null is the top cell doesn't change. 
Otherwise, the NextState to + * return */ private NextState needToReturn() { if (topChanged) { @@ -849,7 +847,8 @@ private NextState needToReturn() { } private void seekOrSkipToNextRow(ExtendedCell cell) throws IOException { - // If it is a Get Scan, then we know that we are done with this row; there are no more + // If it is a Get Scan, then we know that we are done with this row; there are + // no more // rows beyond the current one: don't try to optimize. if (!get) { if (trySkipToNextRow(cell)) { @@ -866,30 +865,43 @@ private void seekOrSkipToNextColumn(ExtendedCell cell) throws IOException { } /** - * See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109). - * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, or seek to an - * arbitrary seek key. This method decides whether a seek is the most efficient _actual_ way to - * get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, SKIP inside the - * current, loaded block). It does this by looking at the next indexed key of the current HFile. - * This key is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible - * key on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work + * See if we should actually SEEK or rather just SKIP to the next Cell (see + * HBASE-13109). + * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, + * or seek to an + * arbitrary seek key. This method decides whether a seek is the most efficient + * _actual_ way to + * get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, SKIP + * inside the + * current, loaded block). It does this by looking at the next indexed key of + * the current HFile. + * This key is then compared with the _SEEK_ key, where a SEEK key is an + * artificial 'last possible + * key on the row' (only in here, we avoid actually creating a SEEK key; in the + * compare we work * with the current Cell but compare as though it were a seek key; see down in - * matcher.compareKeyForNextRow, etc). If the compare gets us onto the next block we *_SEEK, + * matcher.compareKeyForNextRow, etc). If the compare gets us onto the next + * block we *_SEEK, * otherwise we just SKIP to the next requested cell. *

 * Other notes:
 *
 *   • Rows can straddle block boundaries
 *   • Versions of columns can straddle block boundaries (i.e. column C1 at T1 might be in a
 *     different block than column C1 at T2)
 *   • We want to SKIP if the chance is high that we'll find the desired Cell after a few
 *     SKIPs...
 *   • We want to SEEK when the chance is high that we'll be able to seek past many Cells,
 *     especially if we know we need to go to the next block.
 *

- * A good proxy (best effort) to determine whether SKIP is better than SEEK is whether we'll - * likely end up seeking to the next block (or past the next block) to get our next column. + * A good proxy (best effort) to determine whether SKIP is better than SEEK is + * whether we'll + * likely end up seeking to the next block (or past the next block) to get our + * next column. * Example: * *

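For reference, the SEEK-vs-SKIP decision described in the comment above reduces to a single comparison against the next indexed key. Below is a minimal sketch of that predicate, mirroring the condition that trySkipToNextRow/trySkipToNextColumn use further down in this patch; the class and helper name are illustrative only, and the real code additionally short-circuits when the indexed-key instance has not changed since the previous check.

import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;

final class SeekVsSkipSketch {
  /**
   * True when the cell we would seek to still lies inside the currently loaded block
   * (everything that sorts before the next indexed key is in this block), so a few
   * cheap SKIPs are preferred over an expensive SEEK.
   */
  static boolean preferSkipOverSeek(ScanQueryMatcher matcher, ExtendedCell nextIndexedKey,
      ExtendedCell currentCell) {
    return nextIndexedKey != null
        && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY
        && matcher.compareKeyForNextRow(nextIndexedKey, currentCell) >= 0;
  }
}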
@@ -907,27 +919,32 @@ private void seekOrSkipToNextColumn(ExtendedCell cell) throws IOException {
    *                                    Next Index Key        SEEK_NEXT_COL
    * 
* - * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4 - * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we only - * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at - * the 'Next Index Key', it would land us in the next block, so we should SEEK. In other scenarios - * where the SEEK will not land us in the next block, it is very likely better to issues a series + * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next + * Index Key' of r1/c4 + * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second + * case, say we only + * want one version of c1, after we have it, a SEEK_COL will be issued to get to + * c2. Looking at + * the 'Next Index Key', it would land us in the next block, so we should SEEK. + * In other scenarios + * where the SEEK will not land us in the next block, it is very likely better + * to issues a series * of SKIPs. + * * @param cell current cell * @return true means skip to next row, false means not */ protected boolean trySkipToNextRow(ExtendedCell cell) throws IOException { ExtendedCell nextCell = null; - // used to guard against a changed next indexed key by doing a identity comparison + // used to guard against a changed next indexed key by doing a identity + // comparison // when the identity changes we need to compare the bytes again ExtendedCell previousIndexedKey = null; do { ExtendedCell nextIndexedKey = getNextIndexedKey(); - if ( - nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey - || matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) - ) { + || matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -939,22 +956,23 @@ protected boolean trySkipToNextRow(ExtendedCell cell) throws IOException { } /** - * See {@link #trySkipToNextRow(ExtendedCell)} + * See + * {@link org.apache.hadoop.hbase.regionserver.StoreScanner#trySkipToNextRow(Cell)} + * * @param cell current cell * @return true means skip to next column, false means not */ protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { ExtendedCell nextCell = null; - // used to guard against a changed next indexed key by doing a identity comparison + // used to guard against a changed next indexed key by doing a identity + // comparison // when the identity changes we need to compare the bytes again ExtendedCell previousIndexedKey = null; do { ExtendedCell nextIndexedKey = getNextIndexedKey(); - if ( - nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey - || matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) - ) { + || matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -963,11 +981,10 @@ protected boolean trySkipToNextColumn(ExtendedCell cell) throws IOException { } } while ((nextCell = this.heap.peek()) != null && CellUtil.matchingRowColumn(cell, nextCell)); // We need this check because it may happen that the new scanner that we get - // during heap.next() is requiring reseek due of fake KV previously generated for + // during heap.next() 
is requiring reseek due of fake KV previously generated + // for // ROWCOL bloom filter optimization. See HBASE-19863 for more details - if ( - useRowColBloom && nextCell != null && cell.getTimestamp() == PrivateConstants.OLDEST_TIMESTAMP - ) { + if (useRowColBloom && nextCell != null && cell.getTimestamp() == PrivateConstants.OLDEST_TIMESTAMP) { return false; } return true; @@ -991,7 +1008,7 @@ private static void clearAndClose(List scanners) { // Implementation of ChangedReadersObserver @Override public void updateReaders(List sfs, List memStoreScanners) - throws IOException { + throws IOException { if (CollectionUtils.isEmpty(sfs) && CollectionUtils.isEmpty(memStoreScanners)) { return; } @@ -999,10 +1016,14 @@ public void updateReaders(List sfs, List memStoreSc flushLock.lock(); try { if (!closeLock.tryLock()) { - // The reason for doing this is that when the current store scanner does not retrieve - // any new cells, then the scanner is considered to be done. The heap of this scanner - // is not closed till the shipped() call is completed. Hence in that case if at all - // the partial close (close (false)) has been called before updateReaders(), there is no + // The reason for doing this is that when the current store scanner does not + // retrieve + // any new cells, then the scanner is considered to be done. The heap of this + // scanner + // is not closed till the shipped() call is completed. Hence in that case if at + // all + // the partial close (close (false)) has been called before updateReaders(), + // there is no // need for the updateReaders() to happen. LOG.debug("StoreScanner already has the close lock. There is no need to updateReaders"); // no lock acquired. @@ -1019,14 +1040,19 @@ public void updateReaders(List sfs, List memStoreSc flushed = true; final boolean isCompaction = false; boolean usePread = get || scanUsePread; - // SEE HBASE-19468 where the flushed files are getting compacted even before a scanner - // calls next(). So its better we create scanners here rather than next() call. Ensure - // these scanners are properly closed() whether or not the scan is completed successfully - // Eagerly creating scanners so that we have the ref counting ticking on the newly created - // store files. In case of stream scanners this eager creation does not induce performance - // penalty because in scans (that uses stream scanners) the next() call is bound to happen. - List scanners = - store.getScanners(sfs, cacheBlocks, get, usePread, isCompaction, matcher, + // SEE HBASE-19468 where the flushed files are getting compacted even before a + // scanner + // calls next(). So its better we create scanners here rather than next() call. + // Ensure + // these scanners are properly closed() whether or not the scan is completed + // successfully + // Eagerly creating scanners so that we have the ref counting ticking on the + // newly created + // store files. In case of stream scanners this eager creation does not induce + // performance + // penalty because in scans (that uses stream scanners) the next() call is bound + // to happen. 
+ List scanners = store.getScanners(sfs, cacheBlocks, get, usePread, isCompaction, matcher, scan.getStartRow(), scan.getStopRow(), this.readPt, false, isOnlyLatestVersionScan(scan)); flushedstoreFileScanners.addAll(scanners); if (!CollectionUtils.isEmpty(memStoreScanners)) { @@ -1045,22 +1071,28 @@ public void updateReaders(List sfs, List memStoreSc // Let the next() call handle re-creating and seeking } - /** Returns if top of heap has changed (and KeyValueHeap has to try the next KV) */ + /** + * Returns if top of heap has changed (and KeyValueHeap has to try the next KV) + */ protected final boolean reopenAfterFlush() throws IOException { - // here we can make sure that we have a Store instance so no null check on store. + // here we can make sure that we have a Store instance so no null check on + // store. ExtendedCell lastTop = heap.peek(); - // When we have the scan object, should we not pass it to getScanners() to get a limited set of - // scanners? We did so in the constructor and we could have done it now by storing the scan + // When we have the scan object, should we not pass it to getScanners() to get a + // limited set of + // scanners? We did so in the constructor and we could have done it now by + // storing the scan // object from the constructor List scanners; flushLock.lock(); try { - List allScanners = - new ArrayList<>(flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size()); + List allScanners = new ArrayList<>( + flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size()); allScanners.addAll(flushedstoreFileScanners); allScanners.addAll(memStoreScannersAfterFlush); scanners = selectScannersFrom(store, allScanners); - // Clear the current set of flushed store files scanners so that they don't get added again + // Clear the current set of flushed store files scanners so that they don't get + // added again flushedstoreFileScanners.clear(); memStoreScannersAfterFlush.clear(); } finally { @@ -1078,14 +1110,15 @@ protected final boolean reopenAfterFlush() throws IOException { break; } } - // add the newly created scanners on the flushed files and the current active memstore scanner + // add the newly created scanners on the flushed files and the current active + // memstore scanner addCurrentScanners(scanners); // Combine all seeked scanners with a heap resetKVHeap(this.currentScanners, store.getComparator()); resetQueryMatcher(lastTop); if (heap.peek() == null || store.getComparator().compareRows(lastTop, this.heap.peek()) != 0) { LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() - + ",and after = " + heap.peek()); + + ",and after = " + heap.peek()); topChanged = true; } else { topChanged = false; @@ -1111,11 +1144,11 @@ private void resetQueryMatcher(ExtendedCell lastTopKey) { /** * Check whether scan as expected order */ - protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) - throws IOException { + protected void checkScanOrder(ExtendedCell prevKV, ExtendedCell kv, CellComparator comparator) + throws IOException { // Check that the heap gives us KVs in an increasing order. 
assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 - : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; + : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; } protected boolean seekToNextRow(ExtendedCell c) throws IOException { @@ -1124,6 +1157,7 @@ protected boolean seekToNextRow(ExtendedCell c) throws IOException { /** * Do a reseek in a normal StoreScanner(scan forward) + * * @return true if scanner has values left, false if end of scanner */ protected boolean seekAsDirection(ExtendedCell kv) throws IOException { @@ -1142,14 +1176,12 @@ public boolean reseek(ExtendedCell kv) throws IOException { } void trySwitchToStreamRead() { - if ( - readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || heap.peek() == null - || bytesRead < preadMaxBytes - ) { + if (readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || heap.peek() == null + || bytesRead < preadMaxBytes) { return; } LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, - this.store.getColumnFamilyName()); + this.store.getColumnFamilyName()); scanUsePread = false; ExtendedCell lastTop = heap.peek(); List memstoreScanners = new ArrayList<>(); @@ -1169,8 +1201,8 @@ void trySwitchToStreamRead() { // We must have a store instance here so no null check // recreate the scanners on the current file scanners fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, matcher, - scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), scan.includeStopRow(), - readPt, false); + scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), scan.includeStopRow(), + readPt, false); if (fileScanners == null) { return; } @@ -1199,11 +1231,14 @@ void trySwitchToStreamRead() { protected final boolean checkFlushed() { // check the var without any lock. Suppose even if we see the old // value here still it is ok to continue because we will not be resetting - // the heap but will continue with the referenced memstore's snapshot. For compactions - // any way we don't need the updateReaders at all to happen as we still continue with + // the heap but will continue with the referenced memstore's snapshot. For + // compactions + // any way we don't need the updateReaders at all to happen as we still continue + // with // the older files if (flushed) { - // If there is a flush and the current scan is notified on the flush ensure that the + // If there is a flush and the current scan is notified on the flush ensure that + // the // scan's heap gets reset and we do a seek on the newly flushed file. if (this.closing) { return false; @@ -1217,12 +1252,14 @@ protected final boolean checkFlushed() { /** * Seek storefiles in parallel to optimize IO latency as much as possible + * * @param scanners the list {@link KeyValueScanner}s to be read from * @param kv the KeyValue on which the operation is being requested */ private void parallelSeek(final List scanners, final ExtendedCell kv) - throws IOException { - if (scanners.isEmpty()) return; + throws IOException { + if (scanners.isEmpty()) + return; int storeFileScannerCount = scanners.size(); CountDownLatch latch = new CountDownLatch(storeFileScannerCount); List handlers = new ArrayList<>(storeFileScannerCount); @@ -1252,12 +1289,14 @@ private void parallelSeek(final List scanners, final /** * Used in testing. 
+ * * @return all scanners in no particular order */ List getAllScannersForTesting() { List allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); - if (current != null) allScanners.add(current); + if (current != null) + allScanners.add(current); for (KeyValueScanner scanner : heap.getHeap()) allScanners.add(scanner); return allScanners; @@ -1267,7 +1306,10 @@ static void enableLazySeekGlobally(boolean enable) { lazySeekEnabledGlobally = enable; } - /** Returns The estimated number of KVs seen by this scanner (includes some skipped KVs). */ + /** + * Returns The estimated number of KVs seen by this scanner (includes some + * skipped KVs). + */ public long getEstimatedNumberOfKvsScanned() { return this.kvsScanned; } @@ -1282,8 +1324,10 @@ public void shipped() throws IOException { if (prevCell != null) { // Do the copy here so that in case the prevCell ref is pointing to the previous // blocks we can safely release those blocks. - // This applies to blocks that are got from Bucket cache, L1 cache and the blocks - // fetched from HDFS. Copying this would ensure that we let go the references to these + // This applies to blocks that are got from Bucket cache, L1 cache and the + // blocks + // fetched from HDFS. Copying this would ensure that we let go the references to + // these // blocks so that they can be GCed safely(in case of bucket cache) prevCell = KeyValueUtil.toNewKeyCell(this.prevCell); } @@ -1292,11 +1336,16 @@ public void shipped() throws IOException { clearAndClose(scannersForDelayedClose); if (this.heap != null) { this.heap.shipped(); - // When switching from pread to stream, we will open a new scanner for each store file, but - // the old scanner may still track the HFileBlocks we have scanned but not sent back to client - // yet. If we close the scanner immediately then the HFileBlocks may be messed up by others - // before we serialize and send it back to client. The HFileBlocks will be released in shipped - // method, so we here will also open new scanners and close old scanners in shipped method. + // When switching from pread to stream, we will open a new scanner for each + // store file, but + // the old scanner may still track the HFileBlocks we have scanned but not sent + // back to client + // yet. If we close the scanner immediately then the HFileBlocks may be messed + // up by others + // before we serialize and send it back to client. The HFileBlocks will be + // released in shipped + // method, so we here will also open new scanners and close old scanners in + // shipped method. // See HBASE-18055 for more details. 
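// Descriptive note: trySwitchToStreamRead() below is a no-op unless the scan uses
// Scan.ReadType.DEFAULT, pread is still active, the scanner is not closing, the heap is
// non-empty, and bytesRead has already reached preadMaxBytes (see its guard clause
// earlier in this class).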
trySwitchToStreamRead(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java index bc0ca0490932..659c04fd00a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java @@ -24,9 +24,11 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.util.BloomFilterRvvNative; /** - * The basic building block for the {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter} + * The basic building block for the + * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter} */ @InterfaceAudience.Private public class BloomFilterChunk implements BloomFilterBase { @@ -48,8 +50,27 @@ public class BloomFilterChunk implements BloomFilterBase { /** The type of bloom */ protected BloomType bloomType; + private int offset; + + // JNI native方法声明:批量设置位 + private static final BloomFilterRvvNative rvvNative = new BloomFilterRvvNative(); + private static final int BATCH_SIZE = 256; + private long[] hashLocBuffer = new long[BATCH_SIZE]; + private int bufferPos = 0; + + // 加载本地库 + static { + // 确保本地库已加载;通常由调用方/测试用例先行加载 + try { + System.loadLibrary("bloomfilter_rvv"); + } catch (Throwable ignore) { + // 如果外部已加载,这里失败也不影响;真正调用时如未加载会抛错,方便定位 + } + } + /** * Loads bloom filter meta data from file input. + * * @param meta stored bloom meta data * @throws IllegalArgumentException meta data is invalid */ @@ -68,10 +89,14 @@ public BloomFilterChunk(DataInput meta) throws IOException, IllegalArgumentExcep } /** - * Computes the error rate for this Bloom filter, taking into account the actual number of hash - * functions and keys inserted. The return value of this function changes as a Bloom filter is - * being populated. Used for reporting the actual error rate of compound Bloom filters when + * Computes the error rate for this Bloom filter, taking into account the actual + * number of hash + * functions and keys inserted. The return value of this function changes as a + * Bloom filter is + * being populated. Used for reporting the actual error rate of compound Bloom + * filters when * writing them out. + * * @return error rate for this particular Bloom filter */ public double actualErrorRate() { @@ -87,16 +112,21 @@ public BloomFilterChunk(int hashType, BloomType bloomType) { /** * Determines & initializes bloom filter meta data from user config. Call * {@link #allocBloom()} to allocate bloom filter data. - * @param maxKeys Maximum expected number of keys that will be stored in this bloom - * @param errorRate Desired false positive error rate. Lower rate = more storage required + * + * @param maxKeys Maximum expected number of keys that will be stored in this + * bloom + * @param errorRate Desired false positive error rate. Lower rate = more + * storage required * @param hashType Type of hash function to use - * @param foldFactor When finished adding entries, you may be able to 'fold' this bloom to save - * space. Tradeoff potentially excess bytes in bloom for ability to fold if + * @param foldFactor When finished adding entries, you may be able to 'fold' + * this bloom to save + * space. Tradeoff potentially excess bytes in bloom for + * ability to fold if * keyCount is exponentially greater than maxKeys. 
*/ // Used only in testcases public BloomFilterChunk(int maxKeys, double errorRate, int hashType, int foldFactor) - throws IllegalArgumentException { + throws IllegalArgumentException { this(hashType, BloomType.ROW); long bitSize = BloomFilterUtil.computeBitSize(maxKeys, errorRate); @@ -110,8 +140,10 @@ public BloomFilterChunk(int maxKeys, double errorRate, int hashType, int foldFac } /** - * Creates another similar Bloom filter. Does not copy the actual bits, and sets the new filter's + * Creates another similar Bloom filter. Does not copy the actual bits, and sets + * the new filter's * key count to zero. + * * @return a Bloom filter with the same configuration as this */ public BloomFilterChunk createAnother() { @@ -171,35 +203,63 @@ public void add(Cell cell) { * For faster hashing, use combinatorial generation * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf */ - int hash1; - int hash2; + int hash1, hash2; HashKey hashKey; if (this.bloomType == BloomType.ROWCOL) { hashKey = new RowColBloomHashKey(cell); - hash1 = this.hash.hash(hashKey, 0); - hash2 = this.hash.hash(hashKey, hash1); } else { hashKey = new RowBloomHashKey(cell); - hash1 = this.hash.hash(hashKey, 0); - hash2 = this.hash.hash(hashKey, hash1); } + hash1 = this.hash.hash(hashKey, 0); + hash2 = this.hash.hash(hashKey, hash1); + setHashLoc(hash1, hash2); } private void setHashLoc(int hash1, int hash2) { + final long bitSize = this.byteSize * 8L; + // 使用 Kirsch–Mitzenmacher 组合哈希:hash1 + i*hash2 for (int i = 0; i < this.hashCount; i++) { - long hashLoc = Math.abs((hash1 + i * hash2) % (this.byteSize * 8)); - set(hashLoc); + long combined = (hash1 & 0xffffffffL) + (i * (hash2 & 0xffffffffL)); + long hashLoc = combined % bitSize; + if (hashLoc < 0) + hashLoc += bitSize; // 防止负数 + // 缓存位置,批量刷写 + hashLocBuffer[bufferPos++] = hashLoc; + if (bufferPos == BATCH_SIZE) { + flushHashLocBuffer(); + } } - ++this.keyCount; } + // private void setHashLoc(int hash1, int hash2) { + // for (int i = 0; i < this.hashCount; i++) { + // long hashLoc = Math.abs((hash1 + i * hash2) % (this.byteSize * 8)); + // set(hashLoc); + // } + // ++this.keyCount; + // } + + /** 将缓存的 hash 位置批量写入 bitmap(供 writer/写盘前调用) */ + public void flushHashLocBuffer() { + if (bufferPos <= 0) + return; + if (bloom == null || !bloom.hasArray()) { + throw new IllegalStateException("Bloom bit array is not a heap array"); + } + byte[] array = bloom.array(); + int offset = bloom.arrayOffset(); + rvvNative.nativeSetBitsBatch(array, offset, hashLocBuffer, bufferPos); + bufferPos = 0; + } + // --------------------------------------------------------------------------- /** Private helpers */ /** * Set the bit at the specified index to 1. + * * @param pos index of bit */ void set(long pos) { @@ -210,20 +270,41 @@ void set(long pos) { bloom.put(bytePos, curByte); } + public void setBitsBatch(long[] positions) { + if (positions == null || positions.length == 0) + return; + if (bloom == null || !bloom.hasArray()) { + throw new IllegalStateException("Bloom bit array not initialized"); + } + + byte[] array = bloom.array(); + + rvvNative.nativeSetBitsBatch(bloom.array(), bloom.arrayOffset(), positions, positions.length); + + } + /** * Check if bit at specified index is 1. + * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) { int bytePos = pos >> 3; // pos / 8 int bitPos = pos & 0x7; // pos % 8 - // TODO access this via Util API which can do Unsafe access if possible(?) 
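// BloomFilterUtil.bitvals[bitPos] is the single-bit mask (1 << bitPos), so a non-zero
// result of the AND below means the probed bit is set.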
byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= BloomFilterUtil.bitvals[bitPos]; return (curByte != 0); } + // 新增批量查询 + public void getBatch(long[] positions, boolean[] results) { + if (positions == null || results == null || positions.length != results.length) { + throw new IllegalArgumentException("positions/results length mismatch"); + } + rvvNative.nativeCheckBitsBatch(bloom.array(), bloom.arrayOffset(), positions, positions.length, results); + } + @Override public long getKeyCount() { return keyCount; @@ -244,20 +325,21 @@ public int getHashType() { } public void compactBloom() { - // see if the actual size is exponentially smaller than expected. + // 先把缓存的位全部写入 + flushHashLocBuffer(); + + // 下面保持原有折叠逻辑不变 if (this.keyCount > 0 && this.bloom.hasArray()) { int pieces = 1; int newByteSize = (int) this.byteSize; int newMaxKeys = this.maxKeys; - // while exponentially smaller & folding is lossless while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount << 1)) { pieces <<= 1; newByteSize >>= 1; newMaxKeys >>= 1; } - // if we should fold these into pieces if (pieces > 1) { byte[] array = this.bloom.array(); int start = this.bloom.arrayOffset(); @@ -268,7 +350,6 @@ public void compactBloom() { array[pos] |= array[off++]; } } - // folding done, only use a subset of this array this.bloom.rewind(); this.bloom.limit(newByteSize); this.bloom = this.bloom.slice(); @@ -278,8 +359,44 @@ public void compactBloom() { } } + // public void compactBloom() { + // // see if the actual size is exponentially smaller than expected. + // if (this.keyCount > 0 && this.bloom.hasArray()) { + // int pieces = 1; + // int newByteSize = (int) this.byteSize; + // int newMaxKeys = this.maxKeys; + + // // while exponentially smaller & folding is lossless + // while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount << 1)) { + // pieces <<= 1; + // newByteSize >>= 1; + // newMaxKeys >>= 1; + // } + + // // if we should fold these into pieces + // if (pieces > 1) { + // byte[] array = this.bloom.array(); + // int start = this.bloom.arrayOffset(); + // int end = start + newByteSize; + // int off = end; + // for (int p = 1; p < pieces; ++p) { + // for (int pos = start; pos < end; ++pos) { + // array[pos] |= array[off++]; + // } + // } + // // folding done, only use a subset of this array + // this.bloom.rewind(); + // this.bloom.limit(newByteSize); + // this.bloom = this.bloom.slice(); + // this.byteSize = newByteSize; + // this.maxKeys = newMaxKeys; + // } + // } + // } + /** * Writes just the bloom filter to the output array + * * @param out OutputStream to place bloom * @throws IOException Error writing bloom array */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterRvvNative.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterRvvNative.java new file mode 100644 index 000000000000..ddd2e4a55c3a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterRvvNative.java @@ -0,0 +1,69 @@ +package org.apache.hadoop.hbase.util; + +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class BloomFilterRvvNative { + private static final boolean ENABLED; + static { + boolean ok; + try { + System.loadLibrary("bloomfilter_rvv"); + ok = true; + } catch (Throwable t) { + ok = false; + } + ENABLED = ok; + } + + public static boolean isEnabled() { + return ENABLED; + } + + // native + public native void nativeSetBitOld(byte[] bitmap, int offset, long pos); + + public native void 
nativeSetBitsBatch(byte[] bitmap, int offset, long[] positions, int length); + + public native void nativeCheckBitsBatch(byte[] bitmap, int offset, long[] positions, int length, boolean[] results); + + public native void addToBloomFinal(byte[] bloomBuf, int hash1, int hash2, int hashCount); + + public native boolean getBitFinal(long pos, byte[] bloomBuf); + + // 旧 API + public void setBitsRvv(byte[] bitmap, int offset, long[] positions, int length) { + nativeSetBitsBatch(bitmap, offset, positions, length); + } + + // 便捷重载:对 positions 的一个切片 [posOff, posOff+length) + public void setBitsRvv(byte[] bitmap, int offset, long[] positions, int posOff, int length) { + if (posOff == 0 && length == positions.length) { + nativeSetBitsBatch(bitmap, offset, positions, length); + } else { + long[] slice = new long[length]; + System.arraycopy(positions, posOff, slice, 0, length); + nativeSetBitsBatch(bitmap, offset, slice, length); + } + } + + // 批量查询:对 positions 的一个切片,结果写入 results 的 resOff 开始 + public void checkBitsRvv(byte[] bitmap, int offset, long[] positions, int posOff, int length, boolean[] results, + int resOff) { + long[] slice = new long[length]; + System.arraycopy(positions, posOff, slice, 0, length); + boolean[] tmp = new boolean[length]; + nativeCheckBitsBatch(bitmap, offset, slice, length, tmp); + System.arraycopy(tmp, 0, results, resOff, length); + } + + // 批量添加 hash 位到 BloomFilter + public void setHashLocRvv(byte[] bloomBuf, int hash1, int hash2, int hashCount) { + addToBloomFinal(bloomBuf, hash1, hash2, hashCount); + } + + // 查询指定 bit 是否为 1 + public boolean isBitSet(long pos, byte[] bloomBuf) { + return getBitFinal(pos, bloomBuf); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/native/bloomfilter_rvv.c b/hbase-server/src/main/native/bloomfilter_rvv.c new file mode 100644 index 000000000000..8c33d7549aae --- /dev/null +++ b/hbase-server/src/main/native/bloomfilter_rvv.c @@ -0,0 +1,286 @@ +// bloomfilter_rvv.c +#include +#include +#include +#include +#if defined(__riscv) && defined(__riscv_vector) +#include +#endif + +static inline uint8_t bitmask_for_pos(uint8_t bitPos) { + return (uint8_t)(1u << (bitPos & 7)); +} + +/* Old single-bi set */ +JNIEXPORT void JNICALL +Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_nativeSetBitOld + (JNIEnv *env, jobject obj, jbyteArray bitmap, jint offset, jlong pos) { + + if (bitmap == NULL) return; + jsize bitmap_len = (*env)->GetArrayLength(env, bitmap); + if (bitmap_len <= 0 || offset < 0 || offset >= bitmap_len) return; + + jbyte *bitmap_ptr = (jbyte*)(*env)->GetPrimitiveArrayCritical(env, bitmap, NULL); + if (bitmap_ptr == NULL) return; + + uint8_t *buf = (uint8_t *)(bitmap_ptr + offset); + uint64_t upos = (uint64_t) pos; + uint64_t bytePos = upos >> 3; + uint8_t bitPos = (uint8_t)(upos & 0x7); + + if (bytePos < (uint64_t)(bitmap_len - offset)) { + buf[bytePos] |= bitmask_for_pos(bitPos); + } + + (*env)->ReleasePrimitiveArrayCritical(env, bitmap, bitmap_ptr, 0); +} + +/* Batch set bits */ +JNIEXPORT void JNICALL +Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_nativeSetBitsBatch + (JNIEnv *env, jobject obj, + jbyteArray bitmap, jint offset, jlongArray positions, jint length) { + + if (length <= 0 || bitmap == NULL || positions == NULL) return; + + jsize bitmap_len = (*env)->GetArrayLength(env, bitmap); + if (bitmap_len <= 0 || offset < 0 || offset >= bitmap_len) return; + + int localMaskLen = bitmap_len - offset; + if (localMaskLen <= 0) return; + + // Copy positions out (no Critical region yet) + jboolean isCopy = 
JNI_FALSE; + jlong* pos_elems = (*env)->GetLongArrayElements(env, positions, &isCopy); + if (!pos_elems) return; + + uint8_t *localMask = (uint8_t *) calloc((size_t)localMaskLen, 1); + if (!localMask) { (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); return; } + + uint64_t *touched = (uint64_t *) malloc(sizeof(uint64_t) * (size_t)length); + if (!touched) { free(localMask); (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); return; } + int touchedCount = 0; + +#if defined(__riscv) && defined(__riscv_vector) + size_t max_vl = __riscv_vsetvl_e64m1((size_t)length); + uint64_t *byte_idx = (uint64_t *) malloc(max_vl * sizeof(uint64_t)); + uint64_t *bit_idx = (uint64_t *) malloc(max_vl * sizeof(uint64_t)); + if (!byte_idx || !bit_idx) { + if (byte_idx) free(byte_idx); + if (bit_idx) free(bit_idx); + free(touched); free(localMask); + (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); + return; + } + + const uint64_t *upos_ptr = (const uint64_t *) pos_elems; + int i = 0; + while (i < length) { + size_t vl = __riscv_vsetvl_e64m1((size_t)(length - i)); + vuint64m1_t vpos = __riscv_vle64_v_u64m1(upos_ptr + i, vl); + vuint64m1_t vbytePos = __riscv_vsrl_vx_u64m1(vpos, 3, vl); + vuint64m1_t vbitPos = __riscv_vand_vx_u64m1(vpos, 7, vl); + __riscv_vse64_v_u64m1(byte_idx, vbytePos, vl); + __riscv_vse64_v_u64m1(bit_idx, vbitPos, vl); + for (size_t j = 0; j < vl; j++) { + uint64_t b = byte_idx[j]; + uint8_t bi = (uint8_t)(bit_idx[j] & 0xFFu); + if (b < (uint64_t)localMaskLen) { + uint8_t old = localMask[b]; + localMask[b] = (uint8_t)(old | (uint8_t)(1u << (bi & 7u))); + if (old == 0) touched[touchedCount++] = b; + } + } + i += (int)vl; + } + free(byte_idx); + free(bit_idx); +#else + for (int i = 0; i < length; i++) { + uint64_t upos = (uint64_t)pos_elems[i]; + uint64_t b = upos >> 3; + uint8_t bi = (uint8_t)(upos & 7u); + if (b < (uint64_t)localMaskLen) { + uint8_t old = localMask[b]; + localMask[b] = (uint8_t)(old | (uint8_t)(1u << (bi & 7u))); + if (old == 0) touched[touchedCount++] = b; + } + } +#endif + + // Release positions copy + (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); + + // Short critical region for write-back + jbyte *bitmap_ptr = (jbyte*)(*env)->GetPrimitiveArrayCritical(env, bitmap, NULL); + if (bitmap_ptr) { + uint8_t *buf = (uint8_t *)(bitmap_ptr + offset); + for (int k = 0; k < touchedCount; k++) { + uint64_t idx = touched[k]; + buf[idx] |= localMask[idx]; + } + (*env)->ReleasePrimitiveArrayCritical(env, bitmap, bitmap_ptr, 0); + } + + free(touched); + free(localMask); +} + +/* Batch check bits */ +JNIEXPORT void JNICALL +Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_nativeCheckBitsBatch + (JNIEnv *env, jobject obj, + jbyteArray bitmap, jint offset, + jlongArray positions, jint length, + jbooleanArray results) { + + if (length <= 0 || bitmap == NULL || positions == NULL || results == NULL) return; + + jsize bitmap_len = (*env)->GetArrayLength(env, bitmap); + if (bitmap_len <= 0 || offset < 0 || offset >= bitmap_len) return; + + // Copy arrays out first + jboolean isCopyP = JNI_FALSE, isCopyR = JNI_FALSE; + jlong* pos_elems = (*env)->GetLongArrayElements(env, positions, &isCopyP); + jboolean* res_elems = (*env)->GetBooleanArrayElements(env, results, &isCopyR); + if (!pos_elems || !res_elems) { + if (pos_elems) (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); + if (res_elems) (*env)->ReleaseBooleanArrayElements(env, results, res_elems, 0); + return; + } + + // Read 
bitmap briefly in chunks without holding critical for long + jbyte *bmp = (jbyte*)(*env)->GetPrimitiveArrayCritical(env, bitmap, NULL); + if (!bmp) { + (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); + (*env)->ReleaseBooleanArrayElements(env, results, res_elems, 0); + return; + } + + uint8_t *buf = (uint8_t *)(bmp + offset); + +#if defined(__riscv) && defined(__riscv_vector) + size_t max_vl = __riscv_vsetvl_e64m1((size_t)length); + uint64_t *byte_idx = (uint64_t*) malloc(max_vl * sizeof(uint64_t)); + uint64_t *bit_idx = (uint64_t*) malloc(max_vl * sizeof(uint64_t)); + if (!byte_idx || !bit_idx) { + if (byte_idx) free(byte_idx); + if (bit_idx) free(bit_idx); + (*env)->ReleasePrimitiveArrayCritical(env, bitmap, bmp, 0); + (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); + (*env)->ReleaseBooleanArrayElements(env, results, res_elems, 0); + return; + } + + const uint64_t *upos_ptr = (const uint64_t *) pos_elems; + int i = 0; + while (i < length) { + size_t vl = __riscv_vsetvl_e64m1((size_t)(length - i)); + vuint64m1_t vpos = __riscv_vle64_v_u64m1(upos_ptr + i, vl); + vuint64m1_t vbytePos = __riscv_vsrl_vx_u64m1(vpos, 3, vl); + vuint64m1_t vbitPos = __riscv_vand_vx_u64m1(vpos, 7, vl); + __riscv_vse64_v_u64m1(byte_idx, vbytePos, vl); + __riscv_vse64_v_u64m1(bit_idx, vbitPos, vl); + for (size_t j = 0; j < vl; j++) { + uint64_t b = byte_idx[j]; + uint64_t bi = bit_idx[j]; + if (b < (uint64_t)(bitmap_len - offset)) { + res_elems[i + j] = (buf[b] & (uint8_t)(1u << (bi & 7))) ? JNI_TRUE : JNI_FALSE; + } else { + res_elems[i + j] = JNI_FALSE; + } + } + i += (int)vl; + } + free(byte_idx); + free(bit_idx); +#else + for (int i = 0; i < length; i++) { + uint64_t upos = (uint64_t)pos_elems[i]; + uint64_t b = upos >> 3; + uint8_t bi = (uint8_t)(upos & 7u); + if (b < (uint64_t)(bitmap_len - offset)) { + res_elems[i] = (buf[b] & (uint8_t)(1u << (bi & 7))) ? 
JNI_TRUE : JNI_FALSE; + } else { + res_elems[i] = JNI_FALSE; + } + } +#endif + + (*env)->ReleasePrimitiveArrayCritical(env, bitmap, bmp, 0); + (*env)->ReleaseLongArrayElements(env, positions, pos_elems, JNI_ABORT); + (*env)->ReleaseBooleanArrayElements(env, results, res_elems, 0); +} + +static const uint8_t bitvals[8] = { 1u<<0, 1u<<1, 1u<<2, 1u<<3, 1u<<4, 1u<<5, 1u<<6, 1u<<7 }; + +void set_hashloc_rvv_final(int32_t hash1, int32_t hash2, int32_t byteSize, int hashCount, uint8_t *bloomBuf) { + if (!bloomBuf || byteSize <= 0 || hashCount <= 0) return; + + const int64_t bloomBitSize = (int64_t)byteSize * 8LL; + + uint8_t *byte_acc = (uint8_t *)calloc(byteSize, sizeof(uint8_t)); + if (!byte_acc) return; + + size_t base = 0; + while (base < (size_t)hashCount) { +#if defined(__riscv) && defined(__riscv_vector) + size_t vl = __riscv_vsetvl_e32m1(hashCount - base); + vuint32m1_t vid_u = __riscv_vid_v_u32m1(vl); + vint32m1_t vid = __riscv_vreinterpret_v_u32m1_i32m1(vid_u); + vint32m1_t vIdx = __riscv_vadd_vx_i32m1(vid, base, vl); + vint32m1_t vMul = __riscv_vmul_vx_i32m1(vIdx, hash2, vl); + vint32m1_t vComp = __riscv_vadd_vx_i32m1(vMul, hash1, vl); + int32_t tmp[vl]; + __riscv_vse32_v_i32m1(tmp, vComp, vl); + for (size_t j = 0; j < vl; ++j) { + int64_t pos = (int64_t)tmp[j] % bloomBitSize; + if (pos < 0) pos += bloomBitSize; + uint32_t bytePos = (uint32_t)(pos >> 3); + uint32_t bitPos = (uint32_t)(pos & 0x7); + byte_acc[bytePos] |= bitvals[bitPos]; + } + base += vl; +#else + int32_t pos = (int32_t)base; + int64_t comp = (int64_t)hash1 + (int64_t)pos * (int64_t)hash2; + int64_t p = comp % bloomBitSize; + if (p < 0) p += bloomBitSize; + uint32_t bytePos = (uint32_t)(p >> 3); + uint32_t bitPos = (uint32_t)(p & 0x7); + byte_acc[bytePos] |= bitvals[bitPos]; + base += 1; +#endif + } + + for (int i = 0; i < byteSize; ++i) { + bloomBuf[i] |= byte_acc[i]; + } + + free(byte_acc); +} + +JNIEXPORT void JNICALL Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_addToBloomFinal + (JNIEnv *env, jobject obj, jbyteArray bloomBuf, jint hash1, jint hash2, jint hashCount) { + jbyte *buf = (*env)->GetByteArrayElements(env, bloomBuf, NULL); + jsize len = (*env)->GetArrayLength(env, bloomBuf); + + set_hashloc_rvv_final(hash1, hash2, len, hashCount, (uint8_t *)buf); + + (*env)->ReleaseByteArrayElements(env, bloomBuf, buf, 0); +} + +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_getBitFinal + (JNIEnv *env, jobject obj, jlong pos, jbyteArray bloomBuf) { + + jsize len = (*env)->GetArrayLength(env, bloomBuf); + if (pos < 0) return JNI_FALSE; + int64_t bytePos = pos >> 3; + if (bytePos >= len) return JNI_FALSE; + + jbyte *buf = (*env)->GetByteArrayElements(env, bloomBuf, NULL); + int bitPos = (int)(pos & 0x7); + int result = ((uint8_t)buf[bytePos] & bitvals[bitPos]) != 0; + (*env)->ReleaseByteArrayElements(env, bloomBuf, buf, 0); + return result ? 
JNI_TRUE : JNI_FALSE;
+}
diff --git a/hbase-server/src/main/native/bloomfilter_rvv.h b/hbase-server/src/main/native/bloomfilter_rvv.h
new file mode 100644
index 000000000000..cc2ee0837892
--- /dev/null
+++ b/hbase-server/src/main/native/bloomfilter_rvv.h
@@ -0,0 +1,20 @@
+#ifndef _Included_org_apache_hadoop_hbase_util_BloomFilterRvvNative
+#define _Included_org_apache_hadoop_hbase_util_BloomFilterRvvNative
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_nativeSetBitOld
+  (JNIEnv *, jobject, jbyteArray, jint, jlong);
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_nativeSetBitsBatch
+  (JNIEnv *, jobject, jbyteArray, jint, jlongArray, jint);
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_hbase_util_BloomFilterRvvNative_nativeCheckBitsBatch
+  (JNIEnv *env, jobject obj, jbyteArray bitmap, jint offset, jlongArray positions, jint length, jbooleanArray results);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
diff --git a/hbase-server/src/main/native/scan_rvv.c b/hbase-server/src/main/native/scan_rvv.c
new file mode 100644
index 000000000000..7421da3cc4b9
--- /dev/null
+++ b/hbase-server/src/main/native/scan_rvv.c
@@ -0,0 +1,119 @@
+/**
+ * RISC-V RVV vectorized memory compare and copy.
+ * Uses the RISC-V Vector extension to speed up HBase Scan queries.
+ */
+
+#if defined(__riscv) && defined(__riscv_vector)
+#include <riscv_vector.h>
+#endif
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * Efficient memory compare built on the RISC-V RVV intrinsics.
+ *
+ * @param a   pointer to the first memory region
+ * @param b   pointer to the second memory region
+ * @param len number of bytes to compare
+ * @return <0 if a<b, 0 if equal, >0 if a>b;
+ *         the special value -999 signals invalid arguments
+ */
+int rvv_memcmp(const unsigned char* a, const unsigned char* b, size_t len) {
+    // Bounds check: make sure both pointers are valid
+    if (!a || !b) return -999;
+    if (len == 0) return 0;
+
+    size_t i = 0;
+#if defined(__riscv) && defined(__riscv_vector)
+    // RVV vectorized compare path
+    while (i < len) {
+        size_t remaining = len - i;
+        // Set the vector length for the remaining bytes
+        size_t vl = __riscv_vsetvl_e8m1(remaining);
+        if (vl == 0) break;
+
+        // Vector-load both memory regions
+        vuint8m1_t va = __riscv_vle8_v_u8m1(a + i, vl);
+        vuint8m1_t vb = __riscv_vle8_v_u8m1(b + i, vl);
+
+        // XOR to expose differing bytes
+        vuint8m1_t vxor = __riscv_vxor_vv_u8m1(va, vb, vl);
+
+        // Mask of equal positions
+        vbool8_t mask_eq = __riscv_vmseq_vx_u8m1_b8(vxor, 0, vl);
+        // Invert it to mark the differing positions
+        vbool8_t mask_diff = __riscv_vmnot_m_b8(mask_eq, vl);
+
+        // Locate the first differing byte
+        int first_diff = __riscv_vfirst_m_b8(mask_diff, vl);
+        if (first_diff >= 0) {
+            // Difference found: compute its position and return the byte difference
+            size_t pos = i + (size_t)first_diff;
+            unsigned char ca = (unsigned char)a[pos];
+            unsigned char cb = (unsigned char)b[pos];
+            return (int)ca - (int)cb;
+        }
+        i += vl; // advance to the next vector chunk
+    }
+#else
+    // Scalar fallback (non-RVV platforms)
+    for (; i < len; i++) {
+        unsigned char ca = (unsigned char)a[i];
+        unsigned char cb = (unsigned char)b[i];
+        if (ca != cb) {
+            return (int)ca - (int)cb;
+        }
+    }
+#endif
+    return 0; // all bytes are equal
+}
+
+/**
+ * Efficient memory copy built on the RISC-V RVV intrinsics.
+ *
+ * @param dst pointer to the destination region
+ * @param src pointer to the source region
+ * @param len number of bytes to copy
+ */
+void rvv_memcpy(unsigned char* dst, const unsigned char* src, size_t len) {
+    // Bounds check: pointers must be valid and the length positive
+    if (!dst || !src || len == 0) return;
+
+    size_t i = 0;
+#if defined(__riscv) && defined(__riscv_vector)
+    // RVV vectorized copy path
+    while (i < len) {
+        size_t remaining = len - i;
+        // Set the vector length for the remaining bytes
+        size_t vl = __riscv_vsetvl_e8m1(remaining);
+        if (vl == 0) break;
+
+        // Vector-load from the source
+        vuint8m1_t vec = __riscv_vle8_v_u8m1(src + i, vl);
+        // Vector-store into the destination
+        __riscv_vse8_v_u8m1(dst + i, vec, vl);
+        i += vl; // advance to the next vector chunk
+    }
+#else
+    // Scalar fallback (non-RVV platforms)
+    for (; i < len; i++) {
+        dst[i] = src[i];
+    }
+#endif
+}
+
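+/*
+ * Usage sketch (illustrative): both helpers follow plain memcmp/memcpy
+ * semantics over unsigned bytes, e.g.
+ *
+ *   unsigned char a[] = { 0x01, 0x7f, 0x80 };
+ *   unsigned char b[] = { 0x01, 0x7f, 0x90 };
+ *   int c = rvv_memcmp(a, b, 3);   // c < 0: first difference is 0x80 vs 0x90
+ *
+ *   unsigned char dst[3];
+ *   rvv_memcpy(dst, a, 3);         // dst now holds { 0x01, 0x7f, 0x80 }
+ *
+ * On builds without __riscv_vector both functions take the scalar fallback loops above.
+ */
+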
+/**
+ * Efficient prefix-match check built on the RISC-V RVV intrinsics.
+ *
+ * @param a         pointer to the first memory region
+ * @param b         pointer to the second memory region
+ * @param prefixLen length of the prefix to compare
+ * @return 1 if the prefixes match, 0 otherwise
+ */
+int rvv_prefix_match(const unsigned char* a, const unsigned char* b, size_t prefixLen) {
+    // Bounds check: an empty prefix (or a null pointer) counts as a match
+    if (!a || !b || prefixLen == 0) return 1;
+
+    // Reuse rvv_memcmp for the prefix comparison
+    return rvv_memcmp(a, b, prefixLen) == 0;
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/native/scan_rvv.h b/hbase-server/src/main/native/scan_rvv.h
new file mode 100644
index 000000000000..e456187424cb
--- /dev/null
+++ b/hbase-server/src/main/native/scan_rvv.h
@@ -0,0 +1,36 @@
+#ifndef SCAN_RVV_H
+#define SCAN_RVV_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Memory compare optimized with the RISC-V Vector extension.
+ * Returns:
+ *   0     if the two blocks are equal
+ *   non-0 the difference of the first differing byte
+ */
+int rvv_memcmp(const unsigned char* a, const unsigned char* b, size_t len);
+
+/**
+ * Memory copy optimized with the RISC-V Vector extension.
+ */
+void rvv_memcpy(unsigned char* dst, const unsigned char* src, size_t len);
+
+/**
+ * Check whether the first prefixLen bytes of two memory blocks are equal.
+ * Returns:
+ *   1 if they are equal
+ *   0 otherwise
+ */
+int rvv_prefix_match(const unsigned char* a, const unsigned char* b, size_t prefixLen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SCAN_RVV_H
+
diff --git a/hbase-server/src/main/native/scan_rvv_jni.c b/hbase-server/src/main/native/scan_rvv_jni.c
new file mode 100644
index 000000000000..4a3e928d9e62
--- /dev/null
+++ b/hbase-server/src/main/native/scan_rvv_jni.c
@@ -0,0 +1,549 @@
+#include <jni.h>
+#include <stdint.h>
+#include <stddef.h>
+#include "scan_rvv.h" // declares rvv_memcmp / rvv_prefix_match
+
+#if defined(__riscv) && defined(__riscv_vector)
+#include <riscv_vector.h>
+#endif
+
+// ------------------ JNI interface ------------------
+
+/**
+ * JNI entry point: RVV-optimized memory compare.
+ * Compares the given regions of two byte[] arrays.
+ *
+ * @param env     JNI environment pointer
+ * @param clazz   calling class
+ * @param a       first byte array
+ * @param offsetA offset into the first array
+ * @param lengthA length of the first region
+ * @param b       second byte array
+ * @param offsetB offset into the second array
+ * @param lengthB length of the second region
+ * @return <0 if a<b, 0 if equal, >0 if a>b;
+ *         special values: -999 invalid arguments, -997 bad offsets, -998 failed to pin the arrays
+ */
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_memcmp
+  (JNIEnv *env, jclass clazz,
+   jbyteArray a, jint offsetA, jint lengthA,
+   jbyteArray b, jint offsetB, jint lengthB) {
+    // Argument validation
+    if (!a || !b || lengthA <= 0 || lengthB <= 0) {
+        return -999;
+    }
+    size_t len = lengthA < lengthB ?
lengthA : lengthB; + + if (offsetA < 0 || offsetB < 0 || offsetA + len > lengthA || offsetB + len > lengthB) { + return -997; + } + + // 获取数组指针 + jbyte* a_ptr = (*env)->GetPrimitiveArrayCritical(env, a, 0); + jbyte* b_ptr = (*env)->GetPrimitiveArrayCritical(env, b, 0); + if (!a_ptr || !b_ptr) { + return -998; + } + + // 调用 RVV 优化的内存比较 + int ret = rvv_memcmp((const unsigned char*)(a_ptr + offsetA), + (const unsigned char*)(b_ptr + offsetB), + len); + + // 释放数组指针 + (*env)->ReleasePrimitiveArrayCritical(env, a, a_ptr, JNI_ABORT); + (*env)->ReleasePrimitiveArrayCritical(env, b, b_ptr, JNI_ABORT); + + if (ret != 0) return ret; + return lengthA - lengthB; +} + + +/** + * JNI 接口:使用 RVV 优化的前缀匹配检查 + * 检查两个 byte[] 数组是否具有相同的前缀 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param a 第一个字节数组 + * @param offsetA 第一个数组的偏移量 + * @param b 第二个字节数组 + * @param offsetB 第二个数组的偏移量 + * @param prefixLen 前缀长度 + * @return JNI_TRUE 表示前缀匹配,JNI_FALSE 表示不匹配 + */ +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_prefixMatch + (JNIEnv *env, jclass clazz, + jbyteArray a, jint offsetA, + jbyteArray b, jint offsetB, + jint prefixLen) +{ + // 参数验证 + if (!a || !b || prefixLen <= 0) return JNI_FALSE; + + jsize lenA = (*env)->GetArrayLength(env, a); + jsize lenB = (*env)->GetArrayLength(env, b); + if (offsetA < 0 || offsetB < 0 || offsetA + prefixLen > lenA || offsetB + prefixLen > lenB) + return JNI_FALSE; + + // 获取数组指针 + jbyte* a_ptr = (*env)->GetPrimitiveArrayCritical(env, a, 0); + jbyte* b_ptr = (*env)->GetPrimitiveArrayCritical(env, b, 0); + + // 调用 RVV 优化的前缀匹配 + int ret = rvv_prefix_match((const unsigned char*)(a_ptr + offsetA), + (const unsigned char*)(b_ptr + offsetB), + (size_t)prefixLen); + + // 释放数组指针 + (*env)->ReleasePrimitiveArrayCritical(env, a, a_ptr, JNI_ABORT); + (*env)->ReleasePrimitiveArrayCritical(env, b, b_ptr, JNI_ABORT); + + return ret ? JNI_TRUE : JNI_FALSE; +} + +/** + * JNI 接口:ByteBuffer vs ByteBuffer 的公共前缀查找 + * 使用 RVV 指令集优化查找两个 ByteBuffer 的公共前缀长度 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param a 第一个 ByteBuffer + * @param aOffset 第一个 ByteBuffer 的偏移量 + * @param aLen 第一个 ByteBuffer 的长度 + * @param b 第二个 ByteBuffer + * @param bOffset 第二个 ByteBuffer 的偏移量 + * @param bLen 第二个 ByteBuffer 的长度 + * @return 公共前缀的长度 + */ +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_RVVByteBufferUtils_findCommonPrefixRvv__Ljava_nio_ByteBuffer_2IILjava_nio_ByteBuffer_2II + (JNIEnv *env, jclass clazz, jobject a, jint aOffset, jint aLen, + jobject b, jint bOffset, jint bLen) { + + // 获取 DirectBuffer 地址 + const unsigned char* pa = (const unsigned char*)(*env)->GetDirectBufferAddress(env, a); + const unsigned char* pb = (const unsigned char*)(*env)->GetDirectBufferAddress(env, b); + + if (!pa || !pb) return 0; + + size_t maxLen = aLen < bLen ? 
aLen : bLen; + size_t i = 0; + +#if defined(__riscv) && defined(__riscv_vector) + // RVV 向量化查找公共前缀 + while (i < maxLen) { + size_t remaining = maxLen - i; + size_t vl = __riscv_vsetvl_e8m1(remaining); + if (vl == 0) break; // 添加 vl 检查 + + // 向量化加载 + vuint8m1_t va = __riscv_vle8_v_u8m1(pa + aOffset + i, vl); + vuint8m1_t vb = __riscv_vle8_v_u8m1(pb + bOffset + i, vl); + + // 计算异或,找出差异位 + vuint8m1_t vxor = __riscv_vxor_vv_u8m1(va, vb, vl); + + // 生成掩码,标记相等位置 + vbool8_t mask = __riscv_vmseq_vx_u8m1_b8(vxor, 0, vl); + // 反转掩码,标记不相等位置 + vbool8_t mask_diff = __riscv_vmnot_m_b8(mask, vl); + + // 找第一个不同字节的位置 + int first_diff = __riscv_vfirst_m_b8(mask_diff, vl); + + if (first_diff >= 0) { + i += first_diff; + return (jint)i; + } + + i += vl; + } + return (jint)i; +#else + // 标量回退实现 + for (; i < maxLen; i++) { + if (pa[aOffset + i] != pb[bOffset + i]) break; + } + return (jint)i; +#endif +} + +/** + * JNI 接口:ByteBuffer vs byte[] 的公共前缀查找 + * 使用 RVV 指令集优化查找 ByteBuffer 和 byte[] 的公共前缀长度 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param a 第一个 ByteBuffer + * @param aOffset 第一个 ByteBuffer 的偏移量 + * @param aLen 第一个 ByteBuffer 的长度 + * @param b 第二个字节数组 + * @param bOffset 第二个数组的偏移量 + * @param bLen 第二个数组的长度 + * @return 公共前缀的长度 + */ +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_RVVByteBufferUtils_findCommonPrefixRvv__Ljava_nio_ByteBuffer_2II_3BII + (JNIEnv *env, jclass clazz, jobject a, jint aOffset, jint aLen, + jbyteArray b, jint bOffset, jint bLen) { + + // 获取 DirectBuffer 地址 + const unsigned char* pa = (const unsigned char*)(*env)->GetDirectBufferAddress(env, a); + if (!pa) return 0; + + // 获取 byte[] 指针 + jbyte* pb = (*env)->GetPrimitiveArrayCritical(env, b, 0); + if (!pb) return 0; + + size_t maxLen = aLen < bLen ? aLen : bLen; + size_t i = 0; + +#if defined(__riscv) && defined(__riscv_vector) + // RVV 向量化查找公共前缀 + while (i < maxLen) { + size_t remaining = maxLen - i; + size_t vl = __riscv_vsetvl_e8m1(remaining); + if (vl == 0) break; // 添加 vl 检查 + + // 向量化加载 + vuint8m1_t va = __riscv_vle8_v_u8m1(pa + aOffset + i, vl); + vuint8m1_t vb = __riscv_vle8_v_u8m1((const unsigned char*)(pb + bOffset + i), vl); + + // 计算异或,找出差异位 + vuint8m1_t vxor = __riscv_vxor_vv_u8m1(va, vb, vl); + + // 生成掩码,标记相等位置 + vbool8_t mask = __riscv_vmseq_vx_u8m1_b8(vxor, 0, vl); + // 反转掩码,标记不相等位置 + vbool8_t mask_diff = __riscv_vmnot_m_b8(mask, vl); + + // 找第一个不同字节 + int first_diff = __riscv_vfirst_m_b8(mask_diff, vl); + + if (first_diff >= 0) { + i += first_diff; + (*env)->ReleasePrimitiveArrayCritical(env, b, pb, JNI_ABORT); + return (jint)i; + } + + i += vl; + } + + (*env)->ReleasePrimitiveArrayCritical(env, b, pb, JNI_ABORT); + return (jint)i; +#else + // 标量回退实现 + for (; i < maxLen; i++) { + if (pa[aOffset + i] != pb[bOffset + i]) break; + } + (*env)->ReleasePrimitiveArrayCritical(env, b, pb, JNI_ABORT); + return (jint)i; +#endif +} + +/** + * JNI 接口:byte[] vs byte[] 的公共前缀查找 + * 使用 RVV 指令集优化查找两个 byte[] 的公共前缀长度 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param a 第一个字节数组 + * @param offsetA 第一个数组的偏移量 + * @param lengthA 第一个数组的长度 + * @param b 第二个字节数组 + * @param offsetB 第二个数组的偏移量 + * @param lengthB 第二个数组的长度 + * @return 公共前缀的长度 + */ +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_rvvCommonPrefix + (JNIEnv *env, jclass clazz, + jbyteArray a, jint offsetA, jint lengthA, + jbyteArray b, jint offsetB, jint lengthB) { + + // 获取数组指针 + jbyte* arrA = (*env)->GetPrimitiveArrayCritical(env, a, 0); + jbyte* arrB = (*env)->GetPrimitiveArrayCritical(env, b, 0); + + size_t maxLen = lengthA < lengthB ? 
lengthA : lengthB; + size_t i = 0; + +#if defined(__riscv) && defined(__riscv_vector) + // RVV 向量化查找公共前缀 + const unsigned char* pa = (const unsigned char*)(arrA + offsetA); + const unsigned char* pb = (const unsigned char*)(arrB + offsetB); + + while (i < maxLen) { + size_t remaining = maxLen - i; + size_t vl = __riscv_vsetvl_e8m1(remaining); + if (vl == 0) break; // 添加 vl 检查 + + // 向量化加载 + vuint8m1_t va = __riscv_vle8_v_u8m1(pa + i, vl); + vuint8m1_t vb = __riscv_vle8_v_u8m1(pb + i, vl); + vuint8m1_t vxor = __riscv_vxor_vv_u8m1(va, vb, vl); + + // 生成掩码,标记相等位置 + vbool8_t mask = __riscv_vmseq_vx_u8m1_b8(vxor, 0, vl); + // 反转掩码,标记不相等位置 + vbool8_t mask_diff = __riscv_vmnot_m_b8(mask, vl); + + // 找第一个不同字节 + int first_diff = __riscv_vfirst_m_b8(mask_diff, vl); + + if (first_diff >= 0) { + i += first_diff; + break; + } + + i += vl; + } +#else + // 标量回退实现 + for (; i < maxLen; i++) { + if (arrA[offsetA + i] != arrB[offsetB + i]) break; + } +#endif + // 确保数组被释放并返回值 + (*env)->ReleasePrimitiveArrayCritical(env, a, arrA, JNI_ABORT); + (*env)->ReleasePrimitiveArrayCritical(env, b, arrB, JNI_ABORT); + return (jint)i; +} + +/** + * JNI 接口:使用 RVV 优化的内存拷贝 + * 在 byte[] 数组之间进行高效的内存拷贝 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param dst 目标字节数组 + * @param dstOffset 目标数组的偏移量 + * @param src 源字节数组 + * @param srcOffset 源数组的偏移量 + * @param length 拷贝长度 + */ +JNIEXPORT void JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_rvvMemcpy + (JNIEnv *env, jclass clazz, + jbyteArray dst, jint dstOffset, + jbyteArray src, jint srcOffset, jint length) { + + // 参数验证 + if (!dst || !src || length <= 0) { + return; + } + + // 获取数组指针 + jbyte* dst_ptr = (*env)->GetPrimitiveArrayCritical(env, dst, 0); + jbyte* src_ptr = (*env)->GetPrimitiveArrayCritical(env, src, 0); + if (!dst_ptr || !src_ptr) { + return; + } + +#if defined(__riscv) && defined(__riscv_vector) + // RVV 向量化拷贝 + size_t i = 0; + while (i < (size_t)length) { + size_t remaining = length - i; + size_t vl = __riscv_vsetvl_e8m1(remaining); + if (vl == 0) break; // 添加 vl 检查 + + // 向量化加载和存储 + vuint8m1_t vec = __riscv_vle8_v_u8m1((const unsigned char*)(src_ptr + srcOffset + i), vl); + __riscv_vse8_v_u8m1((unsigned char*)(dst_ptr + dstOffset + i), vec, vl); + i += vl; + } +#else + // 标量回退实现 + for (size_t i = 0; i < (size_t)length; i++) { + dst_ptr[dstOffset + i] = src_ptr[srcOffset + i]; + } +#endif + + // 释放数组指针 + (*env)->ReleasePrimitiveArrayCritical(env, dst, dst_ptr, 0); + (*env)->ReleasePrimitiveArrayCritical(env, src, src_ptr, JNI_ABORT); +} + +/** + * JNI 接口:使用 RVV 优化的字节数组比较 + * 比较两个 byte[] 数组的指定区域 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param left 左字节数组 + * @param loff 左数组的偏移量 + * @param llen 左数组的长度 + * @param right 右字节数组 + * @param roff 右数组的偏移量 + * @param rlen 右数组的长度 + * @return 比较结果:<0 表示 left0 表示 left>right + */ +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_Bytes_compareToRvv + (JNIEnv *env, jclass clazz, + jbyteArray left, jint loff, jint llen, + jbyteArray right, jint roff, jint rlen) { + + // 获取数组指针 + jbyte* l = (*env)->GetPrimitiveArrayCritical(env, left, 0); + jbyte* r = (*env)->GetPrimitiveArrayCritical(env, right, 0); + + size_t maxLen = llen < rlen ? 
llen : rlen; + size_t i = 0; + +#if defined(__riscv) && defined(__riscv_vector) + // RVV 向量化比较 + while (i < maxLen) { + size_t remaining = maxLen - i; + size_t vl = __riscv_vsetvl_e8m1(remaining); + if (vl == 0) break; // 添加 vl 检查 + + // 使用 unsigned 类型加载 + vuint8m1_t vlv = __riscv_vle8_v_u8m1((const unsigned char*)(l + loff + i), vl); + vuint8m1_t vrv = __riscv_vle8_v_u8m1((const unsigned char*)(r + roff + i), vl); + + // 计算异或,找出差异位 + vuint8m1_t vxor = __riscv_vxor_vv_u8m1(vlv, vrv, vl); + + // 生成掩码,标记相等位置 + vbool8_t mask = __riscv_vmseq_vx_u8m1_b8(vxor, 0, vl); + // 反转掩码,标记不相等位置 + vbool8_t mask_diff = __riscv_vmnot_m_b8(mask, vl); + int first_diff = __riscv_vfirst_m_b8(mask_diff, vl); + + if (first_diff >= 0) { + int ret = ((unsigned char)l[loff + i + first_diff]) - + ((unsigned char)r[roff + i + first_diff]); + (*env)->ReleasePrimitiveArrayCritical(env, left, l, 0); + (*env)->ReleasePrimitiveArrayCritical(env, right, r, 0); + return ret; + } + + i += vl; + } +#else + // 标量回退实现 + for (; i < maxLen; i++) { + if (l[loff + i] != r[roff + i]) { + int ret = (int)(l[loff + i]) - (int)(r[roff + i]); + (*env)->ReleasePrimitiveArrayCritical(env, left, l, 0); + (*env)->ReleasePrimitiveArrayCritical(env, right, r, 0); + return ret; + } + } +#endif + + // 释放数组指针 + (*env)->ReleasePrimitiveArrayCritical(env, left, l, 0); + (*env)->ReleasePrimitiveArrayCritical(env, right, r, 0); + return 0; +} + + + +/** + * JNI 接口:检查 RVV 是否可用 + * 返回当前平台是否支持 RISC-V 向量扩展 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @return JNI_TRUE 表示支持 RVV,JNI_FALSE 表示不支持 + */ +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_isEnabled + (JNIEnv* env, jclass clazz) { +#if defined(__riscv) && defined(__riscv_vector) + return JNI_TRUE; +#else + return JNI_FALSE; +#endif +} + +/** + * JNI 接口:全量 Cell 比较 + * 使用 RVV 优化比较两个完整的 Cell 键 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param aKey 第一个 Cell 键 + * @param aLen 第一个键的长度 + * @param bKey 第二个 Cell 键 + * @param bLen 第二个键的长度 + * @return 比较结果:<0 表示 aKey0 表示 aKey>bKey + */ +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_compareCells + (JNIEnv* env, jclass clazz, jbyteArray aKey, jint aLen, jbyteArray bKey, jint bLen) { + + // 获取数组指针 + jbyte* a_ptr = (*env)->GetPrimitiveArrayCritical(env, aKey, 0); + jbyte* b_ptr = (*env)->GetPrimitiveArrayCritical(env, bKey, 0); + + // 比较最小长度部分 + size_t minLen = aLen < bLen ? aLen : bLen; + int cmp = rvv_memcmp((const unsigned char*)a_ptr, (const unsigned char*)b_ptr, minLen); + if (cmp == 0) cmp = (int)aLen - (int)bLen; + + // 释放数组指针 + (*env)->ReleasePrimitiveArrayCritical(env, aKey, a_ptr, JNI_ABORT); + (*env)->ReleasePrimitiveArrayCritical(env, bKey, b_ptr, JNI_ABORT); + + return cmp; +} + +/** + * JNI 接口:比较键以确定是否跳转到下一行 + * 使用 RVV 优化比较索引键和当前键 + * + * @param env JNI 环境指针 + * @param clazz 调用类 + * @param indexedKey 索引键 + * @param idxLen 索引键长度 + * @param curKey 当前键 + * @param curLen 当前键长度 + * @return 比较结果:<0 表示 indexedKey0 表示 indexedKey>curKey + */ +JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_compareKeyForNextRow + (JNIEnv* env, jclass clazz, jbyteArray indexedKey, jint idxLen, jbyteArray curKey, jint curLen) { + + // 获取数组指针 + jbyte* idx_ptr = (*env)->GetPrimitiveArrayCritical(env, indexedKey, 0); + jbyte* cur_ptr = (*env)->GetPrimitiveArrayCritical(env, curKey, 0); + + // 比较最小长度部分 + size_t minLen = idxLen < curLen ? 
idxLen : curLen;
+    int cmp = rvv_memcmp((const unsigned char*)idx_ptr, (const unsigned char*)cur_ptr, minLen);
+    if (cmp == 0) cmp = (int)idxLen - (int)curLen;
+
+    // Release the array pointers
+    (*env)->ReleasePrimitiveArrayCritical(env, indexedKey, idx_ptr, JNI_ABORT);
+    (*env)->ReleasePrimitiveArrayCritical(env, curKey, cur_ptr, JNI_ABORT);
+
+    return cmp;
+}
+
+/**
+ * JNI entry point: compare keys to decide whether to seek to the next column.
+ * Compares the indexed key against the current key using the RVV-optimized compare.
+ *
+ * @param env        JNI environment pointer
+ * @param clazz      calling class
+ * @param indexedKey the indexed key
+ * @param idxLen     length of the indexed key
+ * @param curKey     the current key
+ * @param curLen     length of the current key
+ * @return <0 if indexedKey<curKey, 0 if equal, >0 if indexedKey>curKey
+ */
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_hbase_util_ScanRVV_compareKeyForNextColumn
+  (JNIEnv* env, jclass clazz, jbyteArray indexedKey, jint idxLen, jbyteArray curKey, jint curLen) {
+
+    // Pin the array pointers
+    jbyte* idx_ptr = (*env)->GetPrimitiveArrayCritical(env, indexedKey, 0);
+    jbyte* cur_ptr = (*env)->GetPrimitiveArrayCritical(env, curKey, 0);
+
+    // Compare the common-length prefix
+    size_t minLen = idxLen < curLen ? idxLen : curLen;
+    int cmp = rvv_memcmp((const unsigned char*)idx_ptr, (const unsigned char*)cur_ptr, minLen);
+    if (cmp == 0) cmp = (int)idxLen - (int)curLen;
+
+    // Release the array pointers
+    (*env)->ReleasePrimitiveArrayCritical(env, indexedKey, idx_ptr, JNI_ABORT);
+    (*env)->ReleasePrimitiveArrayCritical(env, curKey, cur_ptr, JNI_ABORT);
+
+    return cmp;
+}
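
A quick way to sanity-check the scan_rvv helpers off-cluster, before the JNI layer is involved, is a small host-side harness. The sketch below is illustrative only: the file name test_scan_rvv.c and the build commands are assumptions, not part of this patch. It links against scan_rvv.c and exercises rvv_memcmp, rvv_prefix_match and rvv_memcpy, and it runs on both RVV and non-RVV toolchains because the helpers fall back to scalar loops when __riscv_vector is not defined.

// test_scan_rvv.c -- hypothetical smoke test for the scan_rvv helpers
// scalar build:  cc -O2 test_scan_rvv.c scan_rvv.c -o test_scan_rvv
// RVV build:     cc -O2 -march=rv64gcv test_scan_rvv.c scan_rvv.c -o test_scan_rvv
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "scan_rvv.h"

int main(void) {
  const unsigned char row1[] = "row-0001/cf:qual";
  const unsigned char row2[] = "row-0002/cf:qual";

  // Equal inputs compare as 0; otherwise the result is the signed difference
  // of the first mismatching byte (here '1' - '2' < 0).
  assert(rvv_memcmp(row1, row1, sizeof(row1)) == 0);
  assert(rvv_memcmp(row1, row2, sizeof(row1)) < 0);

  // The first 4 bytes ("row-") match; the first 9 bytes do not.
  assert(rvv_prefix_match(row1, row2, 4) == 1);
  assert(rvv_prefix_match(row1, row2, 9) == 0);

  // rvv_memcpy behaves like memcpy for non-overlapping buffers.
  unsigned char dst[sizeof(row1)];
  rvv_memcpy(dst, row1, sizeof(row1));
  assert(memcmp(dst, row1, sizeof(row1)) == 0);

  printf("scan_rvv smoke test passed\n");
  return 0;
}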