#ifndef PHYSICS_CLIENT_C_API_H
#define PHYSICS_CLIENT_C_API_H

//#include "SharedMemoryBlock.h"
#include "SharedMemoryPublic.h"

#define B3_DECLARE_HANDLE(name) \
	typedef struct name##__ \
	{ \
		int unused; \
	} * name

B3_DECLARE_HANDLE(b3PhysicsClientHandle);
B3_DECLARE_HANDLE(b3SharedMemoryCommandHandle);
B3_DECLARE_HANDLE(b3SharedMemoryStatusHandle);

#ifdef _WIN32
#define B3_SHARED_API __declspec(dllexport)
#elif defined(__GNUC__)
#define B3_SHARED_API __attribute__((visibility("default")))
#else
#define B3_SHARED_API
#endif

///There are several connection methods, see following header files:
#include "PhysicsClientSharedMemory_C_API.h"
#include "PhysicsClientSharedMemory2_C_API.h"
#include "PhysicsDirectC_API.h"

#ifdef BT_ENABLE_ENET
#include "PhysicsClientUDP_C_API.h"
#endif

#ifdef BT_ENABLE_CLSOCKET
#include "PhysicsClientTCP_C_API.h"
#endif

#ifdef __cplusplus
extern "C"
{
#endif

///b3DisconnectSharedMemory will disconnect the client from the server and clean up memory.
B3_SHARED_API void b3DisconnectSharedMemory(b3PhysicsClientHandle physClient);

///There can only be 1 outstanding command. Check if a command can be sent.
B3_SHARED_API int b3CanSubmitCommand(b3PhysicsClientHandle physClient);

///blocking submit command and wait for status
B3_SHARED_API b3SharedMemoryStatusHandle b3SubmitClientCommandAndWaitStatus(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle);

///In general it is better to use b3SubmitClientCommandAndWaitStatus. b3SubmitClientCommand is a non-blocking submit
///command, which requires checking for the status manually, using b3ProcessServerStatus. Also, before sending the
///next command, make sure to check if you can send a command using 'b3CanSubmitCommand'.
B3_SHARED_API int b3SubmitClientCommand(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle);

///non-blocking check status
B3_SHARED_API b3SharedMemoryStatusHandle b3ProcessServerStatus(b3PhysicsClientHandle physClient);

/// Get the physics server return status type. See EnumSharedMemoryServerStatus in SharedMemoryPublic.h for error codes.
B3_SHARED_API int b3GetStatusType(b3SharedMemoryStatusHandle statusHandle);
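/* Usage sketch (added illustration, not part of the original API surface):
   a minimal blocking command/status round trip. physClient is assumed to come
   from one of the connection headers included above (e.g. b3ConnectPhysicsDirect
   in PhysicsDirectC_API.h); b3InitStepSimulationCommand is declared further below.

    if (b3CanSubmitCommand(physClient))
    {
        b3SharedMemoryCommandHandle cmd = b3InitStepSimulationCommand(physClient);
        b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
        int statusType = b3GetStatusType(status);  // compare against EnumSharedMemoryServerStatus
    }
*/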

///Plugin system, load and unload a plugin, execute a command
B3_SHARED_API b3SharedMemoryCommandHandle b3CreateCustomCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3CustomCommandLoadPlugin(b3SharedMemoryCommandHandle commandHandle, const char* pluginPath);
B3_SHARED_API void b3CustomCommandLoadPluginSetPostFix(b3SharedMemoryCommandHandle commandHandle, const char* postFix);
B3_SHARED_API int b3GetStatusPluginUniqueId(b3SharedMemoryStatusHandle statusHandle);
B3_SHARED_API int b3GetStatusPluginCommandResult(b3SharedMemoryStatusHandle statusHandle);
B3_SHARED_API int b3GetStatusPluginCommandReturnData(b3PhysicsClientHandle physClient, struct b3UserDataValue* valueOut);

B3_SHARED_API void b3CustomCommandUnloadPlugin(b3SharedMemoryCommandHandle commandHandle, int pluginUniqueId);
B3_SHARED_API void b3CustomCommandExecutePluginCommand(b3SharedMemoryCommandHandle commandHandle, int pluginUniqueId, const char* textArguments);
B3_SHARED_API void b3CustomCommandExecuteAddIntArgument(b3SharedMemoryCommandHandle commandHandle, int intVal);
B3_SHARED_API void b3CustomCommandExecuteAddFloatArgument(b3SharedMemoryCommandHandle commandHandle, float floatVal);
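/* Usage sketch (added illustration): load a plugin, then execute one of its
   commands. The plugin path and text arguments are hypothetical placeholders.

    b3SharedMemoryCommandHandle cmd = b3CreateCustomCommand(physClient);
    b3CustomCommandLoadPlugin(cmd, "path/to/myPlugin");  // hypothetical path
    b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int pluginUid = b3GetStatusPluginUniqueId(status);

    cmd = b3CreateCustomCommand(physClient);
    b3CustomCommandExecutePluginCommand(cmd, pluginUid, "text args");  // hypothetical arguments
    b3CustomCommandExecuteAddIntArgument(cmd, 42);
    status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int result = b3GetStatusPluginCommandResult(status);
*/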

B3_SHARED_API int b3GetStatusBodyIndices(b3SharedMemoryStatusHandle statusHandle, int* bodyIndicesOut, int bodyIndicesCapacity);

B3_SHARED_API int b3GetStatusBodyIndex(b3SharedMemoryStatusHandle statusHandle);

B3_SHARED_API int b3GetStatusActualState(b3SharedMemoryStatusHandle statusHandle,
	int* bodyUniqueId,
	int* numDegreeOfFreedomQ,
	int* numDegreeOfFreedomU,
	const double* rootLocalInertialFrame[],
	const double* actualStateQ[],
	const double* actualStateQdot[],
	const double* jointReactionForces[]);

B3_SHARED_API int b3GetStatusActualState2(b3SharedMemoryStatusHandle statusHandle,
	int* bodyUniqueId,
	int* numLinks,
	int* numDegreeOfFreedomQ,
	int* numDegreeOfFreedomU,
	const double* rootLocalInertialFrame[],
	const double* actualStateQ[],
	const double* actualStateQdot[],
	const double* jointReactionForces[],
	const double* linkLocalInertialFrames[],
	const double* jointMotorForces[],
	const double* linkStates[],
	const double* linkWorldVelocities[]);

B3_SHARED_API b3SharedMemoryCommandHandle b3RequestCollisionInfoCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API int b3GetStatusAABB(b3SharedMemoryStatusHandle statusHandle, int linkIndex, double aabbMin[/*3*/], double aabbMax[/*3*/]);

///If you re-connected to an existing server, or the server otherwise changed, sync the body info, user constraints etc.
B3_SHARED_API b3SharedMemoryCommandHandle b3InitSyncBodyInfoCommand(b3PhysicsClientHandle physClient);

// Sync the body info of a single body. Useful when a new body has been added by a different client (e.g. when detected through a body added notification).
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestBodyInfoCommand(b3PhysicsClientHandle physClient, int bodyUniqueId);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitRemoveBodyCommand(b3PhysicsClientHandle physClient, int bodyUniqueId);

///return the total number of bodies in the simulation
B3_SHARED_API int b3GetNumBodies(b3PhysicsClientHandle physClient);

/// return the body unique id, given the index in range [0, b3GetNumBodies())
B3_SHARED_API int b3GetBodyUniqueId(b3PhysicsClientHandle physClient, int serialIndex);

///given a body unique id, return the body information. See b3BodyInfo in SharedMemoryPublic.h
B3_SHARED_API int b3GetBodyInfo(b3PhysicsClientHandle physClient, int bodyUniqueId, struct b3BodyInfo* info);
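/* Usage sketch (added illustration): sync and enumerate all bodies known to
   the server.

    b3SubmitClientCommandAndWaitStatus(physClient, b3InitSyncBodyInfoCommand(physClient));
    int numBodies = b3GetNumBodies(physClient);
    for (int i = 0; i < numBodies; i++)
    {
        int bodyUniqueId = b3GetBodyUniqueId(physClient, i);
        struct b3BodyInfo info;
        if (b3GetBodyInfo(physClient, bodyUniqueId, &info))
        {
            // inspect the fields of b3BodyInfo (see SharedMemoryPublic.h)
        }
    }
*/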

///given a body unique id (after loading the body), return the number of joints.
B3_SHARED_API int b3GetNumJoints(b3PhysicsClientHandle physClient, int bodyUniqueId);
///given a body unique id (after loading the body), return the number of degrees of freedom (DoF).
B3_SHARED_API int b3GetNumDofs(b3PhysicsClientHandle physClient, int bodyUniqueId);

///compute the number of degrees of freedom for this body.
///Return -1 for an unsupported spherical joint, -2 for an unsupported planar joint.
B3_SHARED_API int b3ComputeDofCount(b3PhysicsClientHandle physClient, int bodyUniqueId);

///given a body and joint index, return the joint information. See b3JointInfo in SharedMemoryPublic.h
B3_SHARED_API int b3GetJointInfo(b3PhysicsClientHandle physClient, int bodyUniqueId, int jointIndex, struct b3JointInfo* info);
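/* Usage sketch (added illustration): enumerate the joints of one body.

    int numJoints = b3GetNumJoints(physClient, bodyUniqueId);
    for (int j = 0; j < numJoints; j++)
    {
        struct b3JointInfo jointInfo;
        if (b3GetJointInfo(physClient, bodyUniqueId, j, &jointInfo))
        {
            // inspect the fields of b3JointInfo (see SharedMemoryPublic.h)
        }
    }
*/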

///user data handling
B3_SHARED_API b3SharedMemoryCommandHandle b3InitSyncUserDataCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3AddBodyToSyncUserDataRequest(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitAddUserDataCommand(b3PhysicsClientHandle physClient, int bodyUniqueId, int linkIndex, int visualShapeIndex, const char* key, enum UserDataValueType valueType, int valueLength, const void* valueData);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRemoveUserDataCommand(b3PhysicsClientHandle physClient, int userDataId);

B3_SHARED_API int b3GetUserData(b3PhysicsClientHandle physClient, int userDataId, struct b3UserDataValue* valueOut);
B3_SHARED_API int b3GetUserDataId(b3PhysicsClientHandle physClient, int bodyUniqueId, int linkIndex, int visualShapeIndex, const char* key);
B3_SHARED_API int b3GetUserDataIdFromStatus(b3SharedMemoryStatusHandle statusHandle);
B3_SHARED_API int b3GetNumUserData(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API void b3GetUserDataInfo(b3PhysicsClientHandle physClient, int bodyUniqueId, int userDataIndex, const char** keyOut, int* userDataIdOut, int* linkIndexOut, int* visualShapeIndexOut);
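/* Usage sketch (added illustration): sync user data for a body, then look up
   a key. "myKey" is a hypothetical key; the -1/-1 arguments assume the usual
   convention of addressing the base link and no specific visual shape.

    b3SharedMemoryCommandHandle cmd = b3InitSyncUserDataCommand(physClient);
    b3AddBodyToSyncUserDataRequest(cmd, bodyUniqueId);
    b3SubmitClientCommandAndWaitStatus(physClient, cmd);

    int userDataId = b3GetUserDataId(physClient, bodyUniqueId, -1, -1, "myKey");
    struct b3UserDataValue value;
    if (userDataId >= 0 && b3GetUserData(physClient, userDataId, &value))
    {
        // value describes the type, length and payload (see SharedMemoryPublic.h)
    }
*/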

B3_SHARED_API b3SharedMemoryCommandHandle b3GetDynamicsInfoCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId, int linkIndex);
B3_SHARED_API b3SharedMemoryCommandHandle b3GetDynamicsInfoCommandInit2(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex);

///given a body unique id and link index, return the dynamics information. See b3DynamicsInfo in SharedMemoryPublic.h
B3_SHARED_API int b3GetDynamicsInfo(b3SharedMemoryStatusHandle statusHandle, struct b3DynamicsInfo* info);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitChangeDynamicsInfo(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitChangeDynamicsInfo2(b3SharedMemoryCommandHandle commandHandle);

B3_SHARED_API int b3ChangeDynamicsInfoSetMass(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double mass);
B3_SHARED_API int b3ChangeDynamicsInfoSetLocalInertiaDiagonal(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, const double localInertiaDiagonal[]);
B3_SHARED_API int b3ChangeDynamicsInfoSetAnisotropicFriction(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, const double anisotropicFriction[]);
B3_SHARED_API int b3ChangeDynamicsInfoSetJointLimit(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double jointLowerLimit, double jointUpperLimit);
B3_SHARED_API int b3ChangeDynamicsInfoSetJointLimitForce(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double jointLimitForce);
B3_SHARED_API int b3ChangeDynamicsInfoSetDynamicType(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, int dynamicType);

B3_SHARED_API int b3ChangeDynamicsInfoSetSleepThreshold(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, double sleepThreshold);

B3_SHARED_API int b3ChangeDynamicsInfoSetLateralFriction(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double lateralFriction);
B3_SHARED_API int b3ChangeDynamicsInfoSetSpinningFriction(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double friction);
B3_SHARED_API int b3ChangeDynamicsInfoSetRollingFriction(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double friction);
B3_SHARED_API int b3ChangeDynamicsInfoSetRestitution(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double restitution);
B3_SHARED_API int b3ChangeDynamicsInfoSetLinearDamping(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, double linearDamping);
B3_SHARED_API int b3ChangeDynamicsInfoSetAngularDamping(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, double angularDamping);
B3_SHARED_API int b3ChangeDynamicsInfoSetJointDamping(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double jointDamping);

B3_SHARED_API int b3ChangeDynamicsInfoSetContactStiffnessAndDamping(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double contactStiffness, double contactDamping);
B3_SHARED_API int b3ChangeDynamicsInfoSetFrictionAnchor(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, int frictionAnchor);
B3_SHARED_API int b3ChangeDynamicsInfoSetCcdSweptSphereRadius(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double ccdSweptSphereRadius);
B3_SHARED_API int b3ChangeDynamicsInfoSetContactProcessingThreshold(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkIndex, double contactProcessingThreshold);
B3_SHARED_API int b3ChangeDynamicsInfoSetActivationState(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int activationState);
B3_SHARED_API int b3ChangeDynamicsInfoSetMaxJointVelocity(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, double maxJointVelocity);
B3_SHARED_API int b3ChangeDynamicsInfoSetCollisionMargin(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, double collisionMargin);
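/* Usage sketch (added illustration): batch several dynamics changes into one
   command. linkIndex -1 is assumed to address the base link.

    b3SharedMemoryCommandHandle cmd = b3InitChangeDynamicsInfo(physClient);
    b3ChangeDynamicsInfoSetMass(cmd, bodyUniqueId, -1, 2.0);
    b3ChangeDynamicsInfoSetLateralFriction(cmd, bodyUniqueId, -1, 0.8);
    b3ChangeDynamicsInfoSetRestitution(cmd, bodyUniqueId, -1, 0.1);
    b3SubmitClientCommandAndWaitStatus(physClient, cmd);
*/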

B3_SHARED_API b3SharedMemoryCommandHandle b3InitCreateUserConstraintCommand(b3PhysicsClientHandle physClient, int parentBodyUniqueId, int parentJointIndex, int childBodyUniqueId, int childJointIndex, struct b3JointInfo* info);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitCreateUserConstraintCommand2(b3SharedMemoryCommandHandle commandHandle, int parentBodyUniqueId, int parentJointIndex, int childBodyUniqueId, int childJointIndex, struct b3JointInfo* info);

///return a unique id for the user constraint, after successful creation, or -1 for an invalid constraint id
B3_SHARED_API int b3GetStatusUserConstraintUniqueId(b3SharedMemoryStatusHandle statusHandle);

///change parameters of an existing user constraint
B3_SHARED_API b3SharedMemoryCommandHandle b3InitChangeUserConstraintCommand(b3PhysicsClientHandle physClient, int userConstraintUniqueId);
B3_SHARED_API int b3InitChangeUserConstraintSetPivotInB(b3SharedMemoryCommandHandle commandHandle, const double jointChildPivot[/*3*/]);
B3_SHARED_API int b3InitChangeUserConstraintSetFrameInB(b3SharedMemoryCommandHandle commandHandle, const double jointChildFrameOrn[/*4*/]);
B3_SHARED_API int b3InitChangeUserConstraintSetMaxForce(b3SharedMemoryCommandHandle commandHandle, double maxAppliedForce);
B3_SHARED_API int b3InitChangeUserConstraintSetGearRatio(b3SharedMemoryCommandHandle commandHandle, double gearRatio);
B3_SHARED_API int b3InitChangeUserConstraintSetGearAuxLink(b3SharedMemoryCommandHandle commandHandle, int gearAuxLink);
B3_SHARED_API int b3InitChangeUserConstraintSetRelativePositionTarget(b3SharedMemoryCommandHandle commandHandle, double relativePositionTarget);
B3_SHARED_API int b3InitChangeUserConstraintSetERP(b3SharedMemoryCommandHandle commandHandle, double erp);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitRemoveUserConstraintCommand(b3PhysicsClientHandle physClient, int userConstraintUniqueId);

B3_SHARED_API int b3GetNumUserConstraints(b3PhysicsClientHandle physClient);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitGetUserConstraintStateCommand(b3PhysicsClientHandle physClient, int constraintUniqueId);
B3_SHARED_API int b3GetStatusUserConstraintState(b3SharedMemoryStatusHandle statusHandle, struct b3UserConstraintState* constraintState);

B3_SHARED_API int b3GetUserConstraintInfo(b3PhysicsClientHandle physClient, int constraintUniqueId, struct b3UserConstraint* info);
/// return the user constraint id, given the index in range [0, b3GetNumUserConstraints())
B3_SHARED_API int b3GetUserConstraintId(b3PhysicsClientHandle physClient, int serialIndex);

///Request physics debug lines for debug visualization. The flags in debugMode are the same as used in Bullet
///See btIDebugDraw::DebugDrawModes in Bullet/src/LinearMath/btIDebugDraw.h
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestDebugLinesCommand(b3PhysicsClientHandle physClient, int debugMode);

///Get the pointers to the physics debug line information, after b3InitRequestDebugLinesCommand returns
///status CMD_DEBUG_LINES_COMPLETED
B3_SHARED_API void b3GetDebugLines(b3PhysicsClientHandle physClient, struct b3DebugLines* lines);

///configure the 3D OpenGL debug visualizer (enable/disable GUI widgets, shadows, position camera etc.)
B3_SHARED_API b3SharedMemoryCommandHandle b3InitConfigureOpenGLVisualizer(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitConfigureOpenGLVisualizer2(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API void b3ConfigureOpenGLVisualizerSetVisualizationFlags(b3SharedMemoryCommandHandle commandHandle, int flag, int enabled);
B3_SHARED_API void b3ConfigureOpenGLVisualizerSetLightPosition(b3SharedMemoryCommandHandle commandHandle, const float lightPosition[3]);
B3_SHARED_API void b3ConfigureOpenGLVisualizerSetShadowMapResolution(b3SharedMemoryCommandHandle commandHandle, int shadowMapResolution);
B3_SHARED_API void b3ConfigureOpenGLVisualizerSetShadowMapIntensity(b3SharedMemoryCommandHandle commandHandle, double shadowMapIntensity);
B3_SHARED_API void b3ConfigureOpenGLVisualizerSetLightRgbBackground(b3SharedMemoryCommandHandle commandHandle, const float rgbBackground[3]);

B3_SHARED_API void b3ConfigureOpenGLVisualizerSetShadowMapWorldSize(b3SharedMemoryCommandHandle commandHandle, int shadowMapWorldSize);
B3_SHARED_API void b3ConfigureOpenGLVisualizerSetRemoteSyncTransformInterval(b3SharedMemoryCommandHandle commandHandle, double remoteSyncTransformInterval);

B3_SHARED_API void b3ConfigureOpenGLVisualizerSetViewMatrix(b3SharedMemoryCommandHandle commandHandle, float cameraDistance, float cameraPitch, float cameraYaw, const float cameraTargetPosition[/*3*/]);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestOpenGLVisualizerCameraCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3GetStatusOpenGLVisualizerCamera(b3SharedMemoryStatusHandle statusHandle, struct b3OpenGLVisualizerCameraInfo* camera);
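/* Usage sketch (added illustration): point the OpenGL visualizer camera at
   the origin from a distance of 3 units.

    b3SharedMemoryCommandHandle cmd = b3InitConfigureOpenGLVisualizer(physClient);
    float target[3] = {0.f, 0.f, 0.f};
    b3ConfigureOpenGLVisualizerSetViewMatrix(cmd, 3.0f, -30.0f, 50.0f, target);  // distance, pitch, yaw, target
    b3SubmitClientCommandAndWaitStatus(physClient, cmd);
*/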

/// Add/remove user-specific debug lines and debug text messages
B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugDrawAddLine3D(b3PhysicsClientHandle physClient, const double fromXYZ[/*3*/], const double toXYZ[/*3*/], const double colorRGB[/*3*/], double lineWidth, double lifeTime);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugDrawAddPoints3D(b3PhysicsClientHandle physClient, const double positionsXYZ[/*3n*/], const double colorsRGB[/*3*/], double pointSize, double lifeTime, int pointNum);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugDrawAddText3D(b3PhysicsClientHandle physClient, const char* txt, const double positionXYZ[/*3*/], const double colorRGB[/*3*/], double textSize, double lifeTime);
B3_SHARED_API void b3UserDebugTextSetOptionFlags(b3SharedMemoryCommandHandle commandHandle, int optionFlags);
B3_SHARED_API void b3UserDebugTextSetOrientation(b3SharedMemoryCommandHandle commandHandle, const double orientation[/*4*/]);
B3_SHARED_API void b3UserDebugItemSetReplaceItemUniqueId(b3SharedMemoryCommandHandle commandHandle, int replaceItem);

B3_SHARED_API void b3UserDebugItemSetParentObject(b3SharedMemoryCommandHandle commandHandle, int objectUniqueId, int linkIndex);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugAddParameter(b3PhysicsClientHandle physClient, const char* txt, double rangeMin, double rangeMax, double startValue);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugReadParameter(b3PhysicsClientHandle physClient, int debugItemUniqueId);
B3_SHARED_API int b3GetStatusDebugParameterValue(b3SharedMemoryStatusHandle statusHandle, double* paramValue);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugDrawRemove(b3PhysicsClientHandle physClient, int debugItemUniqueId);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserDebugDrawRemoveAll(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitUserRemoveAllParameters(b3PhysicsClientHandle physClient);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitDebugDrawingCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3SetDebugObjectColor(b3SharedMemoryCommandHandle commandHandle, int objectUniqueId, int linkIndex, const double objectColorRGB[/*3*/]);
B3_SHARED_API void b3RemoveDebugObjectColor(b3SharedMemoryCommandHandle commandHandle, int objectUniqueId, int linkIndex);

///All debug item unique ids are positive: a negative unique id means failure.
B3_SHARED_API int b3GetDebugItemUniqueId(b3SharedMemoryStatusHandle statusHandle);
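/* Usage sketch (added illustration): draw a red debug line and keep its unique
   id so it can be removed later.

    double from[3] = {0, 0, 0}, to[3] = {0, 0, 1}, color[3] = {1, 0, 0};
    b3SharedMemoryCommandHandle cmd = b3InitUserDebugDrawAddLine3D(physClient, from, to, color, 2.0, 0.0);  // lineWidth, lifeTime
    b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int itemUid = b3GetDebugItemUniqueId(status);  // negative means failure
    // later: b3SubmitClientCommandAndWaitStatus(physClient, b3InitUserDebugDrawRemove(physClient, itemUid));
*/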

///request an image from a simulated camera, using a software renderer.
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestCameraImage(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestCameraImage2(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API void b3RequestCameraImageSetCameraMatrices(b3SharedMemoryCommandHandle commandHandle, float viewMatrix[/*16*/], float projectionMatrix[/*16*/]);
B3_SHARED_API void b3RequestCameraImageSetPixelResolution(b3SharedMemoryCommandHandle commandHandle, int width, int height);
B3_SHARED_API void b3RequestCameraImageSetLightDirection(b3SharedMemoryCommandHandle commandHandle, const float lightDirection[/*3*/]);
B3_SHARED_API void b3RequestCameraImageSetLightColor(b3SharedMemoryCommandHandle commandHandle, const float lightColor[/*3*/]);
B3_SHARED_API void b3RequestCameraImageSetLightDistance(b3SharedMemoryCommandHandle commandHandle, float lightDistance);
B3_SHARED_API void b3RequestCameraImageSetLightAmbientCoeff(b3SharedMemoryCommandHandle commandHandle, float lightAmbientCoeff);
B3_SHARED_API void b3RequestCameraImageSetLightDiffuseCoeff(b3SharedMemoryCommandHandle commandHandle, float lightDiffuseCoeff);
B3_SHARED_API void b3RequestCameraImageSetLightSpecularCoeff(b3SharedMemoryCommandHandle commandHandle, float lightSpecularCoeff);
B3_SHARED_API void b3RequestCameraImageSetShadow(b3SharedMemoryCommandHandle commandHandle, int hasShadow);
B3_SHARED_API void b3RequestCameraImageSelectRenderer(b3SharedMemoryCommandHandle commandHandle, int renderer);
B3_SHARED_API void b3RequestCameraImageSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);

B3_SHARED_API void b3GetCameraImageData(b3PhysicsClientHandle physClient, struct b3CameraImageData* imageData);

///set projective texture camera matrices.
B3_SHARED_API void b3RequestCameraImageSetProjectiveTextureMatrices(b3SharedMemoryCommandHandle commandHandle, float viewMatrix[/*16*/], float projectionMatrix[/*16*/]);

///compute a view matrix, helper function for b3RequestCameraImageSetCameraMatrices
B3_SHARED_API void b3ComputeViewMatrixFromPositions(const float cameraPosition[/*3*/], const float cameraTargetPosition[/*3*/], const float cameraUp[/*3*/], float viewMatrix[/*16*/]);
B3_SHARED_API void b3ComputeViewMatrixFromYawPitchRoll(const float cameraTargetPosition[/*3*/], float distance, float yaw, float pitch, float roll, int upAxis, float viewMatrix[/*16*/]);
B3_SHARED_API void b3ComputePositionFromViewMatrix(const float viewMatrix[/*16*/], float cameraPosition[/*3*/], float cameraTargetPosition[/*3*/], float cameraUp[/*3*/]);

///compute a projection matrix, helper function for b3RequestCameraImageSetCameraMatrices
B3_SHARED_API void b3ComputeProjectionMatrix(float left, float right, float bottom, float top, float nearVal, float farVal, float projectionMatrix[/*16*/]);
B3_SHARED_API void b3ComputeProjectionMatrixFOV(float fov, float aspect, float nearVal, float farVal, float projectionMatrix[/*16*/]);
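/* Usage sketch (added illustration): compute view/projection matrices with the
   helpers above and request a 320x240 camera image.

    float view[16], proj[16];
    float eye[3] = {2.f, 2.f, 2.f}, target[3] = {0.f, 0.f, 0.f}, up[3] = {0.f, 0.f, 1.f};
    b3ComputeViewMatrixFromPositions(eye, target, up, view);
    b3ComputeProjectionMatrixFOV(60.0f, 1.333f, 0.1f, 100.0f, proj);  // fov, aspect, near, far

    b3SharedMemoryCommandHandle cmd = b3InitRequestCameraImage(physClient);
    b3RequestCameraImageSetPixelResolution(cmd, 320, 240);
    b3RequestCameraImageSetCameraMatrices(cmd, view, proj);
    b3SubmitClientCommandAndWaitStatus(physClient, cmd);

    struct b3CameraImageData image;
    b3GetCameraImageData(physClient, &image);
*/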

/* obsolete, please use b3ComputeViewProjectionMatrices */
B3_SHARED_API void b3RequestCameraImageSetViewMatrix(b3SharedMemoryCommandHandle commandHandle, const float cameraPosition[/*3*/], const float cameraTargetPosition[/*3*/], const float cameraUp[/*3*/]);
/* obsolete, please use b3ComputeViewProjectionMatrices */
B3_SHARED_API void b3RequestCameraImageSetViewMatrix2(b3SharedMemoryCommandHandle commandHandle, const float cameraTargetPosition[/*3*/], float distance, float yaw, float pitch, float roll, int upAxis);
/* obsolete, please use b3ComputeViewProjectionMatrices */
B3_SHARED_API void b3RequestCameraImageSetProjectionMatrix(b3SharedMemoryCommandHandle commandHandle, float left, float right, float bottom, float top, float nearVal, float farVal);
/* obsolete, please use b3ComputeViewProjectionMatrices */
B3_SHARED_API void b3RequestCameraImageSetFOVProjectionMatrix(b3SharedMemoryCommandHandle commandHandle, float fov, float aspect, float nearVal, float farVal);

///request contact point information
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestContactPointInformation(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3SetContactFilterBodyA(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdA);
B3_SHARED_API void b3SetContactFilterBodyB(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdB);
B3_SHARED_API void b3SetContactFilterLinkA(b3SharedMemoryCommandHandle commandHandle, int linkIndexA);
B3_SHARED_API void b3SetContactFilterLinkB(b3SharedMemoryCommandHandle commandHandle, int linkIndexB);
B3_SHARED_API void b3GetContactPointInformation(b3PhysicsClientHandle physClient, struct b3ContactInformation* contactPointData);

///compute the closest points between two bodies
B3_SHARED_API b3SharedMemoryCommandHandle b3InitClosestDistanceQuery(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3SetClosestDistanceFilterBodyA(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdA);
B3_SHARED_API void b3SetClosestDistanceFilterLinkA(b3SharedMemoryCommandHandle commandHandle, int linkIndexA);
B3_SHARED_API void b3SetClosestDistanceFilterBodyB(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdB);
B3_SHARED_API void b3SetClosestDistanceFilterLinkB(b3SharedMemoryCommandHandle commandHandle, int linkIndexB);
B3_SHARED_API void b3SetClosestDistanceThreshold(b3SharedMemoryCommandHandle commandHandle, double distance);

B3_SHARED_API void b3SetClosestDistanceFilterCollisionShapeA(b3SharedMemoryCommandHandle commandHandle, int collisionShapeA);
B3_SHARED_API void b3SetClosestDistanceFilterCollisionShapeB(b3SharedMemoryCommandHandle commandHandle, int collisionShapeB);
B3_SHARED_API void b3SetClosestDistanceFilterCollisionShapePositionA(b3SharedMemoryCommandHandle commandHandle, const double collisionShapePositionA[/*3*/]);
B3_SHARED_API void b3SetClosestDistanceFilterCollisionShapePositionB(b3SharedMemoryCommandHandle commandHandle, const double collisionShapePositionB[/*3*/]);
B3_SHARED_API void b3SetClosestDistanceFilterCollisionShapeOrientationA(b3SharedMemoryCommandHandle commandHandle, const double collisionShapeOrientationA[/*4*/]);
B3_SHARED_API void b3SetClosestDistanceFilterCollisionShapeOrientationB(b3SharedMemoryCommandHandle commandHandle, const double collisionShapeOrientationB[/*4*/]);

B3_SHARED_API void b3GetClosestPointInformation(b3PhysicsClientHandle physClient, struct b3ContactInformation* contactPointInfo);

///get all the bodies that touch a given axis aligned bounding box specified in world space (min and max coordinates)
B3_SHARED_API b3SharedMemoryCommandHandle b3InitAABBOverlapQuery(b3PhysicsClientHandle physClient, const double aabbMin[/*3*/], const double aabbMax[/*3*/]);
B3_SHARED_API void b3GetAABBOverlapResults(b3PhysicsClientHandle physClient, struct b3AABBOverlapData* data);

//request visual shape information
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestVisualShapeInformation(b3PhysicsClientHandle physClient, int bodyUniqueIdA);
B3_SHARED_API void b3GetVisualShapeInformation(b3PhysicsClientHandle physClient, struct b3VisualShapeInformation* visualShapeInfo);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestCollisionShapeInformation(b3PhysicsClientHandle physClient, int bodyUniqueId, int linkIndex);
B3_SHARED_API void b3GetCollisionShapeInformation(b3PhysicsClientHandle physClient, struct b3CollisionShapeInformation* collisionShapeInfo);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitLoadTexture(b3PhysicsClientHandle physClient, const char* filename);
B3_SHARED_API int b3GetStatusTextureUniqueId(b3SharedMemoryStatusHandle statusHandle);

B3_SHARED_API b3SharedMemoryCommandHandle b3CreateChangeTextureCommandInit(b3PhysicsClientHandle physClient, int textureUniqueId, int width, int height, const char* rgbPixels);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitUpdateVisualShape(b3PhysicsClientHandle physClient, int bodyUniqueId, int jointIndex, int shapeIndex, int textureUniqueId);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitUpdateVisualShape2(b3PhysicsClientHandle physClient, int bodyUniqueId, int jointIndex, int shapeIndex);
B3_SHARED_API void b3UpdateVisualShapeTexture(b3SharedMemoryCommandHandle commandHandle, int textureUniqueId);
B3_SHARED_API void b3UpdateVisualShapeRGBAColor(b3SharedMemoryCommandHandle commandHandle, const double rgbaColor[/*4*/]);
B3_SHARED_API void b3UpdateVisualShapeFlags(b3SharedMemoryCommandHandle commandHandle, int flags);

B3_SHARED_API void b3UpdateVisualShapeSpecularColor(b3SharedMemoryCommandHandle commandHandle, const double specularColor[/*3*/]);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitPhysicsParamCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitPhysicsParamCommand2(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API int b3PhysicsParamSetGravity(b3SharedMemoryCommandHandle commandHandle, double gravx, double gravy, double gravz);
B3_SHARED_API int b3PhysicsParamSetTimeStep(b3SharedMemoryCommandHandle commandHandle, double timeStep);
B3_SHARED_API int b3PhysicsParamSetDefaultContactERP(b3SharedMemoryCommandHandle commandHandle, double defaultContactERP);
B3_SHARED_API int b3PhysicsParamSetDefaultNonContactERP(b3SharedMemoryCommandHandle commandHandle, double defaultNonContactERP);
B3_SHARED_API int b3PhysicsParamSetDefaultFrictionERP(b3SharedMemoryCommandHandle commandHandle, double frictionERP);
B3_SHARED_API int b3PhysicsParamSetDefaultGlobalCFM(b3SharedMemoryCommandHandle commandHandle, double defaultGlobalCFM);
B3_SHARED_API int b3PhysicsParamSetDefaultFrictionCFM(b3SharedMemoryCommandHandle commandHandle, double frictionCFM);
B3_SHARED_API int b3PhysicsParamSetNumSubSteps(b3SharedMemoryCommandHandle commandHandle, int numSubSteps);
B3_SHARED_API int b3PhysicsParamSetRealTimeSimulation(b3SharedMemoryCommandHandle commandHandle, int enableRealTimeSimulation);
B3_SHARED_API int b3PhysicsParamSetNumSolverIterations(b3SharedMemoryCommandHandle commandHandle, int numSolverIterations);
B3_SHARED_API int b3PhysicsParamSetNumNonContactInnerIterations(b3SharedMemoryCommandHandle commandHandle, int numMotorIterations);
B3_SHARED_API int b3PhysicsParamSetWarmStartingFactor(b3SharedMemoryCommandHandle commandHandle, double warmStartingFactor);
B3_SHARED_API int b3PhysicsParamSetArticulatedWarmStartingFactor(b3SharedMemoryCommandHandle commandHandle, double warmStartingFactor);
B3_SHARED_API int b3PhysicsParamSetCollisionFilterMode(b3SharedMemoryCommandHandle commandHandle, int filterMode);
B3_SHARED_API int b3PhysicsParamSetUseSplitImpulse(b3SharedMemoryCommandHandle commandHandle, int useSplitImpulse);
B3_SHARED_API int b3PhysicsParamSetSplitImpulsePenetrationThreshold(b3SharedMemoryCommandHandle commandHandle, double splitImpulsePenetrationThreshold);
B3_SHARED_API int b3PhysicsParamSetContactBreakingThreshold(b3SharedMemoryCommandHandle commandHandle, double contactBreakingThreshold);
B3_SHARED_API int b3PhysicsParamSetMaxNumCommandsPer1ms(b3SharedMemoryCommandHandle commandHandle, int maxNumCmdPer1ms);
B3_SHARED_API int b3PhysicsParamSetEnableFileCaching(b3SharedMemoryCommandHandle commandHandle, int enableFileCaching);
B3_SHARED_API int b3PhysicsParamSetRestitutionVelocityThreshold(b3SharedMemoryCommandHandle commandHandle, double restitutionVelocityThreshold);
B3_SHARED_API int b3PhysicsParamSetEnableConeFriction(b3SharedMemoryCommandHandle commandHandle, int enableConeFriction);
B3_SHARED_API int b3PhysicsParameterSetDeterministicOverlappingPairs(b3SharedMemoryCommandHandle commandHandle, int deterministicOverlappingPairs);
B3_SHARED_API int b3PhysicsParameterSetAllowedCcdPenetration(b3SharedMemoryCommandHandle commandHandle, double allowedCcdPenetration);
B3_SHARED_API int b3PhysicsParameterSetJointFeedbackMode(b3SharedMemoryCommandHandle commandHandle, int jointFeedbackMode);
B3_SHARED_API int b3PhysicsParamSetSolverResidualThreshold(b3SharedMemoryCommandHandle commandHandle, double solverResidualThreshold);
B3_SHARED_API int b3PhysicsParamSetContactSlop(b3SharedMemoryCommandHandle commandHandle, double contactSlop);
B3_SHARED_API int b3PhysicsParameterSetEnableSAT(b3SharedMemoryCommandHandle commandHandle, int enableSAT);
B3_SHARED_API int b3PhysicsParameterSetConstraintSolverType(b3SharedMemoryCommandHandle commandHandle, int constraintSolverType);
B3_SHARED_API int b3PhysicsParameterSetMinimumSolverIslandSize(b3SharedMemoryCommandHandle commandHandle, int minimumSolverIslandSize);
B3_SHARED_API int b3PhysicsParamSetSolverAnalytics(b3SharedMemoryCommandHandle commandHandle, int reportSolverAnalytics);
B3_SHARED_API int b3PhysicsParameterSetSparseSdfVoxelSize(b3SharedMemoryCommandHandle commandHandle, double sparseSdfVoxelSize);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitRequestPhysicsParamCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3GetStatusPhysicsSimulationParameters(b3SharedMemoryStatusHandle statusHandle, struct b3PhysicsSimulationParameters* params);
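/* Usage sketch (added illustration): set a few global simulation parameters in
   one command.

    b3SharedMemoryCommandHandle cmd = b3InitPhysicsParamCommand(physClient);
    b3PhysicsParamSetGravity(cmd, 0, 0, -9.81);
    b3PhysicsParamSetTimeStep(cmd, 1.0 / 240.0);
    b3PhysicsParamSetNumSolverIterations(cmd, 50);
    b3SubmitClientCommandAndWaitStatus(physClient, cmd);
*/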

//b3PhysicsParamSetInternalSimFlags is for internal/temporary/easter-egg/experimental demo purposes
//Use at own risk: magic things may or may not happen when calling this API
B3_SHARED_API int b3PhysicsParamSetInternalSimFlags(b3SharedMemoryCommandHandle commandHandle, int flags);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitStepSimulationCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitStepSimulationCommand2(b3SharedMemoryCommandHandle commandHandle);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitPerformCollisionDetectionCommand(b3PhysicsClientHandle physClient);

B3_SHARED_API int b3GetStatusForwardDynamicsAnalyticsData(b3SharedMemoryStatusHandle statusHandle, struct b3ForwardDynamicsAnalyticsArgs* analyticsData);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitResetSimulationCommand(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitResetSimulationCommand2(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API int b3InitResetSimulationSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);
///Load a robot from a URDF file. Status type will be CMD_URDF_LOADING_COMPLETED.
///Access the robot from the unique body index, through b3GetStatusBodyIndex(statusHandle);
B3_SHARED_API b3SharedMemoryCommandHandle b3LoadUrdfCommandInit(b3PhysicsClientHandle physClient, const char* urdfFileName);
B3_SHARED_API b3SharedMemoryCommandHandle b3LoadUrdfCommandInit2(b3SharedMemoryCommandHandle commandHandle, const char* urdfFileName);
B3_SHARED_API int b3LoadUrdfCommandSetStartPosition(b3SharedMemoryCommandHandle commandHandle, double startPosX, double startPosY, double startPosZ);
B3_SHARED_API int b3LoadUrdfCommandSetStartOrientation(b3SharedMemoryCommandHandle commandHandle, double startOrnX, double startOrnY, double startOrnZ, double startOrnW);
B3_SHARED_API int b3LoadUrdfCommandSetUseMultiBody(b3SharedMemoryCommandHandle commandHandle, int useMultiBody);
B3_SHARED_API int b3LoadUrdfCommandSetUseFixedBase(b3SharedMemoryCommandHandle commandHandle, int useFixedBase);
B3_SHARED_API int b3LoadUrdfCommandSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);
B3_SHARED_API int b3LoadUrdfCommandSetGlobalScaling(b3SharedMemoryCommandHandle commandHandle, double globalScaling);
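/* Usage sketch (added illustration): load a robot from a URDF file and obtain
   its body unique id. The file name is a hypothetical placeholder.

    b3SharedMemoryCommandHandle cmd = b3LoadUrdfCommandInit(physClient, "myRobot.urdf");  // hypothetical file
    b3LoadUrdfCommandSetStartPosition(cmd, 0, 0, 1);
    b3LoadUrdfCommandSetUseFixedBase(cmd, 0);
    b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    if (b3GetStatusType(status) == CMD_URDF_LOADING_COMPLETED)
    {
        int bodyUniqueId = b3GetStatusBodyIndex(status);
    }
*/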

B3_SHARED_API b3SharedMemoryCommandHandle b3SaveStateCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3InitRemoveStateCommand(b3PhysicsClientHandle physClient, int stateId);
B3_SHARED_API int b3GetStatusGetStateId(b3SharedMemoryStatusHandle statusHandle);

B3_SHARED_API b3SharedMemoryCommandHandle b3LoadStateCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3LoadStateSetStateId(b3SharedMemoryCommandHandle commandHandle, int stateId);
B3_SHARED_API int b3LoadStateSetFileName(b3SharedMemoryCommandHandle commandHandle, const char* fileName);

B3_SHARED_API b3SharedMemoryCommandHandle b3LoadBulletCommandInit(b3PhysicsClientHandle physClient, const char* fileName);

B3_SHARED_API b3SharedMemoryCommandHandle b3SaveBulletCommandInit(b3PhysicsClientHandle physClient, const char* fileName);
B3_SHARED_API b3SharedMemoryCommandHandle b3LoadMJCFCommandInit(b3PhysicsClientHandle physClient, const char* fileName);
B3_SHARED_API b3SharedMemoryCommandHandle b3LoadMJCFCommandInit2(b3SharedMemoryCommandHandle commandHandle, const char* fileName);
B3_SHARED_API void b3LoadMJCFCommandSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);
B3_SHARED_API void b3LoadMJCFCommandSetUseMultiBody(b3SharedMemoryCommandHandle commandHandle, int useMultiBody);

///compute the forces to achieve an acceleration, given a state q and qdot using inverse dynamics
B3_SHARED_API b3SharedMemoryCommandHandle b3CalculateInverseDynamicsCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId,
	const double* jointPositionsQ, const double* jointVelocitiesQdot, const double* jointAccelerations);
B3_SHARED_API b3SharedMemoryCommandHandle b3CalculateInverseDynamicsCommandInit2(b3PhysicsClientHandle physClient, int bodyUniqueId,
	const double* jointPositionsQ, int dofCountQ, const double* jointVelocitiesQdot, const double* jointAccelerations, int dofCountQdot);
B3_SHARED_API void b3CalculateInverseDynamicsSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);

B3_SHARED_API int b3GetStatusInverseDynamicsJointForces(b3SharedMemoryStatusHandle statusHandle,
	int* bodyUniqueId,
	int* dofCount,
	double* jointForces);
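/* Usage sketch (added illustration): compute joint forces for a desired
   acceleration. q, qdot and qddot are caller-provided arrays whose length
   matches the DoF count (e.g. from b3ComputeDofCount), and jointForces must
   have room for dofCount results.

    b3SharedMemoryCommandHandle cmd = b3CalculateInverseDynamicsCommandInit2(
        physClient, bodyUniqueId, q, dofCount, qdot, qddot, dofCount);
    b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int outBodyUniqueId, outDofCount;
    if (b3GetStatusInverseDynamicsJointForces(status, &outBodyUniqueId, &outDofCount, jointForces))
    {
        // jointForces[0..outDofCount-1] now holds the required generalized forces
    }
*/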

B3_SHARED_API b3SharedMemoryCommandHandle b3CalculateJacobianCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId, int linkIndex, const double* localPosition, const double* jointPositionsQ, const double* jointVelocitiesQdot, const double* jointAccelerations);
B3_SHARED_API int b3GetStatusJacobian(b3SharedMemoryStatusHandle statusHandle,
	int* dofCount,
	double* linearJacobian,
	double* angularJacobian);

B3_SHARED_API b3SharedMemoryCommandHandle b3CalculateMassMatrixCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId, const double* jointPositionsQ, int dofCountQ);
B3_SHARED_API void b3CalculateMassMatrixSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);
///the mass matrix is stored in column-major layout of size dofCount*dofCount
B3_SHARED_API int b3GetStatusMassMatrix(b3PhysicsClientHandle physClient, b3SharedMemoryStatusHandle statusHandle, int* dofCount, double* massMatrix);

///compute the joint positions to move the end effector to a desired target using inverse kinematics
B3_SHARED_API b3SharedMemoryCommandHandle b3CalculateInverseKinematicsCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API void b3CalculateInverseKinematicsAddTargetPurePosition(b3SharedMemoryCommandHandle commandHandle, int endEffectorLinkIndex, const double targetPosition[/*3*/]);
B3_SHARED_API void b3CalculateInverseKinematicsAddTargetsPurePosition(b3SharedMemoryCommandHandle commandHandle, int numEndEffectorLinkIndices, const int* endEffectorIndices, const double* targetPositions);
B3_SHARED_API void b3CalculateInverseKinematicsAddTargetPositionWithOrientation(b3SharedMemoryCommandHandle commandHandle, int endEffectorLinkIndex, const double targetPosition[/*3*/], const double targetOrientation[/*4*/]);
B3_SHARED_API void b3CalculateInverseKinematicsPosWithNullSpaceVel(b3SharedMemoryCommandHandle commandHandle, int numDof, int endEffectorLinkIndex, const double targetPosition[/*3*/], const double* lowerLimit, const double* upperLimit, const double* jointRange, const double* restPose);
B3_SHARED_API void b3CalculateInverseKinematicsPosOrnWithNullSpaceVel(b3SharedMemoryCommandHandle commandHandle, int numDof, int endEffectorLinkIndex, const double targetPosition[/*3*/], const double targetOrientation[/*4*/], const double* lowerLimit, const double* upperLimit, const double* jointRange, const double* restPose);
B3_SHARED_API void b3CalculateInverseKinematicsSetJointDamping(b3SharedMemoryCommandHandle commandHandle, int numDof, const double* jointDampingCoeff);
B3_SHARED_API void b3CalculateInverseKinematicsSelectSolver(b3SharedMemoryCommandHandle commandHandle, int solver);
B3_SHARED_API int b3GetStatusInverseKinematicsJointPositions(b3SharedMemoryStatusHandle statusHandle,
	int* bodyUniqueId,
	int* dofCount,
	double* jointPositions);

B3_SHARED_API void b3CalculateInverseKinematicsSetCurrentPositions(b3SharedMemoryCommandHandle commandHandle, int numDof, const double* currentJointPositions);
B3_SHARED_API void b3CalculateInverseKinematicsSetMaxNumIterations(b3SharedMemoryCommandHandle commandHandle, int maxNumIterations);
B3_SHARED_API void b3CalculateInverseKinematicsSetResidualThreshold(b3SharedMemoryCommandHandle commandHandle, double residualThreshold);
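/* Usage sketch (added illustration): solve IK for a pure position target and
   read back the joint positions. jointPositions is a caller-provided array
   sized for the body's DoF count.

    double targetPos[3] = {0.5, 0.0, 0.5};
    b3SharedMemoryCommandHandle cmd = b3CalculateInverseKinematicsCommandInit(physClient, bodyUniqueId);
    b3CalculateInverseKinematicsAddTargetPurePosition(cmd, endEffectorLinkIndex, targetPos);
    b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int outBodyUniqueId, dofCount;
    if (b3GetStatusInverseKinematicsJointPositions(status, &outBodyUniqueId, &dofCount, jointPositions))
    {
        // apply the solution, e.g. via b3JointControlSetDesiredPosition per joint
    }
*/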

B3_SHARED_API b3SharedMemoryCommandHandle b3CollisionFilterCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3SetCollisionFilterPair(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdA,
	int bodyUniqueIdB, int linkIndexA, int linkIndexB, int enableCollision);
B3_SHARED_API void b3SetCollisionFilterGroupMask(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdA,
	int linkIndexA, int collisionFilterGroup, int collisionFilterMask);

B3_SHARED_API b3SharedMemoryCommandHandle b3LoadSdfCommandInit(b3PhysicsClientHandle physClient, const char* sdfFileName);
B3_SHARED_API b3SharedMemoryCommandHandle b3LoadSdfCommandInit2(b3SharedMemoryCommandHandle commandHandle, const char* sdfFileName);

B3_SHARED_API int b3LoadSdfCommandSetUseMultiBody(b3SharedMemoryCommandHandle commandHandle, int useMultiBody);
B3_SHARED_API int b3LoadSdfCommandSetUseGlobalScaling(b3SharedMemoryCommandHandle commandHandle, double globalScaling);

B3_SHARED_API b3SharedMemoryCommandHandle b3SaveWorldCommandInit(b3PhysicsClientHandle physClient, const char* sdfFileName);

///The b3JointControlCommandInit method is obsolete, use b3JointControlCommandInit2 instead
B3_SHARED_API b3SharedMemoryCommandHandle b3JointControlCommandInit(b3PhysicsClientHandle physClient, int controlMode);

///Set joint motor control variables such as desired position/angle, desired velocity and
///applied joint forces, depending on the control mode (CONTROL_MODE_VELOCITY or CONTROL_MODE_TORQUE)
B3_SHARED_API b3SharedMemoryCommandHandle b3JointControlCommandInit2(b3PhysicsClientHandle physClient, int bodyUniqueId, int controlMode);
B3_SHARED_API b3SharedMemoryCommandHandle b3JointControlCommandInit2Internal(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int controlMode);

///Only use when controlMode is CONTROL_MODE_POSITION_VELOCITY_PD
B3_SHARED_API int b3JointControlSetDesiredPosition(b3SharedMemoryCommandHandle commandHandle, int qIndex, double value);
B3_SHARED_API int b3JointControlSetDesiredPositionMultiDof(b3SharedMemoryCommandHandle commandHandle, int qIndex, const double* position, int dofCount);

B3_SHARED_API int b3JointControlSetKp(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double value);
B3_SHARED_API int b3JointControlSetKpMultiDof(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double* kps, int dofCount);
B3_SHARED_API int b3JointControlSetKd(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double value);
B3_SHARED_API int b3JointControlSetKdMultiDof(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double* kds, int dofCount);
B3_SHARED_API int b3JointControlSetMaximumVelocity(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double maximumVelocity);

///Only use when controlMode is CONTROL_MODE_VELOCITY
B3_SHARED_API int b3JointControlSetDesiredVelocity(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double value); /* find a better name for dof/q/u indices, point to b3JointInfo */
B3_SHARED_API int b3JointControlSetDesiredVelocityMultiDof(b3SharedMemoryCommandHandle commandHandle, int dofIndex, const double* velocity, int dofCount);
B3_SHARED_API int b3JointControlSetDesiredVelocityMultiDof2(b3SharedMemoryCommandHandle commandHandle, int dofIndex, const double* velocity, int dofCount);

B3_SHARED_API int b3JointControlSetMaximumForce(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double value);
B3_SHARED_API int b3JointControlSetDesiredForceTorqueMultiDof(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double* forces, int dofCount);
B3_SHARED_API int b3JointControlSetDamping(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double value);
B3_SHARED_API int b3JointControlSetDampingMultiDof(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double* damping, int dofCount);

///Only use when controlMode is CONTROL_MODE_TORQUE
B3_SHARED_API int b3JointControlSetDesiredForceTorque(b3SharedMemoryCommandHandle commandHandle, int dofIndex, double value);
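/* Usage sketch (added illustration): position control of one joint. The qIndex
   and uIndex of the driven joint are assumed to come from its b3JointInfo.

    b3SharedMemoryCommandHandle cmd = b3JointControlCommandInit2(physClient, bodyUniqueId, CONTROL_MODE_POSITION_VELOCITY_PD);
    b3JointControlSetDesiredPosition(cmd, qIndex, 0.5);
    b3JointControlSetKp(cmd, uIndex, 0.1);
    b3JointControlSetKd(cmd, uIndex, 1.0);
    b3JointControlSetMaximumForce(cmd, uIndex, 100.0);
    b3SubmitClientCommandAndWaitStatus(physClient, cmd);
*/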

///the creation of collision shapes and rigid bodies etc is likely going to change,
///but good to have a b3CreateBoxShapeCommandInit for now

B3_SHARED_API b3SharedMemoryCommandHandle b3CreateCollisionShapeCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3CreateCollisionShapeAddSphere(b3SharedMemoryCommandHandle commandHandle, double radius);
B3_SHARED_API int b3CreateCollisionShapeAddBox(b3SharedMemoryCommandHandle commandHandle, const double halfExtents[/*3*/]);
B3_SHARED_API int b3CreateCollisionShapeAddCapsule(b3SharedMemoryCommandHandle commandHandle, double radius, double height);
B3_SHARED_API int b3CreateCollisionShapeAddCylinder(b3SharedMemoryCommandHandle commandHandle, double radius, double height);
B3_SHARED_API int b3CreateCollisionShapeAddHeightfield(b3SharedMemoryCommandHandle commandHandle, const char* fileName, const double meshScale[/*3*/], double textureScaling);
B3_SHARED_API int b3CreateCollisionShapeAddHeightfield2(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, const double meshScale[/*3*/], double textureScaling, float* heightfieldData, int numHeightfieldRows, int numHeightfieldColumns, int replaceHeightfieldIndex);

B3_SHARED_API int b3CreateCollisionShapeAddPlane(b3SharedMemoryCommandHandle commandHandle, const double planeNormal[/*3*/], double planeConstant);
B3_SHARED_API int b3CreateCollisionShapeAddMesh(b3SharedMemoryCommandHandle commandHandle, const char* fileName, const double meshScale[/*3*/]);
B3_SHARED_API int b3CreateCollisionShapeAddConvexMesh(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, const double meshScale[/*3*/], const double* vertices, int numVertices);
B3_SHARED_API int b3CreateCollisionShapeAddConcaveMesh(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, const double meshScale[/*3*/], const double* vertices, int numVertices, const int* indices, int numIndices);
B3_SHARED_API void b3CreateCollisionSetFlag(b3SharedMemoryCommandHandle commandHandle, int shapeIndex, int flags);
B3_SHARED_API void b3CreateCollisionShapeSetChildTransform(b3SharedMemoryCommandHandle commandHandle, int shapeIndex, const double childPosition[/*3*/], const double childOrientation[/*4*/]);
B3_SHARED_API int b3GetStatusCollisionShapeUniqueId(b3SharedMemoryStatusHandle statusHandle);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitRemoveCollisionShapeCommand(b3PhysicsClientHandle physClient, int collisionShapeId);

B3_SHARED_API b3SharedMemoryCommandHandle b3GetMeshDataCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId, int linkIndex);
B3_SHARED_API b3SharedMemoryCommandHandle b3GetTetraMeshDataCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API void b3GetMeshDataSimulationMesh(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API void b3MeshDataSimulationMeshVelocity(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API void b3GetMeshDataSetCollisionShapeIndex(b3SharedMemoryCommandHandle commandHandle, int shapeIndex);
B3_SHARED_API void b3GetMeshDataSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);
B3_SHARED_API void b3GetTetraMeshDataSetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);

B3_SHARED_API void b3GetMeshData(b3PhysicsClientHandle physClient, struct b3MeshData* meshData);
B3_SHARED_API void b3GetTetraMeshData(b3PhysicsClientHandle physClient, struct b3TetraMeshData* meshData);

B3_SHARED_API b3SharedMemoryCommandHandle b3ResetMeshDataCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId, int num_vertices, const double* vertices);

B3_SHARED_API b3SharedMemoryCommandHandle b3CreateVisualShapeCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3CreateVisualShapeAddSphere(b3SharedMemoryCommandHandle commandHandle, double radius);
B3_SHARED_API int b3CreateVisualShapeAddBox(b3SharedMemoryCommandHandle commandHandle, const double halfExtents[/*3*/]);
B3_SHARED_API int b3CreateVisualShapeAddCapsule(b3SharedMemoryCommandHandle commandHandle, double radius, double height);
B3_SHARED_API int b3CreateVisualShapeAddCylinder(b3SharedMemoryCommandHandle commandHandle, double radius, double height);
B3_SHARED_API int b3CreateVisualShapeAddPlane(b3SharedMemoryCommandHandle commandHandle, const double planeNormal[/*3*/], double planeConstant);
B3_SHARED_API int b3CreateVisualShapeAddMesh(b3SharedMemoryCommandHandle commandHandle, const char* fileName, const double meshScale[/*3*/]);
B3_SHARED_API int b3CreateVisualShapeAddMesh2(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, const double meshScale[/*3*/], const double* vertices, int numVertices, const int* indices, int numIndices, const double* normals, int numNormals, const double* uvs, int numUVs);

B3_SHARED_API void b3CreateVisualSetFlag(b3SharedMemoryCommandHandle commandHandle, int shapeIndex, int flags);
B3_SHARED_API void b3CreateVisualShapeSetChildTransform(b3SharedMemoryCommandHandle commandHandle, int shapeIndex, const double childPosition[/*3*/], const double childOrientation[/*4*/]);
B3_SHARED_API void b3CreateVisualShapeSetSpecularColor(b3SharedMemoryCommandHandle commandHandle, int shapeIndex, const double specularColor[/*3*/]);
B3_SHARED_API void b3CreateVisualShapeSetRGBAColor(b3SharedMemoryCommandHandle commandHandle, int shapeIndex, const double rgbaColor[/*4*/]);

B3_SHARED_API int b3GetStatusVisualShapeUniqueId(b3SharedMemoryStatusHandle statusHandle);

B3_SHARED_API b3SharedMemoryCommandHandle b3CreateMultiBodyCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3CreateMultiBodyBase(b3SharedMemoryCommandHandle commandHandle, double mass, int collisionShapeUnique, int visualShapeUniqueId, const double basePosition[/*3*/], const double baseOrientation[/*4*/], const double baseInertialFramePosition[/*3*/], const double baseInertialFrameOrientation[/*4*/]);
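/* Usage sketch (added illustration): create a dynamic sphere body from a
   freshly created collision shape (no visual shape, index -1).

    b3SharedMemoryCommandHandle cmd = b3CreateCollisionShapeCommandInit(physClient);
    b3CreateCollisionShapeAddSphere(cmd, 0.5);  // radius
    b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int collisionShapeId = b3GetStatusCollisionShapeUniqueId(status);

    double pos[3] = {0, 0, 1}, orn[4] = {0, 0, 0, 1};
    double inertialPos[3] = {0, 0, 0}, inertialOrn[4] = {0, 0, 0, 1};
    cmd = b3CreateMultiBodyCommandInit(physClient);
    b3CreateMultiBodyBase(cmd, 1.0, collisionShapeId, -1, pos, orn, inertialPos, inertialOrn);  // mass 1
    status = b3SubmitClientCommandAndWaitStatus(physClient, cmd);
    int bodyUniqueId = b3GetStatusBodyIndex(status);
*/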
|
||||
|
||||
B3_SHARED_API int b3CreateMultiBodyLink(b3SharedMemoryCommandHandle commandHandle, double linkMass, double linkCollisionShapeIndex,
|
||||
double linkVisualShapeIndex,
|
||||
const double linkPosition[/*3*/],
|
||||
const double linkOrientation[/*4*/],
|
||||
const double linkInertialFramePosition[/*3*/],
|
||||
const double linkInertialFrameOrientation[/*4*/],
|
||||
int linkParentIndex,
|
||||
int linkJointType,
|
||||
const double linkJointAxis[/*3*/]);
|
||||
|
||||
//batch creation is an performance feature to create a large number of multi bodies in one command
|
||||
B3_SHARED_API int b3CreateMultiBodySetBatchPositions(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, double* batchPositions, int numBatchObjects);
|
||||
|
||||
//useMaximalCoordinates are disabled by default, enabling them is experimental and not fully supported yet
|
||||
B3_SHARED_API void b3CreateMultiBodyUseMaximalCoordinates(b3SharedMemoryCommandHandle commandHandle);
|
||||
B3_SHARED_API void b3CreateMultiBodySetFlags(b3SharedMemoryCommandHandle commandHandle, int flags);
|
||||
|
||||
//int b3CreateMultiBodyAddLink(b3SharedMemoryCommandHandle commandHandle, int jointType, int parentLinkIndex, double linkMass, int linkCollisionShapeUnique, int linkVisualShapeUniqueId);
|
||||
|
||||
///create a box of size (1,1,1) at world origin (0,0,0) at orientation quat (0,0,0,1)
|
||||
///after that, you can optionally adjust the initial position, orientation and size
|
||||
B3_SHARED_API b3SharedMemoryCommandHandle b3CreateBoxShapeCommandInit(b3PhysicsClientHandle physClient);
|
||||
B3_SHARED_API int b3CreateBoxCommandSetStartPosition(b3SharedMemoryCommandHandle commandHandle, double startPosX, double startPosY, double startPosZ);
|
||||
B3_SHARED_API int b3CreateBoxCommandSetStartOrientation(b3SharedMemoryCommandHandle commandHandle, double startOrnX, double startOrnY, double startOrnZ, double startOrnW);
|
||||
B3_SHARED_API int b3CreateBoxCommandSetHalfExtents(b3SharedMemoryCommandHandle commandHandle, double halfExtentsX, double halfExtentsY, double halfExtentsZ);
|
||||
B3_SHARED_API int b3CreateBoxCommandSetMass(b3SharedMemoryCommandHandle commandHandle, double mass);
|
||||
B3_SHARED_API int b3CreateBoxCommandSetCollisionShapeType(b3SharedMemoryCommandHandle commandHandle, int collisionShapeType);
|
||||
B3_SHARED_API int b3CreateBoxCommandSetColorRGBA(b3SharedMemoryCommandHandle commandHandle, double red, double green, double blue, double alpha);

///b3CreatePoseCommandInit will initialize (teleport) the pose of a body/robot. You can individually set the base position,
///base orientation and joint angles. This will set all velocities of base and joints to zero.
///This is not a robot control command using actuators/joint motors, but a manual repositioning of the robot.
B3_SHARED_API b3SharedMemoryCommandHandle b3CreatePoseCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API b3SharedMemoryCommandHandle b3CreatePoseCommandInit2(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId);

B3_SHARED_API int b3CreatePoseCommandSetBasePosition(b3SharedMemoryCommandHandle commandHandle, double startPosX, double startPosY, double startPosZ);
B3_SHARED_API int b3CreatePoseCommandSetBaseOrientation(b3SharedMemoryCommandHandle commandHandle, double startOrnX, double startOrnY, double startOrnZ, double startOrnW);
B3_SHARED_API int b3CreatePoseCommandSetBaseLinearVelocity(b3SharedMemoryCommandHandle commandHandle, const double linVel[/*3*/]);
B3_SHARED_API int b3CreatePoseCommandSetBaseAngularVelocity(b3SharedMemoryCommandHandle commandHandle, const double angVel[/*3*/]);
B3_SHARED_API int b3CreatePoseCommandSetBaseScaling(b3SharedMemoryCommandHandle commandHandle, double scaling[/*3*/]);


B3_SHARED_API int b3CreatePoseCommandSetJointPositions(b3SharedMemoryCommandHandle commandHandle, int numJointPositions, const double* jointPositions);
B3_SHARED_API int b3CreatePoseCommandSetJointPosition(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, int jointIndex, double jointPosition);
B3_SHARED_API int b3CreatePoseCommandSetJointPositionMultiDof(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, int jointIndex, const double* jointPosition, int posSize);

B3_SHARED_API int b3CreatePoseCommandSetQ(b3SharedMemoryCommandHandle commandHandle, int numJointPositions, const double* q, const int* hasQ);
B3_SHARED_API int b3CreatePoseCommandSetQdots(b3SharedMemoryCommandHandle commandHandle, int numJointVelocities, const double* qDots, const int* hasQdots);

B3_SHARED_API int b3CreatePoseCommandSetJointVelocities(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, int numJointVelocities, const double* jointVelocities);
B3_SHARED_API int b3CreatePoseCommandSetJointVelocity(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, int jointIndex, double jointVelocity);
B3_SHARED_API int b3CreatePoseCommandSetJointVelocityMultiDof(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, int jointIndex, const double* jointVelocity, int velSize);
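
/* Sketch: teleport body 'bodyId' to a new base pose and zero joint 0.
   'sm' and 'bodyId' are assumed from earlier commands; errors ignored. */
static void teleportBody(b3PhysicsClientHandle sm, int bodyId)
{
	b3SharedMemoryCommandHandle cmd = b3CreatePoseCommandInit(sm, bodyId);
	b3CreatePoseCommandSetBasePosition(cmd, 0, 0, 0.5);
	b3CreatePoseCommandSetBaseOrientation(cmd, 0, 0, 0, 1);
	b3CreatePoseCommandSetJointPosition(sm, cmd, 0, 0.0);
	b3SubmitClientCommandAndWaitStatus(sm, cmd);
}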

///We are currently not reading the sensor information from the URDF file; instead, sensors are assigned programmatically.
///Mixing programmatic creation with loading from file is rather inconsistent.
B3_SHARED_API b3SharedMemoryCommandHandle b3CreateSensorCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API int b3CreateSensorEnable6DofJointForceTorqueSensor(b3SharedMemoryCommandHandle commandHandle, int jointIndex, int enable);
///b3CreateSensorEnableIMUForLink is not implemented yet.
///For now, if the IMU is located in the root link, use the root world transform to mimic an IMU.
B3_SHARED_API int b3CreateSensorEnableIMUForLink(b3SharedMemoryCommandHandle commandHandle, int linkIndex, int enable);

B3_SHARED_API b3SharedMemoryCommandHandle b3RequestActualStateCommandInit(b3PhysicsClientHandle physClient, int bodyUniqueId);
B3_SHARED_API b3SharedMemoryCommandHandle b3RequestActualStateCommandInit2(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId);

B3_SHARED_API int b3RequestActualStateCommandComputeLinkVelocity(b3SharedMemoryCommandHandle commandHandle, int computeLinkVelocity);
B3_SHARED_API int b3RequestActualStateCommandComputeForwardKinematics(b3SharedMemoryCommandHandle commandHandle, int computeForwardKinematics);

B3_SHARED_API int b3GetJointState(b3PhysicsClientHandle physClient, b3SharedMemoryStatusHandle statusHandle, int jointIndex, struct b3JointSensorState* state);
B3_SHARED_API int b3GetJointStateMultiDof(b3PhysicsClientHandle physClient, b3SharedMemoryStatusHandle statusHandle, int jointIndex, struct b3JointSensorState2* state);
B3_SHARED_API int b3GetLinkState(b3PhysicsClientHandle physClient, b3SharedMemoryStatusHandle statusHandle, int linkIndex, struct b3LinkState* state);
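
/* Sketch: query the current angle of joint 0. Assumes 'sm' and 'bodyId';
   b3JointSensorState and its m_jointPosition member are declared in
   SharedMemoryPublic.h. */
static double readJoint0Position(b3PhysicsClientHandle sm, int bodyId)
{
	struct b3JointSensorState state;
	b3SharedMemoryCommandHandle cmd = b3RequestActualStateCommandInit(sm, bodyId);
	b3SharedMemoryStatusHandle status = b3SubmitClientCommandAndWaitStatus(sm, cmd);
	if (!b3GetJointState(sm, status, 0, &state))
		return 0.0; /* joint index out of range */
	return state.m_jointPosition;
}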

B3_SHARED_API b3SharedMemoryCommandHandle b3PickBody(b3PhysicsClientHandle physClient, double rayFromWorldX,
                                                     double rayFromWorldY, double rayFromWorldZ,
                                                     double rayToWorldX, double rayToWorldY, double rayToWorldZ);
B3_SHARED_API b3SharedMemoryCommandHandle b3MovePickedBody(b3PhysicsClientHandle physClient, double rayFromWorldX,
                                                           double rayFromWorldY, double rayFromWorldZ,
                                                           double rayToWorldX, double rayToWorldY,
                                                           double rayToWorldZ);
B3_SHARED_API b3SharedMemoryCommandHandle b3RemovePickingConstraint(b3PhysicsClientHandle physClient);

B3_SHARED_API b3SharedMemoryCommandHandle b3CreateRaycastCommandInit(b3PhysicsClientHandle physClient, double rayFromWorldX,
                                                                     double rayFromWorldY, double rayFromWorldZ,
                                                                     double rayToWorldX, double rayToWorldY, double rayToWorldZ);

B3_SHARED_API b3SharedMemoryCommandHandle b3CreateRaycastBatchCommandInit(b3PhysicsClientHandle physClient);
// Sets the number of threads used to compute the ray intersections for the batch. Specify 0 to let Bullet decide, 1 (default) for single-core execution, or 2 or more to select the number of threads.
B3_SHARED_API void b3RaycastBatchSetNumThreads(b3SharedMemoryCommandHandle commandHandle, int numThreads);
//max num rays for b3RaycastBatchAddRay is MAX_RAY_INTERSECTION_BATCH_SIZE
B3_SHARED_API void b3RaycastBatchAddRay(b3SharedMemoryCommandHandle commandHandle, const double rayFromWorld[/*3*/], const double rayToWorld[/*3*/]);
//max num rays for b3RaycastBatchAddRays is MAX_RAY_INTERSECTION_BATCH_SIZE_STREAMING
B3_SHARED_API void b3RaycastBatchAddRays(b3PhysicsClientHandle physClient, b3SharedMemoryCommandHandle commandHandle, const double* rayFromWorld, const double* rayToWorld, int numRays);
B3_SHARED_API void b3RaycastBatchSetParentObject(b3SharedMemoryCommandHandle commandHandle, int parentObjectUniqueId, int parentLinkIndex);
B3_SHARED_API void b3RaycastBatchSetReportHitNumber(b3SharedMemoryCommandHandle commandHandle, int reportHitNumber);
B3_SHARED_API void b3RaycastBatchSetCollisionFilterMask(b3SharedMemoryCommandHandle commandHandle, int collisionFilterMask);
B3_SHARED_API void b3RaycastBatchSetFractionEpsilon(b3SharedMemoryCommandHandle commandHandle, double fractionEpsilon);

B3_SHARED_API void b3GetRaycastInformation(b3PhysicsClientHandle physClient, struct b3RaycastInformation* raycastInfo);
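
/* Sketch: cast one downward ray and read back the hits. Assumes 'sm';
   struct b3RaycastInformation and its members come from SharedMemoryPublic.h. */
static void castSingleRay(b3PhysicsClientHandle sm)
{
	double from[3] = {0, 0, 10};
	double to[3] = {0, 0, -10};
	struct b3RaycastInformation info;
	b3SharedMemoryCommandHandle cmd = b3CreateRaycastBatchCommandInit(sm);
	b3RaycastBatchSetNumThreads(cmd, 1);
	b3RaycastBatchAddRay(cmd, from, to);
	b3SubmitClientCommandAndWaitStatus(sm, cmd);
	b3GetRaycastInformation(sm, &info);
	/* inspect info.m_numRayHits and info.m_rayHits[i] here */
}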

/// Apply an external force at the body (or link) center of mass, in world space/Cartesian coordinates.
B3_SHARED_API b3SharedMemoryCommandHandle b3ApplyExternalForceCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3ApplyExternalForce(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkId, const double force[/*3*/], const double position[/*3*/], int flag);
B3_SHARED_API void b3ApplyExternalTorque(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueId, int linkId, const double torque[/*3*/], int flag);
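
/* Sketch: push the base of 'bodyId' upward for one step. EF_WORLD_FRAME is
   the frame flag from SharedMemoryPublic.h; linkId -1 targets the base. */
static void nudgeBodyUp(b3PhysicsClientHandle sm, int bodyId)
{
	double force[3] = {0, 0, 100};
	double position[3] = {0, 0, 0};
	b3SharedMemoryCommandHandle cmd = b3ApplyExternalForceCommandInit(sm);
	b3ApplyExternalForce(cmd, bodyId, -1, force, position, EF_WORLD_FRAME);
	b3SubmitClientCommandAndWaitStatus(sm, cmd);
}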

///experimental: robots interacting with non-rigid objects (such as btSoftBody)
B3_SHARED_API b3SharedMemoryCommandHandle b3LoadSoftBodyCommandInit(b3PhysicsClientHandle physClient, const char* fileName);
B3_SHARED_API int b3LoadSoftBodySetScale(b3SharedMemoryCommandHandle commandHandle, double scale);
B3_SHARED_API int b3LoadSoftBodySetMass(b3SharedMemoryCommandHandle commandHandle, double mass);
B3_SHARED_API int b3LoadSoftBodySetCollisionMargin(b3SharedMemoryCommandHandle commandHandle, double collisionMargin);
B3_SHARED_API int b3LoadSoftBodySetStartPosition(b3SharedMemoryCommandHandle commandHandle, double startPosX, double startPosY, double startPosZ);
B3_SHARED_API int b3LoadSoftBodySetStartOrientation(b3SharedMemoryCommandHandle commandHandle, double startOrnX, double startOrnY, double startOrnZ, double startOrnW);
B3_SHARED_API int b3LoadSoftBodyUpdateSimMesh(b3SharedMemoryCommandHandle commandHandle, const char* filename);
B3_SHARED_API int b3LoadSoftBodyAddCorotatedForce(b3SharedMemoryCommandHandle commandHandle, double corotatedMu, double corotatedLambda);
B3_SHARED_API int b3LoadSoftBodyAddNeoHookeanForce(b3SharedMemoryCommandHandle commandHandle, double NeoHookeanMu, double NeoHookeanLambda, double NeoHookeanDamping);
B3_SHARED_API int b3LoadSoftBodyAddMassSpringForce(b3SharedMemoryCommandHandle commandHandle, double springElasticStiffness, double springDampingStiffness);
B3_SHARED_API int b3LoadSoftBodyAddGravityForce(b3SharedMemoryCommandHandle commandHandle, double gravityX, double gravityY, double gravityZ);
B3_SHARED_API int b3LoadSoftBodySetCollisionHardness(b3SharedMemoryCommandHandle commandHandle, double collisionHardness);
B3_SHARED_API int b3LoadSoftBodySetSelfCollision(b3SharedMemoryCommandHandle commandHandle, int useSelfCollision);
B3_SHARED_API int b3LoadSoftBodySetRepulsionStiffness(b3SharedMemoryCommandHandle commandHandle, double stiffness);
B3_SHARED_API int b3LoadSoftBodyUseFaceContact(b3SharedMemoryCommandHandle commandHandle, int useFaceContact);
B3_SHARED_API int b3LoadSoftBodySetFrictionCoefficient(b3SharedMemoryCommandHandle commandHandle, double frictionCoefficient);
B3_SHARED_API int b3LoadSoftBodyUseBendingSprings(b3SharedMemoryCommandHandle commandHandle, int useBendingSprings, double bendingStiffness);
B3_SHARED_API int b3LoadSoftBodyUseAllDirectionDampingSprings(b3SharedMemoryCommandHandle commandHandle, int useAllDirectionDamping);

B3_SHARED_API b3SharedMemoryCommandHandle b3InitCreateSoftBodyAnchorConstraintCommand(b3PhysicsClientHandle physClient, int softBodyUniqueId, int nodeIndex, int bodyUniqueId, int linkIndex, const double bodyFramePosition[3]);

B3_SHARED_API b3SharedMemoryCommandHandle b3RequestVREventsCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3VREventsSetDeviceTypeFilter(b3SharedMemoryCommandHandle commandHandle, int deviceTypeFilter);

B3_SHARED_API void b3GetVREventsData(b3PhysicsClientHandle physClient, struct b3VREventsData* vrEventsData);

B3_SHARED_API b3SharedMemoryCommandHandle b3SetVRCameraStateCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3SetVRCameraRootPosition(b3SharedMemoryCommandHandle commandHandle, const double rootPos[/*3*/]);
B3_SHARED_API int b3SetVRCameraRootOrientation(b3SharedMemoryCommandHandle commandHandle, const double rootOrn[/*4*/]);
B3_SHARED_API int b3SetVRCameraTrackingObject(b3SharedMemoryCommandHandle commandHandle, int objectUniqueId);
B3_SHARED_API int b3SetVRCameraTrackingObjectFlag(b3SharedMemoryCommandHandle commandHandle, int flag);

B3_SHARED_API b3SharedMemoryCommandHandle b3RequestKeyboardEventsCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API b3SharedMemoryCommandHandle b3RequestKeyboardEventsCommandInit2(b3SharedMemoryCommandHandle commandHandle);
B3_SHARED_API void b3GetKeyboardEventsData(b3PhysicsClientHandle physClient, struct b3KeyboardEventsData* keyboardEventsData);

B3_SHARED_API b3SharedMemoryCommandHandle b3RequestMouseEventsCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API void b3GetMouseEventsData(b3PhysicsClientHandle physClient, struct b3MouseEventsData* mouseEventsData);

B3_SHARED_API b3SharedMemoryCommandHandle b3StateLoggingCommandInit(b3PhysicsClientHandle physClient);
B3_SHARED_API int b3StateLoggingStart(b3SharedMemoryCommandHandle commandHandle, int loggingType, const char* fileName);
B3_SHARED_API int b3StateLoggingAddLoggingObjectUniqueId(b3SharedMemoryCommandHandle commandHandle, int objectUniqueId);
B3_SHARED_API int b3StateLoggingSetMaxLogDof(b3SharedMemoryCommandHandle commandHandle, int maxLogDof);
B3_SHARED_API int b3StateLoggingSetLinkIndexA(b3SharedMemoryCommandHandle commandHandle, int linkIndexA);
B3_SHARED_API int b3StateLoggingSetLinkIndexB(b3SharedMemoryCommandHandle commandHandle, int linkIndexB);
B3_SHARED_API int b3StateLoggingSetBodyAUniqueId(b3SharedMemoryCommandHandle commandHandle, int bodyAUniqueId);
B3_SHARED_API int b3StateLoggingSetBodyBUniqueId(b3SharedMemoryCommandHandle commandHandle, int bodyBUniqueId);
B3_SHARED_API int b3StateLoggingSetDeviceTypeFilter(b3SharedMemoryCommandHandle commandHandle, int deviceTypeFilter);
B3_SHARED_API int b3StateLoggingSetLogFlags(b3SharedMemoryCommandHandle commandHandle, int logFlags);

B3_SHARED_API int b3GetStatusLoggingUniqueId(b3SharedMemoryStatusHandle statusHandle);
B3_SHARED_API int b3StateLoggingStop(b3SharedMemoryCommandHandle commandHandle, int loggingUid);
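
/* Sketch: record a generic robot log to disk, then stop it. The
   STATE_LOGGING_GENERIC_ROBOT constant is from SharedMemoryPublic.h. */
static void recordLog(b3PhysicsClientHandle sm)
{
	b3SharedMemoryCommandHandle cmd = b3StateLoggingCommandInit(sm);
	b3SharedMemoryStatusHandle status;
	int logUid;
	b3StateLoggingStart(cmd, STATE_LOGGING_GENERIC_ROBOT, "robot.log");
	status = b3SubmitClientCommandAndWaitStatus(sm, cmd);
	logUid = b3GetStatusLoggingUniqueId(status);

	/* ... step the simulation ... */

	cmd = b3StateLoggingCommandInit(sm);
	b3StateLoggingStop(cmd, logUid);
	b3SubmitClientCommandAndWaitStatus(sm, cmd);
}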

B3_SHARED_API b3SharedMemoryCommandHandle b3ProfileTimingCommandInit(b3PhysicsClientHandle physClient, const char* name);
B3_SHARED_API void b3SetProfileTimingDuractionInMicroSeconds(b3SharedMemoryCommandHandle commandHandle, int duration);
B3_SHARED_API void b3SetProfileTimingType(b3SharedMemoryCommandHandle commandHandle, int type);

B3_SHARED_API void b3PushProfileTiming(b3PhysicsClientHandle physClient, const char* timingName);
B3_SHARED_API void b3PopProfileTiming(b3PhysicsClientHandle physClient);

B3_SHARED_API void b3SetTimeOut(b3PhysicsClientHandle physClient, double timeOutInSeconds);
B3_SHARED_API double b3GetTimeOut(b3PhysicsClientHandle physClient);

B3_SHARED_API b3SharedMemoryCommandHandle b3SetAdditionalSearchPath(b3PhysicsClientHandle physClient, const char* path);

B3_SHARED_API void b3MultiplyTransforms(const double posA[/*3*/], const double ornA[/*4*/], const double posB[/*3*/], const double ornB[/*4*/], double outPos[/*3*/], double outOrn[/*4*/]);
B3_SHARED_API void b3InvertTransform(const double pos[/*3*/], const double orn[/*4*/], double outPos[/*3*/], double outOrn[/*4*/]);
B3_SHARED_API void b3QuaternionSlerp(const double startQuat[/*4*/], const double endQuat[/*4*/], double interpolationFraction, double outOrn[/*4*/]);
B3_SHARED_API void b3GetQuaternionFromAxisAngle(const double axis[/*3*/], double angle, double outQuat[/*4*/]);
B3_SHARED_API void b3GetAxisAngleFromQuaternion(const double quat[/*4*/], double axis[/*3*/], double* angle);
B3_SHARED_API void b3GetQuaternionDifference(const double startQuat[/*4*/], const double endQuat[/*4*/], double outOrn[/*4*/]);
B3_SHARED_API void b3GetAxisDifferenceQuaternion(const double startQuat[/*4*/], const double endQuat[/*4*/], double axisOut[/*3*/]);
B3_SHARED_API void b3CalculateVelocityQuaternion(const double startQuat[/*4*/], const double endQuat[/*4*/], double deltaTime, double angVelOut[/*3*/]);
B3_SHARED_API void b3RotateVector(const double quat[/*4*/], const double vec[/*3*/], double vecOut[/*3*/]);
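
/* Sketch: compose a parent and child transform, then invert the result.
   Quaternions are (x, y, z, w); all outputs are written in place. */
static void composeAndInvert(void)
{
	const double posA[3] = {1, 0, 0}, ornA[4] = {0, 0, 0, 1};
	const double posB[3] = {0, 2, 0}, ornB[4] = {0, 0, 0.7071, 0.7071};
	double outPos[3], outOrn[4], invPos[3], invOrn[4];
	b3MultiplyTransforms(posA, ornA, posB, ornB, outPos, outOrn);
	b3InvertTransform(outPos, outOrn, invPos, invOrn);
}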

#ifdef BT_ENABLE_VHACD
B3_SHARED_API void b3VHACD(const char* fileNameInput, const char* fileNameOutput, const char* fileNameLogging,
                           double concavity, double alpha, double beta, double gamma, double minVolumePerCH, int resolution,
                           int maxNumVerticesPerCH, int depth, int planeDownsampling, int convexhullDownsampling,
                           int pca, int mode, int convexhullApproximation);

#endif

#ifdef __cplusplus
}
#endif

#endif  //PHYSICS_CLIENT_C_API_H
@ -1,17 +0,0 @@
#ifndef PHYSICS_CLIENT_SHARED_MEMORY2_H
#define PHYSICS_CLIENT_SHARED_MEMORY2_H

#include "PhysicsClientC_API.h"

#ifdef __cplusplus
extern "C"
{
#endif

B3_SHARED_API b3PhysicsClientHandle b3ConnectSharedMemory2(int key);

#ifdef __cplusplus
}
#endif

#endif  //PHYSICS_CLIENT_SHARED_MEMORY2_H
@ -1,17 +0,0 @@
#ifndef PHYSICS_CLIENT_SHARED_MEMORY_H
#define PHYSICS_CLIENT_SHARED_MEMORY_H

#include "PhysicsClientC_API.h"

#ifdef __cplusplus
extern "C"
{
#endif

B3_SHARED_API b3PhysicsClientHandle b3ConnectSharedMemory(int key);

#ifdef __cplusplus
}
#endif

#endif  //PHYSICS_CLIENT_SHARED_MEMORY_H
@ -1,18 +0,0 @@
#ifndef PHYSICS_DIRECT_C_API_H
#define PHYSICS_DIRECT_C_API_H

#include "PhysicsClientC_API.h"

#ifdef __cplusplus
extern "C"
{
#endif

///think more about naming. Directly executes commands without transport (no shared memory, UDP, socket, gRPC, etc.)
B3_SHARED_API b3PhysicsClientHandle b3ConnectPhysicsDirect(void);

#ifdef __cplusplus
}
#endif

#endif  //PHYSICS_DIRECT_C_API_H
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,862 +0,0 @@
/*
 *  LZ4 - Fast LZ compression algorithm
 *  Header File
 *  Copyright (C) 2011-2020, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/
#if defined (__cplusplus)
extern "C" {
#endif

#ifndef LZ4_H_2983827168210
#define LZ4_H_2983827168210

/* --- Dependency --- */
#include <stddef.h>   /* size_t */


/**
  Introduction

  LZ4 is a lossless compression algorithm, providing compression speed >500 MB/s per core,
  scalable with multi-core CPUs. It features an extremely fast decoder, with speed in
  multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.

  The LZ4 compression library provides in-memory compression and decompression functions.
  It gives full buffer control to the user.
  Compression can be done in:
    - a single step (described as Simple Functions)
    - a single step, reusing a context (described in Advanced Functions)
    - unbounded multiple steps (described as Streaming compression)

  lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
  Decompressing such a compressed block requires additional metadata.
  Exact metadata depends on the exact decompression function.
  For the typical case of LZ4_decompress_safe(),
  metadata includes the block's compressed size, and a maximum bound of the decompressed size.
  Each application is free to encode and pass such metadata in whichever way it wants.

  lz4.h only handles blocks; it cannot generate Frames.

  Blocks are different from Frames (doc/lz4_Frame_format.md).
  Frames bundle both blocks and metadata in a specified manner.
  Embedding metadata is required for compressed data to be self-contained and portable.
  Frame format is delivered through a companion API, declared in lz4frame.h.
  The `lz4` CLI can only manage frames.
*/

/*^***************************************************************
 *  Export parameters
 *****************************************************************/
/*
 *  LZ4_DLL_EXPORT :
 *  Enable exporting of functions when building a Windows DLL
 *  LZ4LIB_VISIBILITY :
 *  Control library symbols visibility.
 */
#ifndef LZ4LIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define LZ4LIB_VISIBILITY
#  endif
#endif
#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
#  define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
#  define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define LZ4LIB_API LZ4LIB_VISIBILITY
#endif

/*! LZ4_FREESTANDING :
 *  When this macro is set to 1, it enables "freestanding mode" that is
 *  suitable for typical freestanding environments which don't support
 *  the standard C library.
 *
 *  - LZ4_FREESTANDING is a compile-time switch.
 *  - It requires the following macros to be defined:
 *    LZ4_memcpy, LZ4_memmove, LZ4_memset.
 *  - It only enables LZ4/HC functions which don't use the heap.
 *    All LZ4F_* functions are not supported.
 *  - See tests/freestanding.c to check its basic setup.
 */
#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1)
#  define LZ4_HEAPMODE 0
#  define LZ4HC_HEAPMODE 0
#  define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
#  if !defined(LZ4_memcpy)
#    error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'."
#  endif
#  if !defined(LZ4_memset)
#    error "LZ4_FREESTANDING requires macro 'LZ4_memset'."
#  endif
#  if !defined(LZ4_memmove)
#    error "LZ4_FREESTANDING requires macro 'LZ4_memmove'."
#  endif
#elif ! defined(LZ4_FREESTANDING)
#  define LZ4_FREESTANDING 0
#endif


/*------   Version   ------*/
#define LZ4_VERSION_MAJOR    1    /* for breaking interface changes  */
#define LZ4_VERSION_MINOR    9    /* for new (non-breaking) interface capabilities */
#define LZ4_VERSION_RELEASE  4    /* for tweaks, bug-fixes, or development */

#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)

#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
#define LZ4_QUOTE(str) #str
#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)   /* requires v1.7.3+ */

LZ4LIB_API int LZ4_versionNumber (void);  /**< library version number; useful to check dll version; requires v1.3.0+ */
LZ4LIB_API const char* LZ4_versionString (void);   /**< library version string; useful to check dll version; requires v1.7.5+ */


/*-************************************
 *  Tuning parameter
 **************************************/
#define LZ4_MEMORY_USAGE_MIN 10
#define LZ4_MEMORY_USAGE_DEFAULT 14
#define LZ4_MEMORY_USAGE_MAX 20

/*!
 * LZ4_MEMORY_USAGE :
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
 * Increasing memory usage improves compression ratio, at the cost of speed.
 * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
 * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
 */
#ifndef LZ4_MEMORY_USAGE
#  define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
#endif

#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
#  error "LZ4_MEMORY_USAGE is too small !"
#endif

#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX)
#  error "LZ4_MEMORY_USAGE is too large !"
#endif

/*-************************************
 *  Simple Functions
 **************************************/
/*! LZ4_compress_default() :
 *  Compresses 'srcSize' bytes from buffer 'src'
 *  into already allocated 'dst' buffer of size 'dstCapacity'.
 *  Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
 *  It also runs faster, so it's a recommended setting.
 *  If the function cannot compress 'src' into a more limited 'dst' budget,
 *  compression stops *immediately*, and the function result is zero.
 *  In which case, 'dst' content is undefined (invalid).
 *      srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
 *      dstCapacity : size of buffer 'dst' (which must be already allocated)
 *     @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
 *               or 0 if compression fails
 *  Note : This function is protected against buffer overflow scenarios (it never writes outside the 'dst' buffer, nor reads outside the 'source' buffer).
 */
LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);

/*! LZ4_decompress_safe() :
 * @compressedSize : is the exact complete size of the compressed block.
 * @dstCapacity : is the size of destination buffer (which must be already allocated),
 *                is an upper bound of decompressed size.
 * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
 *           If destination buffer is not large enough, decoding will stop and output an error code (negative value).
 *           If the source stream is detected malformed, the function will stop decoding and return a negative result.
 * Note 1 : This function is protected against malicious data packets :
 *          it never writes outside the 'dst' buffer, nor reads outside the 'source' buffer,
 *          even if the compressed block is maliciously modified to order the decoder to do these actions.
 *          In such a case, the decoder stops immediately, and considers the compressed block malformed.
 * Note 2 : compressedSize and dstCapacity must be provided to the function; the compressed block does not contain them.
 *          The implementation is free to send / store / derive this information in whichever way is most beneficial.
 *          If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
 */
LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
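
/* A minimal round-trip sketch using only the two functions above plus
   LZ4_compressBound() (declared further below). Buffers are heap-allocated
   purely for illustration; error handling is reduced to early exits. */
#include <stdlib.h>
#include <string.h>

static int roundTrip(const char* text)
{
	int srcSize = (int)strlen(text) + 1;
	int bound = LZ4_compressBound(srcSize);
	char* compressed = (char*)malloc((size_t)bound);
	char* restored = (char*)malloc((size_t)srcSize);
	int cSize, dSize, ok = 0;
	if (!compressed || !restored) goto cleanup;
	cSize = LZ4_compress_default(text, compressed, srcSize, bound);
	if (cSize <= 0) goto cleanup;   /* 0 means compression failed */
	dSize = LZ4_decompress_safe(compressed, restored, cSize, srcSize);
	ok = (dSize == srcSize) && (memcmp(text, restored, (size_t)srcSize) == 0);
cleanup:
	free(compressed);
	free(restored);
	return ok;
}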


/*-************************************
 *  Advanced Functions
 **************************************/
#define LZ4_MAX_INPUT_SIZE        0x7E000000   /* 2 113 929 216 bytes */
#define LZ4_COMPRESSBOUND(isize)  ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)

/*! LZ4_compressBound() :
    Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
    This function is primarily useful for memory allocation purposes (destination buffer size).
    Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
    Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
        inputSize  : max supported value is LZ4_MAX_INPUT_SIZE
        return : maximum output size in a "worst case" scenario
              or 0, if input size is incorrect (too large or negative)
*/
LZ4LIB_API int LZ4_compressBound(int inputSize);

/*! LZ4_compress_fast() :
    Same as LZ4_compress_default(), but allows selection of an "acceleration" factor.
    The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
    It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
    An acceleration value of "1" is the same as regular LZ4_compress_default()
    Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
    Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
*/
LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);


/*! LZ4_compress_fast_extState() :
 *  Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
 *  Use LZ4_sizeofState() to know how much memory must be allocated,
 *  and allocate it on 8-byte boundaries (using `malloc()` typically).
 *  Then, provide this buffer as `void* state` to the compression function.
 */
LZ4LIB_API int LZ4_sizeofState(void);
LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
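
/* Sketch: reuse one externally allocated state across many compressions,
   avoiding per-call state allocation. malloc() satisfies the 8-byte
   alignment requirement noted above. */
static int compressWithState(const char* src, char* dst, int srcSize, int dstCapacity)
{
	static void* state = NULL;          /* allocated once, reused */
	if (!state) state = malloc((size_t)LZ4_sizeofState());
	if (!state) return 0;
	return LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
}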


/*! LZ4_compress_destSize() :
 *  Reverses the logic : compresses as much data as possible from 'src' buffer
 *  into an already allocated buffer 'dst', of size >= 'targetDestSize'.
 *  This function either compresses the entire 'src' content into 'dst' if it's large enough,
 *  or fills the 'dst' buffer completely with as much data as possible from 'src'.
 *  note: acceleration parameter is fixed to "default".
 *
 * *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
 *               New value is necessarily <= input value.
 * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
 *           or 0 if compression fails.
 *
 * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
 *        the produced compressed content could, in specific circumstances,
 *        require to be decompressed into a destination buffer larger
 *        by at least 1 byte than the content to decompress.
 *        If an application uses `LZ4_compress_destSize()`,
 *        it's highly recommended to update liblz4 to v1.9.2 or better.
 *        If this can't be done or ensured,
 *        the receiving decompression function should provide
 *        a dstCapacity which is > decompressedSize, by at least 1 byte.
 *        See https://github.com/lz4/lz4/issues/859 for details
 */
LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);


/*! LZ4_decompress_safe_partial() :
 *  Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
 *  into destination buffer 'dst' of size 'dstCapacity'.
 *  Up to 'targetOutputSize' bytes will be decoded.
 *  The function stops decoding on reaching this objective.
 *  This can be useful to boost performance
 *  whenever only the beginning of a block is required.
 *
 * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
 *           If source stream is detected malformed, function returns a negative result.
 *
 *  Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
 *
 *  Note 2 : targetOutputSize must be <= dstCapacity
 *
 *  Note 3 : this function effectively stops decoding on reaching targetOutputSize,
 *           so dstCapacity is kind of redundant.
 *           This is because in older versions of this function,
 *           decoding operation would still write complete sequences.
 *           Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
 *           it could write more bytes, though only up to dstCapacity.
 *           Some "margin" used to be required for this operation to work properly.
 *           Thankfully, this is no longer necessary.
 *           The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
 *
 *  Note 4 : If srcSize is the exact size of the block,
 *           then targetOutputSize can be any value,
 *           including larger than the block's decompressed size.
 *           The function will, at most, generate the block's decompressed size.
 *
 *  Note 5 : If srcSize is _larger_ than the block's compressed size,
 *           then targetOutputSize **MUST** be <= the block's decompressed size.
 *           Otherwise, *silent corruption will occur*.
 */
LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);


/*-*********************************************
 *  Streaming Compression Functions
 ***********************************************/
typedef union LZ4_stream_u LZ4_stream_t;  /* incomplete type (defined later) */

/**
  Note about RC_INVOKED

  - RC_INVOKED is a predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
    https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros

  - Since rc.exe is a legacy compiler, it truncates long symbols (> 30 chars)
    and reports warning "RC4011: identifier truncated".

  - To eliminate the warning, we surround long preprocessor symbols with
    "#if !defined(RC_INVOKED) ... #endif" blocks that mean
    "skip this block when rc.exe is trying to read it".
*/
#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
LZ4LIB_API int           LZ4_freeStream (LZ4_stream_t* streamPtr);
#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
#endif

/*! LZ4_resetStream_fast() : v1.9.0+
 *  Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
 *  (e.g., LZ4_compress_fast_continue()).
 *
 *  An LZ4_stream_t must be initialized once before usage.
 *  This is automatically done when created by LZ4_createStream().
 *  However, should the LZ4_stream_t be simply declared on stack (for example),
 *  it's necessary to initialize it first, using LZ4_initStream().
 *
 *  After init, start any new stream with LZ4_resetStream_fast().
 *  The same LZ4_stream_t can be re-used multiple times consecutively
 *  and compress multiple streams,
 *  provided that it starts each new stream with LZ4_resetStream_fast().
 *
 *  LZ4_resetStream_fast() is much faster than LZ4_initStream(),
 *  but is not compatible with memory regions containing garbage data.
 *
 *  Note: it's only useful to call LZ4_resetStream_fast()
 *        in the context of streaming compression.
 *        The *extState* functions perform their own resets.
 *        Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive.
 */
LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);

/*! LZ4_loadDict() :
 *  Use this function to reference a static dictionary into LZ4_stream_t.
 *  The dictionary must remain available during compression.
 *  LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
 *  The same dictionary will have to be loaded on the decompression side for successful decoding.
 *  Dictionaries are useful for better compression of small data (KB range).
 *  While LZ4 accepts any input as dictionary,
 *  results are generally better when using Zstandard's Dictionary Builder.
 *  Loading a size of 0 is allowed, and is the same as reset.
 * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
 */
LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);

/*! LZ4_compress_fast_continue() :
 *  Compress 'src' content using data from previously compressed blocks, for better compression ratio.
 * 'dst' buffer must be already allocated.
 *  If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
 *
 * @return : size of compressed block
 *           or 0 if there is an error (typically, cannot fit into 'dst').
 *
 *  Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
 *           Each block has precise boundaries.
 *           Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
 *           It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
 *
 *  Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at the same address in memory !
 *
 *  Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
 *           Make sure that buffers are separated, by at least one byte.
 *           This construction ensures that each block only depends on the previous block.
 *
 *  Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
 *
 *  Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
 */
LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);

/*! LZ4_saveDict() :
 *  If the last 64KB of data cannot be guaranteed to remain available at its current memory location,
 *  save it into a safer place (char* safeBuffer).
 *  This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
 *  but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
 * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
 */
LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
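
/* Sketch: chained compression of a sequence of messages with a double
   buffer, following Note 3 above. Each output block must later be decoded
   in order by a streaming decoder (see the decoder sketch further below).
   'writeBlock' is a hypothetical caller-supplied callback; sizes are
   illustrative. Uses LZ4_initStream(), declared later in this header. */
#define MSG_MAX 4096

static void compressMessages(const char* msgs[], const int sizes[], int count,
                             void (*writeBlock)(const char*, int))
{
	LZ4_stream_t stream;
	char inBuf[2][MSG_MAX];   /* double buffer: each block may reference the previous one */
	char outBuf[LZ4_COMPRESSBOUND(MSG_MAX)];
	int i;
	LZ4_initStream(&stream, sizeof(stream));
	for (i = 0; i < count; i++)
	{
		char* in = inBuf[i & 1];
		int cSize;
		memcpy(in, msgs[i], (size_t)sizes[i]);
		cSize = LZ4_compress_fast_continue(&stream, in, outBuf, sizes[i], (int)sizeof(outBuf), 1);
		if (cSize <= 0) return;   /* error: stream must be reset or freed */
		writeBlock(outBuf, cSize);
	}
}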


/*-**********************************************
 *  Streaming Decompression Functions
 *  Bufferless synchronous API
 ************************************************/
typedef union LZ4_streamDecode_u LZ4_streamDecode_t;  /* tracking context */

/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
 *  creation / destruction of streaming decompression tracking context.
 *  A tracking context can be re-used multiple times.
 */
#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
LZ4LIB_API int                 LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
#endif

/*! LZ4_setStreamDecode() :
 *  An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
 *  Use this function to start decompression of a new stream of blocks.
 *  A dictionary can optionally be set. Use NULL or size 0 for a reset order.
 *  Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
 * @return : 1 if OK, 0 if error
 */
LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);

/*! LZ4_decoderRingBufferSize() : v1.8.2+
 *  Note : in a ring buffer scenario (optional),
 *  blocks are presumed decompressed next to each other
 *  up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize),
 *  at which stage decoding resumes from the beginning of the ring buffer.
 *  When setting up such a ring buffer for streaming decompression,
 *  this function provides the minimum size of the ring buffer
 *  required to be compatible with any source respecting the maxBlockSize condition.
 * @return : minimum ring buffer size,
 *           or 0 if there is an error (invalid maxBlockSize).
 */
LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize))  /* for static allocation; maxBlockSize presumed valid */

/*! LZ4_decompress_safe_continue() :
 *  This decoding function allows decompression of consecutive blocks in "streaming" mode.
 *  The difference with the usual independent blocks is that
 *  new blocks are allowed to find references into former blocks.
 *  A block is an unsplittable entity, and must be presented entirely to the decompression function.
 *  LZ4_decompress_safe_continue() only accepts one block at a time.
 *  It's modeled after `LZ4_decompress_safe()` and behaves similarly.
 *
 * @LZ4_streamDecode : decompression state, tracking the position in memory of past data
 * @compressedSize : exact complete size of one compressed block.
 * @dstCapacity : size of destination buffer (which must be already allocated),
 *                must be an upper bound of decompressed size.
 * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
 *           If destination buffer is not large enough, decoding will stop and output an error code (negative value).
 *           If the source stream is detected malformed, the function will stop decoding and return a negative result.
 *
 *  The last 64KB of previously decoded data *must* remain available and unmodified
 *  at the memory position where they were previously decoded.
 *  If less than 64KB of data has been decoded, all the data must be present.
 *
 *  Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
 *  - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
 *    maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
 *    In which case, encoding and decoding buffers do not need to be synchronized.
 *    Actually, data can be produced by any source compliant with the LZ4 format specification, and respecting maxBlockSize.
 *  - Synchronized mode :
 *    Decompression buffer size is _exactly_ the same as compression buffer size,
 *    and follows exactly the same update rule (block boundaries at same positions),
 *    and decoding function is provided with the exact decompressed size of each block (exception for the last block of the stream),
 *    _then_ decoding & encoding ring buffers can have any size, including small ones ( < 64 KB).
 *  - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
 *    In which case, encoding and decoding buffers do not need to be synchronized,
 *    and the encoding ring buffer can have any size, including small ones ( < 64 KB).
 *
 *  Whenever these conditions are not possible,
 *  save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
 *  then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing the next block.
 */
LZ4LIB_API int
LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode,
                              const char* src, char* dst,
                              int srcSize, int dstCapacity);
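
/* Sketch: the decoding counterpart of the double-buffered encoder sketch
   above (reusing its MSG_MAX). Blocks must arrive in encoder order;
   'readBlock' is a hypothetical callback returning the next compressed
   block size (0 when the stream ends). */
static void decompressMessages(int (*readBlock)(char*, int),
                               void (*handleMsg)(const char*, int))
{
	LZ4_streamDecode_t decode;
	char cmpBuf[LZ4_COMPRESSBOUND(MSG_MAX)];
	char decBuf[2][MSG_MAX];   /* double buffer mirrors the encoder */
	int idx = 0, cSize;
	LZ4_setStreamDecode(&decode, NULL, 0);
	while ((cSize = readBlock(cmpBuf, (int)sizeof(cmpBuf))) > 0)
	{
		char* dec = decBuf[idx & 1];
		int dSize = LZ4_decompress_safe_continue(&decode, cmpBuf, dec, cSize, MSG_MAX);
		if (dSize < 0) return;   /* malformed input */
		handleMsg(dec, dSize);
		idx++;
	}
}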


/*! LZ4_decompress_safe_usingDict() :
 *  Works the same as
 *  a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue()
 *  However, it's stateless: it doesn't need any LZ4_streamDecode_t state.
 *  Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
 *  Performance tip : Decompression speed can be substantially increased
 *                    when dst == dictStart + dictSize.
 */
LZ4LIB_API int
LZ4_decompress_safe_usingDict(const char* src, char* dst,
                              int srcSize, int dstCapacity,
                              const char* dictStart, int dictSize);

/*! LZ4_decompress_safe_partial_usingDict() :
 *  Behaves the same as LZ4_decompress_safe_partial()
 *  with the added ability to specify a memory segment for past data.
 *  Performance tip : Decompression speed can be substantially increased
 *                    when dst == dictStart + dictSize.
 */
LZ4LIB_API int
LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
                                      int compressedSize,
                                      int targetOutputSize, int maxOutputSize,
                                      const char* dictStart, int dictSize);

#endif /* LZ4_H_2983827168210 */


/*^*************************************
 * !!!!!!   STATIC LINKING ONLY   !!!!!!
 ***************************************/

/*-****************************************************************************
 *  Experimental section
 *
 *  Symbols declared in this section must be considered unstable. Their
 *  signatures or semantics may change, or they may be removed altogether in the
 *  future. They are therefore only safe to depend on when the caller is
 *  statically linked against the library.
 *
 *  To protect against unsafe usage, not only are the declarations guarded,
 *  the definitions are hidden by default
 *  when building LZ4 as a shared/dynamic library.
 *
 *  In order to access these declarations,
 *  define LZ4_STATIC_LINKING_ONLY in your application
 *  before including LZ4's headers.
 *
 *  In order to make their implementations accessible dynamically, you must
 *  define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
 ******************************************************************************/

#ifdef LZ4_STATIC_LINKING_ONLY

#ifndef LZ4_STATIC_3504398509
#define LZ4_STATIC_3504398509

#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
#define LZ4LIB_STATIC_API LZ4LIB_API
#else
#define LZ4LIB_STATIC_API
#endif


/*! LZ4_compress_fast_extState_fastReset() :
 *  A variant of LZ4_compress_fast_extState().
 *
 *  Using this variant avoids an expensive initialization step.
 *  It is only safe to call if the state buffer is known to be correctly initialized already
 *  (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
 *  From a high level, the difference is that
 *  this function initializes the provided state with a call to something like LZ4_resetStream_fast()
 *  while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
 */
LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);

/*! LZ4_attach_dictionary() :
 *  This is an experimental API that allows
 *  efficient use of a static dictionary many times.
 *
 *  Rather than re-loading the dictionary buffer into a working context before
 *  each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
 *  working LZ4_stream_t, this function introduces a no-copy setup mechanism,
 *  in which the working stream references the dictionary stream in-place.
 *
 *  Several assumptions are made about the state of the dictionary stream.
 *  Currently, only streams which have been prepared by LZ4_loadDict() should
 *  be expected to work.
 *
 *  Alternatively, the provided dictionaryStream may be NULL,
 *  in which case any existing dictionary stream is unset.
 *
 *  If a dictionary is provided, it replaces any pre-existing stream history.
 *  The dictionary contents are the only history that can be referenced and
 *  logically immediately precede the data compressed in the first subsequent
 *  compression call.
 *
 *  The dictionary will only remain attached to the working stream through the
 *  first compression call, at the end of which it is cleared. The dictionary
 *  stream (and source buffer) must remain in-place / accessible / unchanged
 *  through the completion of the first compression call on the stream.
 */
LZ4LIB_STATIC_API void
LZ4_attach_dictionary(LZ4_stream_t* workingStream,
                      const LZ4_stream_t* dictionaryStream);


/*! In-place compression and decompression
 *
 *  It's possible to have input and output sharing the same buffer,
 *  for highly constrained memory environments.
 *  In both cases, it requires input to lie at the end of the buffer,
 *  and decompression to start at the beginning of the buffer.
 *  Buffer size must feature some margin, hence be larger than final size.
 *
 *  |<------------------------buffer--------------------------------->|
 *                             |<-----------compressed data--------->|
 *  |<-----------decompressed size------------------>|
 *                                                   |<----margin---->|
 *
 *  This technique is more useful for decompression,
 *  since decompressed size is typically larger,
 *  and margin is short.
 *
 *  In-place decompression will work inside any buffer
 *  whose size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
 *  This presumes that decompressedSize > compressedSize.
 *  Otherwise, it means compression actually expanded data,
 *  and it would be more efficient to store such data with a flag indicating it's not compressed.
 *  This can happen when data is not compressible (already compressed, or encrypted).
 *
 *  For in-place compression, margin is larger, as it must be able to cope with both
 *  history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
 *  and data expansion, which can happen when input is not compressible.
 *  As a consequence, buffer size requirements are much higher,
 *  and memory savings offered by in-place compression are more limited.
 *
 *  There are ways to limit this cost for compression :
 *  - Reduce history size, by modifying LZ4_DISTANCE_MAX.
 *    Note that it is a compile-time constant, so all compressions will apply this limit.
 *    Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
 *    so it's a reasonable trick when inputs are known to be small.
 *  - Require the compressor to deliver a "maximum compressed size".
 *    This is the `dstCapacity` parameter in `LZ4_compress*()`.
 *    When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
 *    in which case, the return code will be 0 (zero).
 *    The caller must be ready for these cases to happen,
 *    and typically design a backup scheme to send data uncompressed.
 *  The combination of both techniques can significantly reduce
 *  the amount of margin required for in-place compression.
 *
 *  In-place compression can work in any buffer
 *  whose size is >= (maxCompressedSize)
 *  with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
 *  LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
 *  so it's possible to reduce memory requirements by playing with them.
 */

#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize)          (((compressedSize) >> 8) + 32)
#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize)   ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize))  /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */

#ifndef LZ4_DISTANCE_MAX   /* history window size; can be user-defined at compile time */
#  define LZ4_DISTANCE_MAX 65535   /* set to maximum value by default */
#endif

#define LZ4_COMPRESS_INPLACE_MARGIN                           (LZ4_DISTANCE_MAX + 32)   /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize)   ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN)  /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */

#endif   /* LZ4_STATIC_3504398509 */
#endif   /* LZ4_STATIC_LINKING_ONLY */
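
/* Sketch: in-place decompression using the sizing macro above (which
   requires LZ4_STATIC_LINKING_ONLY to be defined). The compressed block
   is first copied to the end of one shared buffer, then decoded from
   there to the buffer's start. DEC_SIZE is an assumed, known
   decompressed size; <string.h> provides memmove(). */
#define DEC_SIZE 65536

static int decompressInPlace(const char* cmp, int cmpSize)
{
	static char buffer[LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(DEC_SIZE)];
	char* cmpAtEnd = buffer + sizeof(buffer) - cmpSize;  /* input lies at the end */
	memmove(cmpAtEnd, cmp, (size_t)cmpSize);
	/* output starts at the beginning and may safely overtake the input tail */
	return LZ4_decompress_safe(cmpAtEnd, buffer, cmpSize, DEC_SIZE);
}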



#ifndef LZ4_H_98237428734687
#define LZ4_H_98237428734687

/*-************************************************************
 *  Private Definitions
 **************************************************************
 *  Do not use these definitions directly.
 *  They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
 *  Accessing members will expose user code to API and/or ABI break in future versions of the library.
 **************************************************************/
#define LZ4_HASHLOG   (LZ4_MEMORY_USAGE-2)
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)       /* required as macro for static allocation */

#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  include <stdint.h>
   typedef  int8_t  LZ4_i8;
   typedef uint8_t  LZ4_byte;
   typedef uint16_t LZ4_u16;
   typedef uint32_t LZ4_u32;
#else
   typedef  signed char   LZ4_i8;
   typedef unsigned char  LZ4_byte;
   typedef unsigned short LZ4_u16;
   typedef unsigned int   LZ4_u32;
#endif

/*! LZ4_stream_t :
 *  Never ever use below internal definitions directly !
 *  These definitions are not API/ABI safe, and may change in future versions.
 *  If you need static allocation, declare or allocate an LZ4_stream_t object.
 **/

typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
struct LZ4_stream_t_internal {
	LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
	const LZ4_byte* dictionary;
	const LZ4_stream_t_internal* dictCtx;
	LZ4_u32 currentOffset;
	LZ4_u32 tableType;
	LZ4_u32 dictSize;
	/* Implicit padding to ensure structure is aligned */
};

#define LZ4_STREAM_MINSIZE  ((1UL << LZ4_MEMORY_USAGE) + 32)  /* static size, for inter-version compatibility */
union LZ4_stream_u {
	char minStateSize[LZ4_STREAM_MINSIZE];
	LZ4_stream_t_internal internal_donotuse;
};  /* previously typedef'd to LZ4_stream_t */
|
||||
|
||||
|
||||
/*! LZ4_initStream() : v1.9.0+
|
||||
* An LZ4_stream_t structure must be initialized at least once.
|
||||
* This is automatically done when invoking LZ4_createStream(),
|
||||
* but it's not when the structure is simply declared on stack (for example).
|
||||
*
|
||||
* Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
|
||||
* It can also initialize any arbitrary buffer of sufficient size,
|
||||
* and will @return a pointer of proper type upon initialization.
|
||||
*
|
||||
* Note : initialization fails if size and alignment conditions are not respected.
|
||||
* In which case, the function will @return NULL.
|
||||
* Note2: An LZ4_stream_t structure guarantees correct alignment and size.
|
||||
* Note3: Before v1.9.0, use LZ4_resetStream() instead
|
||||
**/
|
||||
LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
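
/* Usage sketch (illustrative, not part of the original header): statically
 * allocating an LZ4_stream_t on the stack and initializing it before streaming
 * compression. The helper name and chunk layout are hypothetical. */
#include "lz4.h"

static int compress_two_chunks(const char* a, int aSize, const char* b, int bSize,
                               char* dst, int dstCapacity)
{
    LZ4_stream_t state;
    LZ4_stream_t* const s = LZ4_initStream(&state, sizeof(state));
    if (s == NULL) return -1;    /* size/alignment conditions not respected */

    /* the second chunk may reference the first one as history */
    int const first = LZ4_compress_fast_continue(s, a, dst, aSize, dstCapacity, 1);
    if (first <= 0) return -1;
    int const second = LZ4_compress_fast_continue(s, b, dst + first, bSize,
                                                  dstCapacity - first, 1);
    if (second <= 0) return -1;
    return first + second;
}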


/*! LZ4_streamDecode_t :
 *  Never ever use below internal definitions directly !
 *  These definitions are not API/ABI safe, and may change in future versions.
 *  If you need static allocation, declare or allocate an LZ4_streamDecode_t object.
**/
typedef struct {
    const LZ4_byte* externalDict;
    const LZ4_byte* prefixEnd;
    size_t extDictSize;
    size_t prefixSize;
} LZ4_streamDecode_t_internal;

#define LZ4_STREAMDECODE_MINSIZE 32
union LZ4_streamDecode_u {
    char minStateSize[LZ4_STREAMDECODE_MINSIZE];
    LZ4_streamDecode_t_internal internal_donotuse;
};   /* previously typedef'd to LZ4_streamDecode_t */



/*-************************************
*  Obsolete Functions
**************************************/

/*! Deprecation warnings
 *
 *  Deprecated functions make the compiler generate a warning when invoked.
 *  This is meant to invite users to update their source code.
 *  Should deprecation warnings be a problem, it is generally possible to disable them,
 *  typically with -Wno-deprecated-declarations for gcc
 *  or _CRT_SECURE_NO_WARNINGS in Visual.
 *
 *  Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
 *  before including the header file.
 */
#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
#  define LZ4_DEPRECATED(message)   /* disable deprecation warnings */
#else
#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
#    define LZ4_DEPRECATED(message) [[deprecated(message)]]
#  elif defined(_MSC_VER)
#    define LZ4_DEPRECATED(message) __declspec(deprecated(message))
#  elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
#    define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
#  elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
#    define LZ4_DEPRECATED(message) __attribute__((deprecated))
#  else
#    pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
#    define LZ4_DEPRECATED(message)   /* disabled */
#  endif
#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */

/*! Obsolete compression functions (since v1.7.3) */
LZ4_DEPRECATED("use LZ4_compress_default() instead")       LZ4LIB_API int LZ4_compress               (const char* src, char* dest, int srcSize);
LZ4_DEPRECATED("use LZ4_compress_default() instead")       LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState               (void* state, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue                (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue  (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);

/*! Obsolete decompression functions (since v1.8.0) */
LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);

/* Obsolete streaming functions (since v1.7.0)
 * degraded functionality; do not use!
 *
 * In order to perform streaming compression, these functions depended on data
 * that is no longer tracked in the state. They have been preserved as well as
 * possible: using them will still produce a correct output. However, they don't
 * actually retain any history between compression calls. The compression ratio
 * achieved will therefore be no better than compressing each chunk
 * independently.
 */
LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer);
LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int   LZ4_sizeofStreamState(void);
LZ4_DEPRECATED("Use LZ4_resetStream() instead")  LZ4LIB_API int   LZ4_resetStreamState(void* state, char* inputBuffer);
LZ4_DEPRECATED("Use LZ4_saveDict() instead")     LZ4LIB_API char* LZ4_slideInputBuffer (void* state);

/*! Obsolete streaming decoding functions (since v1.7.0) */
LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);

/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
 *  These functions used to be faster than LZ4_decompress_safe(),
 *  but this is no longer the case. They are now slower.
 *  This is because LZ4_decompress_fast() doesn't know the input size,
 *  and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
 *  On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
 *  As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
 *
 *  The last remaining LZ4_decompress_fast() specificity is that
 *  it can decompress a block without knowing its compressed size.
 *  Such functionality can be achieved in a more secure manner
 *  by employing LZ4_decompress_safe_partial().
 *
 *  Parameters:
 *  originalSize : is the uncompressed size to regenerate.
 *                 `dst` must be already allocated, its size must be >= 'originalSize' bytes.
 * @return : number of bytes read from source buffer (== compressed size).
 *           The function expects to finish at block's end exactly.
 *           If the source stream is detected malformed, the function stops decoding and returns a negative result.
 *  note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
 *         However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
 *         Also, since match offsets are not validated, match reads from 'src' may underflow too.
 *         These issues never happen if input (compressed) data is correct.
 *         But they may happen if input data is invalid (error or intentional tampering).
 *         As a consequence, use these functions in trusted environments with trusted data **only**.
 */
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
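
/* Usage sketch (illustrative, not part of the original header): replacing
 * LZ4_decompress_fast() with LZ4_decompress_safe_partial(), as the note above
 * recommends. Only the uncompressed size must be known exactly; srcSize is
 * merely an upper bound on how far the decoder may read. */
#include "lz4.h"

static int decompress_known_size(const char* src, int srcSize,
                                 char* dst, int originalSize)
{
    /* stops after regenerating originalSize bytes, never reads past src + srcSize */
    int const r = LZ4_decompress_safe_partial(src, dst, srcSize, originalSize, originalSize);
    return (r == originalSize) ? r : -1;   /* negative or short result means malformed input */
}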

/*! LZ4_resetStream() :
 *  An LZ4_stream_t structure must be initialized at least once.
 *  This is done with LZ4_initStream(), or LZ4_resetStream().
 *  Consider switching to LZ4_initStream(),
 *  invoking LZ4_resetStream() will trigger deprecation warnings in the future.
 */
LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);


#endif /* LZ4_H_98237428734687 */


#if defined (__cplusplus)
}
#endif
@ -1,311 +0,0 @@
/*
 * LZ4 file library
 * Copyright (C) 2022, Xiaomi Inc.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *  - LZ4 homepage : http://www.lz4.org
 *  - LZ4 source repository : https://github.com/lz4/lz4
 */
#include <stdlib.h>
#include <string.h>
#include "lz4.h"
#include "lz4file.h"

struct LZ4_readFile_s {
  LZ4F_dctx* dctxPtr;
  FILE* fp;
  LZ4_byte* srcBuf;
  size_t srcBufNext;
  size_t srcBufSize;
  size_t srcBufMaxSize;
};

struct LZ4_writeFile_s {
  LZ4F_cctx* cctxPtr;
  FILE* fp;
  LZ4_byte* dstBuf;
  size_t maxWriteSize;
  size_t dstBufMaxSize;
  LZ4F_errorCode_t errCode;
};

LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp)
{
  char buf[LZ4F_HEADER_SIZE_MAX];
  size_t consumedSize;
  LZ4F_errorCode_t ret;
  LZ4F_frameInfo_t info;

  if (fp == NULL || lz4fRead == NULL) {
    return -LZ4F_ERROR_GENERIC;
  }

  *lz4fRead = (LZ4_readFile_t*)calloc(1, sizeof(LZ4_readFile_t));
  if (*lz4fRead == NULL) {
    return -LZ4F_ERROR_allocation_failed;
  }

  ret = LZ4F_createDecompressionContext(&(*lz4fRead)->dctxPtr, LZ4F_getVersion());
  if (LZ4F_isError(ret)) {
    free(*lz4fRead);
    return ret;
  }

  (*lz4fRead)->fp = fp;
  consumedSize = fread(buf, 1, sizeof(buf), (*lz4fRead)->fp);
  if (consumedSize != sizeof(buf)) {
    free(*lz4fRead);
    return -LZ4F_ERROR_GENERIC;
  }

  ret = LZ4F_getFrameInfo((*lz4fRead)->dctxPtr, &info, buf, &consumedSize);
  if (LZ4F_isError(ret)) {
    LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
    free(*lz4fRead);
    return ret;
  }

  switch (info.blockSizeID) {
    case LZ4F_default :
    case LZ4F_max64KB :
      (*lz4fRead)->srcBufMaxSize = 64 * 1024;
      break;
    case LZ4F_max256KB:
      (*lz4fRead)->srcBufMaxSize = 256 * 1024;
      break;
    case LZ4F_max1MB:
      (*lz4fRead)->srcBufMaxSize = 1 * 1024 * 1024;
      break;
    case LZ4F_max4MB:
      (*lz4fRead)->srcBufMaxSize = 4 * 1024 * 1024;
      break;
    default:
      LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
      free(*lz4fRead);
      return -LZ4F_ERROR_maxBlockSize_invalid;
  }

  (*lz4fRead)->srcBuf = (LZ4_byte*)malloc((*lz4fRead)->srcBufMaxSize);
  if ((*lz4fRead)->srcBuf == NULL) {
    LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
    free(*lz4fRead);
    return -LZ4F_ERROR_allocation_failed;
  }

  (*lz4fRead)->srcBufSize = sizeof(buf) - consumedSize;
  memcpy((*lz4fRead)->srcBuf, buf + consumedSize, (*lz4fRead)->srcBufSize);

  return ret;
}

size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size)
{
  LZ4_byte* p = (LZ4_byte*)buf;
  size_t next = 0;

  if (lz4fRead == NULL || buf == NULL)
    return -LZ4F_ERROR_GENERIC;

  while (next < size) {
    size_t srcsize = lz4fRead->srcBufSize - lz4fRead->srcBufNext;
    size_t dstsize = size - next;
    size_t ret;

    if (srcsize == 0) {
      ret = fread(lz4fRead->srcBuf, 1, lz4fRead->srcBufMaxSize, lz4fRead->fp);
      if (ret > 0) {
        lz4fRead->srcBufSize = ret;
        srcsize = lz4fRead->srcBufSize;
        lz4fRead->srcBufNext = 0;
      }
      else if (ret == 0) {
        break;
      }
      else {
        return -LZ4F_ERROR_GENERIC;
      }
    }

    ret = LZ4F_decompress(lz4fRead->dctxPtr,
                          p, &dstsize,
                          lz4fRead->srcBuf + lz4fRead->srcBufNext,
                          &srcsize,
                          NULL);
    if (LZ4F_isError(ret)) {
      return ret;
    }

    lz4fRead->srcBufNext += srcsize;
    next += dstsize;
    p += dstsize;
  }

  return next;
}

LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead)
{
  if (lz4fRead == NULL)
    return -LZ4F_ERROR_GENERIC;
  LZ4F_freeDecompressionContext(lz4fRead->dctxPtr);
  free(lz4fRead->srcBuf);
  free(lz4fRead);
  return LZ4F_OK_NoError;
}
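
/* Usage sketch (illustrative, not part of the original source): reading a
 * whole .lz4 file with the reader above. Path and chunk size are arbitrary. */
#include <stdio.h>

static int dump_lz4_file(const char* path)
{
    FILE* const fp = fopen(path, "rb");
    if (fp == NULL) return -1;

    LZ4_readFile_t* reader;
    if (LZ4F_isError(LZ4F_readOpen(&reader, fp))) { fclose(fp); return -1; }

    char chunk[4096];
    for (;;) {
        size_t const n = LZ4F_read(reader, chunk, sizeof(chunk));
        if (LZ4F_isError(n)) break;          /* decoding error */
        if (n == 0) break;                   /* end of file */
        fwrite(chunk, 1, n, stdout);         /* consume decompressed bytes */
    }

    LZ4F_readClose(reader);                  /* releases internal buffers */
    fclose(fp);                              /* fp stays owned by the caller */
    return 0;
}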

LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr)
{
  LZ4_byte buf[LZ4F_HEADER_SIZE_MAX];
  size_t ret;

  if (fp == NULL || lz4fWrite == NULL)
    return -LZ4F_ERROR_GENERIC;

  *lz4fWrite = (LZ4_writeFile_t*)malloc(sizeof(LZ4_writeFile_t));
  if (*lz4fWrite == NULL) {
    return -LZ4F_ERROR_allocation_failed;
  }
  if (prefsPtr != NULL) {
    switch (prefsPtr->frameInfo.blockSizeID) {
      case LZ4F_default :
      case LZ4F_max64KB :
        (*lz4fWrite)->maxWriteSize = 64 * 1024;
        break;
      case LZ4F_max256KB:
        (*lz4fWrite)->maxWriteSize = 256 * 1024;
        break;
      case LZ4F_max1MB:
        (*lz4fWrite)->maxWriteSize = 1 * 1024 * 1024;
        break;
      case LZ4F_max4MB:
        (*lz4fWrite)->maxWriteSize = 4 * 1024 * 1024;
        break;
      default:
        free(*lz4fWrite);
        return -LZ4F_ERROR_maxBlockSize_invalid;
    }
  } else {
    (*lz4fWrite)->maxWriteSize = 64 * 1024;
  }

  (*lz4fWrite)->dstBufMaxSize = LZ4F_compressBound((*lz4fWrite)->maxWriteSize, prefsPtr);
  (*lz4fWrite)->dstBuf = (LZ4_byte*)malloc((*lz4fWrite)->dstBufMaxSize);
  if ((*lz4fWrite)->dstBuf == NULL) {
    free(*lz4fWrite);
    return -LZ4F_ERROR_allocation_failed;
  }

  ret = LZ4F_createCompressionContext(&(*lz4fWrite)->cctxPtr, LZ4F_getVersion());
  if (LZ4F_isError(ret)) {
    free((*lz4fWrite)->dstBuf);
    free(*lz4fWrite);
    return ret;
  }

  ret = LZ4F_compressBegin((*lz4fWrite)->cctxPtr, buf, LZ4F_HEADER_SIZE_MAX, prefsPtr);
  if (LZ4F_isError(ret)) {
    LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr);
    free((*lz4fWrite)->dstBuf);
    free(*lz4fWrite);
    return ret;
  }

  if (ret != fwrite(buf, 1, ret, fp)) {
    LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr);
    free((*lz4fWrite)->dstBuf);
    free(*lz4fWrite);
    return -LZ4F_ERROR_GENERIC;
  }

  (*lz4fWrite)->fp = fp;
  (*lz4fWrite)->errCode = LZ4F_OK_NoError;
  return LZ4F_OK_NoError;
}

size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, const void* buf, size_t size)
{
  const LZ4_byte* p = (const LZ4_byte*)buf;
  size_t remain = size;
  size_t chunk;
  size_t ret;

  if (lz4fWrite == NULL || buf == NULL)
    return -LZ4F_ERROR_GENERIC;
  while (remain) {
    if (remain > lz4fWrite->maxWriteSize)
      chunk = lz4fWrite->maxWriteSize;
    else
      chunk = remain;

    ret = LZ4F_compressUpdate(lz4fWrite->cctxPtr,
                              lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize,
                              p, chunk,
                              NULL);
    if (LZ4F_isError(ret)) {
      lz4fWrite->errCode = ret;
      return ret;
    }

    if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
      lz4fWrite->errCode = -LZ4F_ERROR_GENERIC;
      return -LZ4F_ERROR_GENERIC;
    }

    p += chunk;
    remain -= chunk;
  }

  return size;
}

LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite)
{
  LZ4F_errorCode_t ret = LZ4F_OK_NoError;

  if (lz4fWrite == NULL)
    return -LZ4F_ERROR_GENERIC;

  if (lz4fWrite->errCode == LZ4F_OK_NoError) {
    ret = LZ4F_compressEnd(lz4fWrite->cctxPtr,
                           lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize,
                           NULL);
    if (LZ4F_isError(ret)) {
      goto out;
    }

    if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
      ret = -LZ4F_ERROR_GENERIC;
    }
  }

out:
  LZ4F_freeCompressionContext(lz4fWrite->cctxPtr);
  free(lz4fWrite->dstBuf);
  free(lz4fWrite);
  return ret;
}
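
/* Usage sketch (illustrative, not part of the original source): compressing a
 * buffer into a .lz4 file with the writer above. Passing NULL preferences
 * selects the documented defaults (64 KB blocks). */
#include <stdio.h>

static int write_lz4_file(const char* path, const void* data, size_t size)
{
    FILE* const fp = fopen(path, "wb");
    if (fp == NULL) return -1;

    LZ4_writeFile_t* writer;
    if (LZ4F_isError(LZ4F_writeOpen(&writer, fp, NULL))) { fclose(fp); return -1; }

    int ok = !LZ4F_isError(LZ4F_write(writer, data, size));
    /* writeClose() flushes remaining data and finalizes the frame (endMark + checksum) */
    ok = ok && !LZ4F_isError(LZ4F_writeClose(writer));
    fclose(fp);
    return ok ? 0 : -1;
}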
@ -1,93 +0,0 @@
/*
   LZ4 file library
   Header File
   Copyright (C) 2022, Xiaomi Inc.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : https://github.com/lz4/lz4
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#if defined (__cplusplus)
extern "C" {
#endif

#ifndef LZ4FILE_H
#define LZ4FILE_H

#include <stdio.h>
#include "lz4frame_static.h"

typedef struct LZ4_readFile_s LZ4_readFile_t;
typedef struct LZ4_writeFile_s LZ4_writeFile_t;

/*! LZ4F_readOpen() :
 *  Open an lz4file handle for reading.
 * `lz4fRead` will receive the newly created read handle.
 * `fp` must be the FILE* returned by fopen() on an lz4 file.
 */
LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp);

/*! LZ4F_read() :
 *  Read and decompress lz4file content into a buffer.
 * `lz4fRead` must first be set by LZ4F_readOpen().
 * `buf`  : buffer receiving the decompressed data.
 * `size` : capacity of `buf`, in bytes.
 */
LZ4FLIB_STATIC_API size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size);

/*! LZ4F_readClose() :
 *  Close an lz4file read handle.
 * `lz4fRead` must first be set by LZ4F_readOpen().
 */
LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead);

/*! LZ4F_writeOpen() :
 *  Open an lz4file handle for writing.
 * `lz4fWrite` will receive the newly created write handle.
 * `fp` must be the FILE* returned by fopen() on an lz4 file.
 */
LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr);

/*! LZ4F_write() :
 *  Compress a buffer and write it to the lz4file.
 * `lz4fWrite` must first be set by LZ4F_writeOpen().
 * `buf`  : buffer holding the data to write.
 * `size` : size of `buf`, in bytes.
 */
LZ4FLIB_STATIC_API size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, const void* buf, size_t size);

/*! LZ4F_writeClose() :
 *  Close an lz4file write handle.
 * `lz4fWrite` must first be set by LZ4F_writeOpen().
 */
LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite);

#endif /* LZ4FILE_H */

#if defined (__cplusplus)
}
#endif
File diff suppressed because it is too large
@ -1,707 +0,0 @@
/*
   LZ4F - LZ4-Frame library
   Header File
   Copyright (C) 2011-2020, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : https://github.com/lz4/lz4
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/

/* LZ4F is a stand-alone API able to create and decode LZ4 frames
 * conformant with specification v1.6.1 in doc/lz4_Frame_format.md .
 * Generated frames are compatible with `lz4` CLI.
 *
 * LZ4F also offers streaming capabilities.
 *
 * lz4.h is not required when using lz4frame.h,
 * except to extract common constants such as LZ4_VERSION_NUMBER.
 * */

#ifndef LZ4F_H_09782039843
#define LZ4F_H_09782039843

#if defined (__cplusplus)
extern "C" {
#endif

/* ---   Dependency   --- */
#include <stddef.h>   /* size_t */


/**
 * Introduction
 *
 * lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md .
 * LZ4 Frames are compatible with `lz4` CLI,
 * and designed to be interoperable with any system.
**/

/*-***************************************************************
 *  Compiler specifics
 *****************************************************************/
/*  LZ4_DLL_EXPORT :
 *  Enable exporting of functions when building a Windows DLL
 *  LZ4FLIB_VISIBILITY :
 *  Control library symbols visibility.
 */
#ifndef LZ4FLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define LZ4FLIB_VISIBILITY
#  endif
#endif
#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
#  define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
#  define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
#else
#  define LZ4FLIB_API LZ4FLIB_VISIBILITY
#endif

#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
#  define LZ4F_DEPRECATE(x) x
#else
#  if defined(_MSC_VER)
#    define LZ4F_DEPRECATE(x) x   /* __declspec(deprecated) x - only works with C++ */
#  elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6))
#    define LZ4F_DEPRECATE(x) x __attribute__((deprecated))
#  else
#    define LZ4F_DEPRECATE(x) x   /* no deprecation warning for this compiler */
#  endif
#endif


/*-************************************
 *  Error management
 **************************************/
typedef size_t LZ4F_errorCode_t;

LZ4FLIB_API unsigned    LZ4F_isError(LZ4F_errorCode_t code);        /**< tells when a function result is an error code */
LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code);   /**< return error code string; for debugging */


/*-************************************
 *  Frame compression types
 ************************************* */
/* #define LZ4F_ENABLE_OBSOLETE_ENUMS   // uncomment to enable obsolete enums */
#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
#  define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
#else
#  define LZ4F_OBSOLETE_ENUM(x)
#endif

/* The larger the block size, the (slightly) better the compression ratio,
 * though there are diminishing returns.
 * Larger blocks also increase memory usage on both compression and decompression sides.
 */
typedef enum {
    LZ4F_default=0,
    LZ4F_max64KB=4,
    LZ4F_max256KB=5,
    LZ4F_max1MB=6,
    LZ4F_max4MB=7
    LZ4F_OBSOLETE_ENUM(max64KB)
    LZ4F_OBSOLETE_ENUM(max256KB)
    LZ4F_OBSOLETE_ENUM(max1MB)
    LZ4F_OBSOLETE_ENUM(max4MB)
} LZ4F_blockSizeID_t;

/* Linked blocks sharply reduce inefficiencies when using small blocks,
 * they compress better.
 * However, some LZ4 decoders are only compatible with independent blocks */
typedef enum {
    LZ4F_blockLinked=0,
    LZ4F_blockIndependent
    LZ4F_OBSOLETE_ENUM(blockLinked)
    LZ4F_OBSOLETE_ENUM(blockIndependent)
} LZ4F_blockMode_t;

typedef enum {
    LZ4F_noContentChecksum=0,
    LZ4F_contentChecksumEnabled
    LZ4F_OBSOLETE_ENUM(noContentChecksum)
    LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
} LZ4F_contentChecksum_t;

typedef enum {
    LZ4F_noBlockChecksum=0,
    LZ4F_blockChecksumEnabled
} LZ4F_blockChecksum_t;

typedef enum {
    LZ4F_frame=0,
    LZ4F_skippableFrame
    LZ4F_OBSOLETE_ENUM(skippableFrame)
} LZ4F_frameType_t;

#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
typedef LZ4F_blockSizeID_t blockSizeID_t;
typedef LZ4F_blockMode_t blockMode_t;
typedef LZ4F_frameType_t frameType_t;
typedef LZ4F_contentChecksum_t contentChecksum_t;
#endif

/*! LZ4F_frameInfo_t :
 *  makes it possible to set or read frame parameters.
 *  Structure must first be initialized to 0, using memset() or LZ4F_INIT_FRAMEINFO,
 *  which sets all parameters to default.
 *  It's then possible to update selectively some parameters */
typedef struct {
  LZ4F_blockSizeID_t     blockSizeID;         /* max64KB, max256KB, max1MB, max4MB; 0 == default */
  LZ4F_blockMode_t       blockMode;           /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
  LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
  LZ4F_frameType_t       frameType;           /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
  unsigned long long     contentSize;         /* Size of uncompressed content ; 0 == unknown */
  unsigned               dictID;              /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
  LZ4F_blockChecksum_t   blockChecksumFlag;   /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
} LZ4F_frameInfo_t;

#define LZ4F_INIT_FRAMEINFO   { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum }    /* v1.8.3+ */

/*! LZ4F_preferences_t :
 *  makes it possible to supply advanced compression instructions to streaming interface.
 *  Structure must first be initialized to 0, using memset() or LZ4F_INIT_PREFERENCES,
 *  which sets all parameters to default.
 *  All reserved fields must be set to zero. */
typedef struct {
  LZ4F_frameInfo_t frameInfo;
  int      compressionLevel;    /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
  unsigned autoFlush;           /* 1: always flush; reduces usage of internal buffers */
  unsigned favorDecSpeed;       /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */  /* v1.8.2+ */
  unsigned reserved[3];         /* must be zero for forward compatibility */
} LZ4F_preferences_t;

#define LZ4F_INIT_PREFERENCES   { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } }    /* v1.8.3+ */
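
/* Usage sketch (illustrative, not part of the original header): customizing
 * preferences starting from the documented zero-initialized defaults. */
static LZ4F_preferences_t example_prefs(void)
{
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;   /* all fields at default */
    prefs.frameInfo.blockSizeID = LZ4F_max256KB;        /* larger blocks, slightly better ratio */
    prefs.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;
    prefs.compressionLevel = 0;                         /* fast mode */
    return prefs;
}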


/*-*********************************
*  Simple compression function
***********************************/

LZ4FLIB_API int LZ4F_compressionLevel_max(void);   /* v1.8.0+ */

/*! LZ4F_compressFrameBound() :
 *  Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
 * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
 *  Note : this result is only usable with LZ4F_compressFrame().
 *         It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
 */
LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);

/*! LZ4F_compressFrame() :
 *  Compress srcBuffer content into an LZ4-compressed frame.
 *  It's a one shot operation, all input content is consumed, and all output is generated.
 *
 *  Note : it's a stateless operation (no LZ4F_cctx state needed).
 *  In order to reduce load on the allocator, LZ4F_compressFrame(), by default,
 *  uses the stack to allocate space for the compression state and some table.
 *  If this usage of the stack is too much for your application,
 *  consider compiling `lz4frame.c` with compile-time macro LZ4F_HEAPMODE set to 1 instead.
 *  All state allocations will use the Heap.
 *  It also means each invocation of LZ4F_compressFrame() will trigger several internal alloc/free invocations.
 *
 * @dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
 * @preferencesPtr is optional : one can provide NULL, in which case all preferences are set to default.
 * @return : number of bytes written into dstBuffer.
 *           or an error code if it fails (can be tested using LZ4F_isError())
 */
LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
                                const void* srcBuffer, size_t srcSize,
                                const LZ4F_preferences_t* preferencesPtr);
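
/* Usage sketch (illustrative, not part of the original header): one-shot frame
 * compression into a bound-sized heap buffer. Returns compressed size, 0 on failure. */
#include <stdlib.h>

static size_t compress_one_shot(const void* src, size_t srcSize,
                                void** dstOut /* malloc'd, caller frees */)
{
    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);  /* NULL == default prefs */
    void* const dst = malloc(bound);
    if (dst == NULL) return 0;

    size_t const cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
    if (LZ4F_isError(cSize)) { free(dst); return 0; }

    *dstOut = dst;
    return cSize;
}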


/*-***********************************
*  Advanced compression functions
*************************************/
typedef struct LZ4F_cctx_s LZ4F_cctx;   /* incomplete type */
typedef LZ4F_cctx* LZ4F_compressionContext_t;   /* for compatibility with older APIs, prefer using LZ4F_cctx */

typedef struct {
  unsigned stableSrc;    /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
  unsigned reserved[3];
} LZ4F_compressOptions_t;

/*---   Resource Management   ---*/

#define LZ4F_VERSION 100    /* This number can be used to check for an incompatible API breaking change */
LZ4FLIB_API unsigned LZ4F_getVersion(void);

/*! LZ4F_createCompressionContext() :
 *  The first thing to do is to create a compressionContext object,
 *  which will keep track of operation state during streaming compression.
 *  This is achieved using LZ4F_createCompressionContext(), which takes as argument a version,
 *  and a pointer to LZ4F_cctx*, to write the resulting pointer into.
 *  @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
 *  The function provides a pointer to a fully allocated LZ4F_cctx object.
 *  @cctxPtr MUST be != NULL.
 *  If @return != zero, context creation failed.
 *  A created compression context can be employed multiple times for consecutive streaming operations.
 *  Once all streaming compression jobs are completed,
 *  the state object can be released using LZ4F_freeCompressionContext().
 *  Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored.
 *  Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing).
**/
LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);


/*----    Compression    ----*/

#define LZ4F_HEADER_SIZE_MIN  7   /* LZ4 Frame header size can vary, depending on selected parameters */
#define LZ4F_HEADER_SIZE_MAX 19

/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
#define LZ4F_BLOCK_HEADER_SIZE 4

/* Size in bytes of a block checksum footer in little-endian format. */
#define LZ4F_BLOCK_CHECKSUM_SIZE 4

/* Size in bytes of the content checksum. */
#define LZ4F_CONTENT_CHECKSUM_SIZE 4

/*! LZ4F_compressBegin() :
 *  will write the frame header into dstBuffer.
 *  dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
 * `prefsPtr` is optional : NULL can be provided to set all preferences to default.
 * @return : number of bytes written into dstBuffer for the header
 *           or an error code (which can be tested using LZ4F_isError())
 */
LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
                                      void* dstBuffer, size_t dstCapacity,
                                      const LZ4F_preferences_t* prefsPtr);

/*! LZ4F_compressBound() :
 *  Provides minimum dstCapacity required to guarantee success of
 *  LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario.
 *  When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead.
 *  Note that the result is only valid for a single invocation of LZ4F_compressUpdate().
 *  When invoking LZ4F_compressUpdate() multiple times,
 *  if the output buffer is gradually filled up instead of emptied and re-used from its start,
 *  one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound().
 * @return is always the same for a srcSize and prefsPtr.
 *  prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
 *  tech details :
 * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
 *  It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
 * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
 */
LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);

/*! LZ4F_compressUpdate() :
 *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 *  Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
 *  This value is provided by LZ4F_compressBound().
 *  If this condition is not respected, LZ4F_compress() will fail (result is an errorCode).
 *  After an error, the state is left in an undefined state, and must be re-initialized or freed.
 *  If an uncompressed block was written previously, buffered data is flushed
 *  before compressed data is appended.
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 */
LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx,
                                       void* dstBuffer, size_t dstCapacity,
                                       const void* srcBuffer, size_t srcSize,
                                       const LZ4F_compressOptions_t* cOptPtr);

/*! LZ4F_flush() :
 *  When data must be generated and sent immediately, without waiting for a block to be completely filled,
 *  it's possible to call LZ4F_flush(). It will immediately compress any data buffered within cctx.
 * `dstCapacity` must be large enough to ensure the operation will be successful.
 * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
 * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx)
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
 */
LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx,
                              void* dstBuffer, size_t dstCapacity,
                              const LZ4F_compressOptions_t* cOptPtr);

/*! LZ4F_compressEnd() :
 *  To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
 *  It will flush whatever data remained within `cctx` (like LZ4F_flush())
 *  and properly finalize the frame, with an endMark and a checksum.
 * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
 * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark),
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
 *  A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
 */
LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx,
                                    void* dstBuffer, size_t dstCapacity,
                                    const LZ4F_compressOptions_t* cOptPtr);
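
/* Usage sketch (illustrative, not part of the original header): the
 * begin/update/end sequence over FILE* streams. The chunk size is arbitrary;
 * dstCapacity is sized once with LZ4F_compressBound() so every call below is
 * guaranteed to succeed. */
#include <stdio.h>
#include <stdlib.h>

static int stream_compress(FILE* in, FILE* out)
{
    enum { CHUNK = 64 * 1024 };
    LZ4F_cctx* cctx;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return -1;

    size_t const dstCapacity = LZ4F_compressBound(CHUNK, NULL);
    char* const dst = malloc(dstCapacity);
    char* const src = malloc(CHUNK);
    int result = -1;
    if (dst == NULL || src == NULL) goto cleanup;

    {   /* header first : needs at most LZ4F_HEADER_SIZE_MAX (<= dstCapacity) bytes */
        size_t n = LZ4F_compressBegin(cctx, dst, dstCapacity, NULL);
        if (LZ4F_isError(n)) goto cleanup;
        fwrite(dst, 1, n, out);

        for (;;) {
            size_t const readSize = fread(src, 1, CHUNK, in);
            if (readSize == 0) break;
            n = LZ4F_compressUpdate(cctx, dst, dstCapacity, src, readSize, NULL);
            if (LZ4F_isError(n)) goto cleanup;
            fwrite(dst, 1, n, out);          /* n may be 0 when data was only buffered */
        }

        n = LZ4F_compressEnd(cctx, dst, dstCapacity, NULL);   /* endMark + checksum */
        if (LZ4F_isError(n)) goto cleanup;
        fwrite(dst, 1, n, out);
        result = 0;
    }

cleanup:
    free(src); free(dst);
    LZ4F_freeCompressionContext(cctx);
    return result;
}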


/*-*********************************
*  Decompression functions
***********************************/
typedef struct LZ4F_dctx_s LZ4F_dctx;   /* incomplete type */
typedef LZ4F_dctx* LZ4F_decompressionContext_t;   /* compatibility with previous API versions */

typedef struct {
  unsigned stableDst;     /* pledges that last 64KB decompressed data will remain available unmodified between invocations.
                           * This optimization skips storage operations in tmp buffers. */
  unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
                           * Setting this option to 1 once disables all checksums for the rest of the frame. */
  unsigned reserved1;     /* must be set to zero for forward compatibility */
  unsigned reserved0;     /* idem */
} LZ4F_decompressOptions_t;


/* Resource management */

/*! LZ4F_createDecompressionContext() :
 *  Create an LZ4F_dctx object, to track all decompression operations.
 *  @version provided MUST be LZ4F_VERSION.
 *  @dctxPtr MUST be valid.
 *  The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object.
 *  The @return is an errorCode, which can be tested using LZ4F_isError().
 *  dctx memory can be released using LZ4F_freeDecompressionContext();
 *  Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released.
 *  That is, it should be == 0 if decompression has been completed fully and correctly.
 */
LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);


/*-***********************************
*  Streaming decompression functions
*************************************/

#define LZ4F_MAGICNUMBER 0x184D2204U
#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5

/*! LZ4F_headerSize() : v1.9.0+
 *  Provide the header size of a frame starting at `src`.
 * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH,
 *  which is enough to decode the header length.
 * @return : size of frame header
 *           or an error code, which can be tested using LZ4F_isError()
 *  note : Frame header size is variable, but is guaranteed to be
 *         >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
 */
LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);

/*! LZ4F_getFrameInfo() :
 *  This function extracts frame parameters (max blockSize, dictID, etc.).
 *  Its usage is optional: user can also invoke LZ4F_decompress() directly.
 *
 *  Extracted information will fill an existing LZ4F_frameInfo_t structure.
 *  This can be useful for allocation and dictionary identification purposes.
 *
 *  LZ4F_getFrameInfo() can work in the following situations :
 *
 *  1) At the beginning of a new frame, before any invocation of LZ4F_decompress().
 *     It will decode header from `srcBuffer`,
 *     consuming the header and starting the decoding process.
 *
 *     Input size must be large enough to contain the full frame header.
 *     Frame header size can be known beforehand by LZ4F_headerSize().
 *     Frame header size is variable, but is guaranteed to be
 *     >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
 *     Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work.
 *     It's allowed to provide more input data than the header size,
 *     LZ4F_getFrameInfo() will only consume the header.
 *
 *     If input size is not large enough,
 *     aka if it's smaller than header size,
 *     function will fail and return an error code.
 *
 *  2) After decoding has been started,
 *     it's possible to invoke LZ4F_getFrameInfo() anytime
 *     to extract already decoded frame parameters stored within dctx.
 *
 *     Note that, if decoding has barely started,
 *     and not yet read enough information to decode the header,
 *     LZ4F_getFrameInfo() will fail.
 *
 *  The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value).
 *  LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started,
 *  and when decoding the header has been successful.
 *  Decompression must then resume from (srcBuffer + *srcSizePtr).
 *
 * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
 *           or an error code which can be tested using LZ4F_isError().
 *  note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
 *  note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
 */
LZ4FLIB_API size_t
LZ4F_getFrameInfo(LZ4F_dctx* dctx,
                  LZ4F_frameInfo_t* frameInfoPtr,
                  const void* srcBuffer, size_t* srcSizePtr);
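
/* Usage sketch (illustrative, not part of the original header): probing a frame
 * header before decompression, then resuming from srcBuffer + consumed exactly
 * as documented above. */
static int probe_frame(LZ4F_dctx* dctx, const void* src, size_t srcSize,
                       unsigned long long* contentSizeOut)
{
    LZ4F_frameInfo_t info;
    size_t consumed = srcSize;              /* in: bytes available; out: bytes consumed */

    if (LZ4F_isError(LZ4F_getFrameInfo(dctx, &info, src, &consumed)))
        return -1;

    *contentSizeOut = info.contentSize;     /* 0 means "unknown" */
    /* LZ4F_decompress() must continue from (const char*)src + consumed */
    return (int)consumed;
}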

/*! LZ4F_decompress() :
 *  Call this function repetitively to regenerate data compressed in `srcBuffer`.
 *
 *  The function requires a valid dctx state.
 *  It will read up to *srcSizePtr bytes from srcBuffer,
 *  and decompress data into dstBuffer, of capacity *dstSizePtr.
 *
 *  The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
 *  The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value).
 *
 *  The function does not necessarily read all input bytes, so always check value in *srcSizePtr.
 *  Unconsumed source data must be presented again in subsequent invocations.
 *
 * `dstBuffer` can freely change between each consecutive function invocation.
 * `dstBuffer` content will be overwritten.
 *
 *  Note: if `LZ4F_getFrameInfo()` is called before `LZ4F_decompress()`, srcBuffer must be updated to reflect
 *  the number of bytes consumed after reading the frame header. Failure to update srcBuffer before calling
 * `LZ4F_decompress()` will cause decompression failure or, even worse, successful but incorrect decompression.
 *  See the `LZ4F_getFrameInfo()` docs for details.
 *
 * @return : a hint of how many `srcSize` bytes LZ4F_decompress() expects for next call.
 *  Schematically, it's the size of the current (or remaining) compressed block + header of next block.
 *  Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
 *  This is just a hint though, it's always possible to provide any srcSize.
 *
 *  When a frame is fully decoded, @return will be 0 (no more data expected).
 *  When provided with more bytes than necessary to decode a frame,
 *  LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0.
 *
 *  If decompression failed, @return is an error code, which can be tested using LZ4F_isError().
 *  After a decompression error, the `dctx` context is not resumable.
 *  Use LZ4F_resetDecompressionContext() to return to clean state.
 *
 *  After a frame is fully decoded, dctx can be used again to decompress another frame.
 */
LZ4FLIB_API size_t
LZ4F_decompress(LZ4F_dctx* dctx,
                void* dstBuffer, size_t* dstSizePtr,
                const void* srcBuffer, size_t* srcSizePtr,
                const LZ4F_decompressOptions_t* dOptPtr);
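
/* Usage sketch (illustrative, not part of the original header): the core
 * decompression loop over one in-memory frame. Buffer sizes are arbitrary. */
#include <stdio.h>

static int stream_decompress(LZ4F_dctx* dctx,
                             const char* src, size_t srcSize,
                             FILE* out)
{
    char dst[64 * 1024];
    size_t pos = 0;

    while (pos < srcSize) {
        size_t dstSize = sizeof(dst);
        size_t consumed = srcSize - pos;    /* offer everything that remains */

        size_t const hint = LZ4F_decompress(dctx, dst, &dstSize,
                                            src + pos, &consumed, NULL);
        if (LZ4F_isError(hint)) {
            LZ4F_resetDecompressionContext(dctx);   /* dctx is not resumable after an error */
            return -1;
        }
        fwrite(dst, 1, dstSize, out);
        pos += consumed;                    /* unconsumed input is offered again next turn */
        if (hint == 0) break;               /* frame fully decoded */
    }
    return 0;
}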


/*! LZ4F_resetDecompressionContext() : added in v1.8.0
 *  In case of an error, the context is left in "undefined" state.
 *  In which case, it's necessary to reset it, before re-using it.
 *  This method can also be used to abruptly stop any unfinished decompression,
 *  and start a new one using same context resources. */
LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx);   /* always successful */



#if defined (__cplusplus)
}
#endif

#endif  /* LZ4F_H_09782039843 */

#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
#define LZ4F_H_STATIC_09782039843

#if defined (__cplusplus)
extern "C" {
#endif

/* These declarations are not stable and may change in the future.
 * They are therefore only safe to depend on
 * when the caller is statically linked against the library.
 * To access their declarations, define LZ4F_STATIC_LINKING_ONLY.
 *
 * By default, these symbols aren't published into shared/dynamic libraries.
 * You can override this behavior and force them to be published
 * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS.
 * Use at your own risk.
 */
#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
# define LZ4FLIB_STATIC_API LZ4FLIB_API
#else
# define LZ4FLIB_STATIC_API
#endif


/* ---   Error List   --- */
#define LZ4F_LIST_ERRORS(ITEM) \
        ITEM(OK_NoError) \
        ITEM(ERROR_GENERIC) \
        ITEM(ERROR_maxBlockSize_invalid) \
        ITEM(ERROR_blockMode_invalid) \
        ITEM(ERROR_contentChecksumFlag_invalid) \
        ITEM(ERROR_compressionLevel_invalid) \
        ITEM(ERROR_headerVersion_wrong) \
        ITEM(ERROR_blockChecksum_invalid) \
        ITEM(ERROR_reservedFlag_set) \
        ITEM(ERROR_allocation_failed) \
        ITEM(ERROR_srcSize_tooLarge) \
        ITEM(ERROR_dstMaxSize_tooSmall) \
        ITEM(ERROR_frameHeader_incomplete) \
        ITEM(ERROR_frameType_unknown) \
        ITEM(ERROR_frameSize_wrong) \
        ITEM(ERROR_srcPtr_wrong) \
        ITEM(ERROR_decompressionFailed) \
        ITEM(ERROR_headerChecksum_invalid) \
        ITEM(ERROR_contentChecksum_invalid) \
        ITEM(ERROR_frameDecoding_alreadyStarted) \
        ITEM(ERROR_compressionState_uninitialized) \
        ITEM(ERROR_parameter_null) \
        ITEM(ERROR_maxCode)

#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,

/* enum list is exposed, to handle specific errors */
typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
              _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes;

LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
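
/* Usage sketch (illustrative, not part of the original header): mapping a raw
 * size_t result to a specific LZ4F_errorCodes value for targeted handling. */
static int is_dst_too_small(size_t functionResult)
{
    if (!LZ4F_isError(functionResult)) return 0;
    return LZ4F_getErrorCode(functionResult) == LZ4F_ERROR_dstMaxSize_tooSmall;
}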


/*! LZ4F_getBlockSize() :
 *  Return, in scalar format (size_t),
 *  the maximum block size associated with blockSizeID.
**/
LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID);

/*! LZ4F_uncompressedUpdate() :
 *  LZ4F_uncompressedUpdate() can be called repetitively to add as much uncompressed data as necessary.
 *  Important rule: dstCapacity MUST be large enough to store the entire source buffer, as
 *  no compression is done for this operation.
 *  If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode).
 *  After an error, the state is left in an undefined state, and must be re-initialized or freed.
 *  If a compressed block was written previously, buffered data is flushed
 *  before uncompressed data is appended.
 *  This is only supported when LZ4F_blockIndependent is used.
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 */
LZ4FLIB_STATIC_API size_t
LZ4F_uncompressedUpdate(LZ4F_cctx* cctx,
                        void* dstBuffer, size_t dstCapacity,
                        const void* srcBuffer, size_t srcSize,
                        const LZ4F_compressOptions_t* cOptPtr);

/**********************************
 *  Bulk processing dictionary API
 *********************************/

/* A Dictionary is useful for the compression of small messages (KB range).
 * It dramatically improves compression efficiency.
 *
 * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
 * Best results are generally achieved by using Zstandard's Dictionary Builder
 * to generate a high-quality dictionary from a set of samples.
 *
 * Loading a dictionary has a cost, since it involves construction of tables.
 * The Bulk processing dictionary API makes it possible to share this cost
 * over an arbitrary number of compression jobs, even concurrently,
 * markedly improving compression latency for these cases.
 *
 * The same dictionary will have to be used on the decompression side
 * for decoding to be successful.
 * To help identify the correct dictionary at decoding stage,
 * the frame header allows optional embedding of a dictID field.
 */
typedef struct LZ4F_CDict_s LZ4F_CDict;

/*! LZ4_createCDict() :
 *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
 *  LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 *  LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
 * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */
LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
LZ4FLIB_STATIC_API void        LZ4F_freeCDict(LZ4F_CDict* CDict);


/*! LZ4_compressFrame_usingCDict() :
 *  Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
 *  cctx must point to a context created by LZ4F_createCompressionContext().
 *  If cdict==NULL, compress without a dictionary.
 *  dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
 *  If this condition is not respected, function will fail (@return an errorCode).
 *  The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
 *  but it's not recommended, as it's the only way to provide dictID in the frame header.
 * @return : number of bytes written into dstBuffer.
 *           or an error code if it fails (can be tested using LZ4F_isError()) */
LZ4FLIB_STATIC_API size_t
LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              const LZ4F_CDict* cdict,
                              const LZ4F_preferences_t* preferencesPtr);
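
/* Usage sketch (illustrative, not part of the original header): compressing
 * many small messages with one shared digested dictionary. dictID 1 is an
 * arbitrary example value. */
static size_t compress_with_dict(LZ4F_cctx* cctx, const LZ4F_CDict* cdict,
                                 void* dst, size_t dstCapacity,
                                 const void* msg, size_t msgSize)
{
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
    prefs.frameInfo.dictID = 1;   /* embedded in the header so decoders can pick the right dictionary */
    return LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                         msg, msgSize, cdict, &prefs);
}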


/*! LZ4F_compressBegin_usingCDict() :
 *  Inits streaming dictionary compression, and writes the frame header into dstBuffer.
 *  dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
 * `prefsPtr` is optional : you may provide NULL as argument,
 *  however, it's the only way to provide dictID in the frame header.
 * @return : number of bytes written into dstBuffer for the header,
 *           or an error code (which can be tested using LZ4F_isError()) */
LZ4FLIB_STATIC_API size_t
LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
                              void* dstBuffer, size_t dstCapacity,
                              const LZ4F_CDict* cdict,
                              const LZ4F_preferences_t* prefsPtr);


/*! LZ4F_decompress_usingDict() :
 *  Same as LZ4F_decompress(), using a predefined dictionary.
 *  Dictionary is used "in place", without any preprocessing.
 *  It must remain accessible throughout the entire frame decoding. */
LZ4FLIB_STATIC_API size_t
LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
                          void* dstBuffer, size_t* dstSizePtr,
                          const void* srcBuffer, size_t* srcSizePtr,
                          const void* dict, size_t dictSize,
                          const LZ4F_decompressOptions_t* decompressOptionsPtr);


/*! Custom memory allocation : v1.9.4+
 *  These prototypes make it possible to pass custom allocation/free functions.
 *  LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below.
 *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
 */
typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size);
typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size);
typedef void  (*LZ4F_FreeFunction) (void* opaqueState, void* address);
typedef struct {
    LZ4F_AllocFunction customAlloc;
    LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */
    LZ4F_FreeFunction customFree;
    void* opaqueState;
} LZ4F_CustomMem;
static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */

LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version);
LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version);
LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize);
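
/* Editor's note: an illustrative sketch, not part of the original header.
 * It routes all LZ4F allocations through plain malloc/free wrappers; in a
 * real program the opaqueState pointer could carry an arena or statistics. */
#include <stdlib.h>

static void* sketch_alloc(void* opaqueState, size_t size)
{   (void)opaqueState; return malloc(size); }

static void sketch_free(void* opaqueState, void* address)
{   (void)opaqueState; free(address); }

/* customCalloc left NULL: LZ4F falls back to customAlloc + memset */
static const LZ4F_CustomMem sketch_mem = { sketch_alloc, NULL, sketch_free, NULL };

static LZ4F_cctx* sketch_create_cctx(void)
{   return LZ4F_createCompressionContext_advanced(sketch_mem, LZ4F_VERSION); }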


#if defined (__cplusplus)
}
#endif

#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */
@ -1,47 +0,0 @@
/*
   LZ4 auto-framing library
   Header File for static linking only
   Copyright (C) 2011-2020, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : https://github.com/lz4/lz4
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/

#ifndef LZ4FRAME_STATIC_H_0398209384
#define LZ4FRAME_STATIC_H_0398209384

/* The declarations that formerly were made here have been merged into
 * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward,
 * it is recommended to simply include that header directly.
 */

#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"

#endif /* LZ4FRAME_STATIC_H_0398209384 */
File diff suppressed because it is too large
@ -1,413 +0,0 @@
/*
   LZ4 HC - High Compression Mode of LZ4
   Header File
   Copyright (C) 2011-2020, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : https://github.com/lz4/lz4
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#ifndef LZ4_HC_H_19834876238432
#define LZ4_HC_H_19834876238432

#if defined (__cplusplus)
extern "C" {
#endif

/* --- Dependency --- */
/* note : lz4hc requires lz4.h/lz4.c for compilation */
#include "lz4.h"   /* stddef, LZ4LIB_API, LZ4_DEPRECATED */


/* --- Useful constants --- */
#define LZ4HC_CLEVEL_MIN         3
#define LZ4HC_CLEVEL_DEFAULT     9
#define LZ4HC_CLEVEL_OPT_MIN    10
#define LZ4HC_CLEVEL_MAX        12


/*-************************************
 *  Block Compression
 **************************************/
/*! LZ4_compress_HC() :
 *  Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
 * `dst` must be already allocated.
 *  Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
 *  Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
 * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
 *  Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
 * @return : the number of bytes written into 'dst'
 *           or 0 if compression fails.
 */
LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
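
/* Editor's note: a minimal usage sketch, not part of the original header.
 * One-shot HC compression into a worst-case-sized buffer; LZ4_compressBound()
 * and LZ4_decompress_safe() come from "lz4.h". */
#include <stdlib.h>

static char* sketch_hc_compress(const char* src, int srcSize, int* compressedSize)
{
    int const bound = LZ4_compressBound(srcSize);  /* guarantees success */
    char* const dst = (char*)malloc((size_t)bound);
    if (dst == NULL) return NULL;
    *compressedSize = LZ4_compress_HC(src, dst, srcSize, bound, LZ4HC_CLEVEL_MAX);
    if (*compressedSize == 0) { free(dst); return NULL; }  /* 0 means failure */
    return dst;  /* decode later with LZ4_decompress_safe() */
}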


/* Note :
 *   Decompression functions are provided within "lz4.h" (BSD license)
 */


/*! LZ4_compress_HC_extStateHC() :
 *  Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
 * `state` size is provided by LZ4_sizeofStateHC().
 *  Memory segment must be aligned on 8-byte boundaries (which a normal malloc() should do properly).
 */
LZ4LIB_API int LZ4_sizeofStateHC(void);
LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);


/*! LZ4_compress_HC_destSize() : v1.9.0+
 *  Will compress as much data as possible from `src`
 *  to fit into `targetDstSize` budget.
 *  Result is provided in 2 parts :
 * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
 *           or 0 if compression fails.
 * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how many bytes were read from `src`
 */
LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
                                        const char* src, char* dst,
                                        int* srcSizePtr, int targetDstSize,
                                        int compressionLevel);
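
/* Editor's note: a usage sketch, not part of the original header. It fills a
 * fixed 4 KB destination budget and reports how much of `src` was consumed;
 * malloc() provides the 8-byte alignment the state requires. */
static int sketch_fill_budget(const char* src, int srcSize,
                              char* dst4k, int* srcConsumed)
{
    void* const state = malloc((size_t)LZ4_sizeofStateHC());
    int written = 0;
    *srcConsumed = srcSize;              /* in: bytes available */
    if (state != NULL) {
        written = LZ4_compress_HC_destSize(state, src, dst4k, srcConsumed,
                                           4096, LZ4HC_CLEVEL_DEFAULT);
        /* out: *srcConsumed = bytes actually read from src */
    }
    free(state);
    return written;                      /* <= 4096; 0 means failure */
}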


/*-************************************
 *  Streaming Compression
 *  Bufferless synchronous API
 **************************************/
typedef union LZ4_streamHC_u LZ4_streamHC_t;   /* incomplete type (defined later) */

/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
 *  These functions create and release memory for LZ4 HC streaming state.
 *  Newly created states are automatically initialized.
 *  The same state can be used multiple times consecutively,
 *  starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
 */
LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
LZ4LIB_API int             LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);

/*
  These functions compress data in successive blocks of any size,
  using previous blocks as dictionary, to improve compression ratio.
  One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
  There is an exception for ring buffers, which can be smaller than 64 KB.
  Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().

  Before starting compression, state must be allocated and properly initialized.
  LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.

  Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
  or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
  LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
  which is automatically the case when the state is created using LZ4_createStreamHC().

  After reset, a first "fictional block" can be designated as initial dictionary,
  using LZ4_loadDictHC() (optional).

  Invoke LZ4_compress_HC_continue() to compress each successive block.
  The number of blocks is unlimited.
  Previous input blocks, including initial dictionary when present,
  must remain accessible and unmodified during compression.

  It's allowed to update the compression level anytime between blocks,
  using LZ4_setCompressionLevel() (experimental).

  'dst' buffer should be sized to handle worst case scenarios
  (see LZ4_compressBound(); it ensures compression success).
  In case of failure, the API does not guarantee recovery,
  so the state _must_ be reset.
  To ensure compression success
  whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
  consider using LZ4_compress_HC_continue_destSize().

  Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
  it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
  The return value of LZ4_saveDictHC() is the size of the dictionary effectively saved into 'safeBuffer' (<= 64 KB).

  After completing a streaming compression,
  it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
  just by resetting it, using LZ4_resetStreamHC_fast().
  (An editor's usage sketch follows LZ4_saveDictHC() below.)
*/

LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel);   /* v1.9.0+ */
LZ4LIB_API int  LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);

LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
                                         const char* src, char* dst,
                                         int srcSize, int maxDstSize);

/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
 *  Similar to LZ4_compress_HC_continue(),
 *  but will read as much data as possible from `src`
 *  to fit into `targetDstSize` budget.
 *  Result is provided in 2 parts :
 * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
 *           or 0 if compression fails.
 * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how many bytes were read from `src`.
 *  Note that this function may not consume the entire input.
 */
LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
                                                 const char* src, char* dst,
                                                 int* srcSizePtr, int targetDstSize);

LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
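
/* Editor's note: a streaming sketch, not part of the original header. Each
 * block can reference up to 64 KB of the previously compressed blocks, which
 * must stay readable and unmodified for the duration of the stream. */
static void sketch_hc_stream(const char* const blocks[], const int blockSizes[],
                             int nbBlocks, char* dst, int dstCapacity)
{
    LZ4_streamHC_t* const stream = LZ4_createStreamHC();
    int i;
    if (stream == NULL) return;
    LZ4_resetStreamHC_fast(stream, LZ4HC_CLEVEL_DEFAULT);  /* new stream */
    for (i = 0; i < nbBlocks; i++) {
        int const written = LZ4_compress_HC_continue(stream, blocks[i], dst,
                                                     blockSizes[i], dstCapacity);
        if (written <= 0) break;   /* on failure the state must be reset */
        /* transmit or store `written` bytes of dst before reusing it ... */
    }
    LZ4_freeStreamHC(stream);
}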



/*^**********************************************
 * !!!!!!   STATIC LINKING ONLY   !!!!!!
 ***********************************************/

/*-******************************************************************
 * PRIVATE DEFINITIONS :
 * Do not use these definitions directly.
 * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
 * Declare an `LZ4_streamHC_t` directly, rather than any type below.
 * Even then, only do so in the context of static linking, as definitions may change between versions.
 ********************************************************************/

#define LZ4HC_DICTIONARY_LOGSIZE 16
#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)

#define LZ4HC_HASH_LOG 15
#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)


/* Never ever use these definitions directly !
 * Declare or allocate an LZ4_streamHC_t instead.
**/
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
    LZ4_u32   hashTable[LZ4HC_HASHTABLESIZE];
    LZ4_u16   chainTable[LZ4HC_MAXD];
    const LZ4_byte* end;         /* next block here to continue on current prefix */
    const LZ4_byte* prefixStart; /* Indexes relative to this position */
    const LZ4_byte* dictStart;   /* alternate reference for extDict */
    LZ4_u32   dictLimit;         /* below that point, need extDict */
    LZ4_u32   lowLimit;          /* below that point, no more dict */
    LZ4_u32   nextToUpdate;      /* index from which to continue dictionary update */
    short     compressionLevel;
    LZ4_i8    favorDecSpeed;     /* favor decompression speed if this flag set,
                                    otherwise, favor compression ratio */
    LZ4_i8    dirty;             /* stream has to be fully reset if this flag is set */
    const LZ4HC_CCtx_internal* dictCtx;
};

#define LZ4_STREAMHC_MINSIZE  262200  /* static size, for inter-version compatibility */
union LZ4_streamHC_u {
    char minStateSize[LZ4_STREAMHC_MINSIZE];
    LZ4HC_CCtx_internal internal_donotuse;
};   /* previously typedef'd to LZ4_streamHC_t */

/* LZ4_streamHC_t :
 * This structure allows static allocation of LZ4 HC streaming state.
 * It can be used to allocate the state statically, on the stack, or as part of a larger structure.
 *
 * Such state **must** be initialized using LZ4_initStreamHC() before first use.
 *
 * Note that invoking LZ4_initStreamHC() is not required when
 * the state was created using LZ4_createStreamHC() (which is recommended).
 * Using the normal builder, a newly created state is automatically initialized.
 *
 * Static allocation shall only be used in combination with static linking.
 */

/* LZ4_initStreamHC() : v1.9.0+
 * Required before first use of a statically allocated LZ4_streamHC_t.
 * Before v1.9.0 : use LZ4_resetStreamHC() instead
 */
LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC(void* buffer, size_t size);


/*-************************************
 * Deprecated Functions
 **************************************/
/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */

/* deprecated compression functions */
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC               (const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC               (void* state, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC              (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue               (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);

/* Obsolete streaming functions; degraded functionality; do not use!
 *
 * In order to perform streaming compression, these functions depended on data
 * that is no longer tracked in the state. They have been preserved as well as
 * possible: using them will still produce a correct output. However, use of
 * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
 * than preserve a window-sized chunk of history.
 */
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
LZ4_DEPRECATED("use LZ4_freeStreamHC() instead")   LZ4LIB_API int   LZ4_freeHC (void* LZ4HC_Data);
#endif
LZ4_DEPRECATED("use LZ4_saveDictHC() instead")     LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue               (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
LZ4_DEPRECATED("use LZ4_initStreamHC() instead")   LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);


/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
 * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
 * which is now the recommended function to start a new stream of blocks,
 * but cannot be used to initialize a memory segment containing arbitrary garbage data.
 *
 * It is recommended to switch to LZ4_initStreamHC().
 * LZ4_resetStreamHC() will generate deprecation warnings in a future version.
 */
LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);


#if defined (__cplusplus)
}
#endif

#endif /* LZ4_HC_H_19834876238432 */


/*-**************************************************
 * !!!!!     STATIC LINKING ONLY     !!!!!
 * Following definitions are considered experimental.
 * They should not be linked from DLL,
 * as there is no guarantee of API stability yet.
 * Prototypes will be promoted to "stable" status
 * after successful usage in real-life scenarios.
 ***************************************************/
#ifdef LZ4_HC_STATIC_LINKING_ONLY   /* protection macro */
#ifndef LZ4_HC_SLO_098092834
#define LZ4_HC_SLO_098092834

#define LZ4_STATIC_LINKING_ONLY   /* LZ4LIB_STATIC_API */
#include "lz4.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
 *  It's possible to change the compression level
 *  between successive invocations of LZ4_compress_HC_continue*()
 *  for dynamic adaptation.
 */
LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
    LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);

/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
 *  The optimal parser will favor decompression speed over compression ratio.
 *  Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
 */
LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
    LZ4_streamHC_t* LZ4_streamHCPtr, int favor);

/*! LZ4_resetStreamHC_fast() : v1.9.0+
 *  When an LZ4_streamHC_t is known to be in an internally coherent state,
 *  it can often be prepared for a new compression with almost no work, only
 *  sometimes falling back to the full, expensive reset that is always required
 *  when the stream is in an indeterminate state (i.e., the reset performed by
 *  LZ4_resetStreamHC()).
 *
 *  LZ4_streamHCs are guaranteed to be in a valid state when:
 *  - returned from LZ4_createStreamHC()
 *  - reset by LZ4_resetStreamHC()
 *  - memset(stream, 0, sizeof(LZ4_streamHC_t))
 *  - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
 *  - the stream was in a valid state and was then used in any compression call
 *    that returned success
 *  - the stream was in an indeterminate state and was used in a compression
 *    call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
 *    returned success
 *
 *  Note:
 *  A stream that was last used in a compression call that returned an error
 *  may be passed to this function. However, it will be fully reset, which will
 *  clear any existing history and settings from the context.
 */
LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
    LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);

/*! LZ4_compress_HC_extStateHC_fastReset() :
 *  A variant of LZ4_compress_HC_extStateHC().
 *
 *  Using this variant avoids an expensive initialization step. It is only safe
 *  to call if the state buffer is known to be correctly initialized already
 *  (see above comment on LZ4_resetStreamHC_fast() for a definition of
 *  "correctly initialized"). From a high level, the difference is that this
 *  function initializes the provided state with a call to
 *  LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
 *  call to LZ4_resetStreamHC().
 */
LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
    void* state,
    const char* src, char* dst,
    int srcSize, int dstCapacity,
    int compressionLevel);

/*! LZ4_attach_HC_dictionary() :
 *  This is an experimental API that allows for the efficient use of a
 *  static dictionary many times.
 *
 *  Rather than re-loading the dictionary buffer into a working context before
 *  each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
 *  working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
 *  in which the working stream references the dictionary stream in-place.
 *
 *  Several assumptions are made about the state of the dictionary stream.
 *  Currently, only streams which have been prepared by LZ4_loadDictHC() should
 *  be expected to work.
 *
 *  Alternatively, the provided dictionary stream pointer may be NULL, in which
 *  case any existing dictionary stream is unset.
 *
 *  A dictionary should only be attached to a stream without any history (i.e.,
 *  a stream that has just been reset).
 *
 *  The dictionary will remain attached to the working stream only for the
 *  current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
 *  dictionary context association from the working stream. The dictionary
 *  stream (and source buffer) must remain in-place / accessible / unchanged
 *  through the lifetime of the stream session.
 */
LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
    LZ4_streamHC_t *working_stream,
    const LZ4_streamHC_t *dictionary_stream);
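
/* Editor's note: a usage sketch, not part of the original header. It loads a
 * dictionary once, then attaches it (copy-free) to a freshly reset working
 * stream before compressing one block. */
static int sketch_hc_with_attached_dict(const char* dict, int dictSize,
                                        const char* src, char* dst,
                                        int srcSize, int dstCapacity)
{
    LZ4_streamHC_t* const dictStream = LZ4_createStreamHC();
    LZ4_streamHC_t* const work = LZ4_createStreamHC();
    int written = 0;
    if (dictStream != NULL && work != NULL) {
        LZ4_loadDictHC(dictStream, dict, dictSize);           /* prepare once */
        LZ4_resetStreamHC_fast(work, LZ4HC_CLEVEL_DEFAULT);   /* no history */
        LZ4_attach_HC_dictionary(work, dictStream);           /* in-place ref */
        written = LZ4_compress_HC_continue(work, src, dst, srcSize, dstCapacity);
    }
    LZ4_freeStreamHC(work);
    LZ4_freeStreamHC(dictStream);   /* must outlive the stream session */
    return written;                 /* 0 means failure */
}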

#if defined (__cplusplus)
}
#endif

#endif /* LZ4_HC_SLO_098092834 */
#endif /* LZ4_HC_STATIC_LINKING_ONLY */
File diff suppressed because it is too large
@ -1,328 +0,0 @@
/*
   xxHash - Extremely Fast Hash algorithm
   Header File
   Copyright (C) 2012-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - xxHash source repository : https://github.com/Cyan4973/xxHash
*/

/* Notice extracted from xxHash homepage :

xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

#if defined (__cplusplus)
extern "C" {
#endif


/* ****************************
*  Definitions
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/* ****************************
*  API modifier
******************************/
/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
 *  This is useful to include xxhash functions in `static` mode
 *  in order to inline them, and remove their symbol from the public list.
 *  Inlining can offer dramatic performance improvement on small keys.
 *  Methodology :
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 * `xxhash.c` is automatically included.
 *  It's not useful to compile and link it as a separate module.
 */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  ifndef XXH_STATIC_LINKING_ONLY
#    define XXH_STATIC_LINKING_ONLY
#  endif
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif
#else
#  define XXH_PUBLIC_API   /* do nothing */
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */

/*! XXH_NAMESPACE, aka Namespace Emulation :
 *
 * If you want to include _and expose_ xxHash functions from within your own library,
 * but also want to avoid symbol collisions with other libraries which may also include xxHash,
 * you can use XXH_NAMESPACE to automatically prefix any public symbol from the xxhash library
 * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
 *
 * Note that no change is required within the calling program as long as it includes `xxhash.h` :
 * regular symbol names will be automatically translated by this header.
 */
#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#endif
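
/* Editor's note: a namespace sketch, not part of the original header. With the
 * define below (the prefix "mylib_" is arbitrary), the exported symbol becomes
 * mylib_XXH32, yet calling code keeps using the regular XXH32 name, which this
 * header translates automatically. */
#define XXH_NAMESPACE mylib_
#include "xxhash.h"

static unsigned sketch_hash32(const void* data, size_t len)
{   return (unsigned)XXH32(data, len, 0);   /* links against mylib_XXH32 */ }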


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    6
#define XXH_VERSION_RELEASE  5
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);


/*-**********************************************************************
*  32-bit hash
************************************************************************/
typedef unsigned int XXH32_hash_t;

/*! XXH32() :
    Calculate the 32-bit hash of a sequence of "length" bytes stored at memory address "input".
    The memory between input & input+length must be valid (allocated and read-accessible).
    "seed" can be used to alter the result predictably.
    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);

/*====== Streaming ======*/
typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/*
 * Streaming functions generate the xxHash of an input provided in multiple segments.
 * Note that, for small input, they are slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * XXH state must first be allocated, using XXH*_createState().
 *
 * Start a new hash by initializing the state with a seed, using XXH*_reset().
 *
 * Then, feed the hash state by calling XXH*_update() as many times as necessary.
 * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using XXH*_digest().
 * This function returns the 32- or 64-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a digest,
 * and generate new hashes later on, by calling XXH*_digest() again.
 *
 * When done, free the XXH state space if it was allocated dynamically.
 */
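
/* Editor's note: a streaming sketch, not part of the original header. It
 * hashes an input split across several segments, then frees the state. */
static XXH32_hash_t sketch_hash_segments(const void* const segs[],
                                         const size_t lens[], size_t nbSegs)
{
    XXH32_hash_t h = 0;
    size_t i;
    XXH32_state_t* const state = XXH32_createState();
    if (state == NULL) return 0;
    XXH32_reset(state, 0);                          /* seed = 0 */
    for (i = 0; i < nbSegs; i++) {
        if (XXH32_update(state, segs[i], lens[i]) != XXH_OK) break;
    }
    h = XXH32_digest(state);                        /* usable at any point */
    XXH32_freeState(state);
    return h;
}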

/*====== Canonical representation ======*/

typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);

/* The default return types of the XXH functions are primitive unsigned 32- and 64-bit integers.
 * The canonical representation uses human-readable write convention, aka big-endian (large digits first).
 * These functions allow transformation of a hash result into and from its canonical format.
 * This way, hash values can be written into a file / memory, and remain comparable across different systems and programs.
 */


#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
typedef unsigned long long XXH64_hash_t;

/*! XXH64() :
    Calculate the 64-bit hash of a sequence of "length" bytes stored at memory address "input".
    "seed" can be used to alter the result predictably.
    This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);

/*====== Streaming ======*/
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*====== Canonical representation ======*/
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
#endif  /* XXH_NO_LONG_LONG */



#ifdef XXH_STATIC_LINKING_ONLY

/* ================================================================================================
   This section contains declarations which are not guaranteed to remain stable.
   They may change in future versions, becoming incompatible with a different version of the library.
   These declarations should only be used with static linking.
   Never use them in association with dynamic linking !
=================================================================================================== */

/* These definitions are only present to allow
 * static allocation of XXH state, on stack or in a struct for example.
 * Never **ever** use members directly. */

#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>

struct XXH32_state_s {
   uint32_t total_len_32;
   uint32_t large_len;
   uint32_t v1;
   uint32_t v2;
   uint32_t v3;
   uint32_t v4;
   uint32_t mem32[4];
   uint32_t memsize;
   uint32_t reserved;   /* never read nor written, might be removed in a future version */
};   /* typedef'd to XXH32_state_t */

struct XXH64_state_s {
   uint64_t total_len;
   uint64_t v1;
   uint64_t v2;
   uint64_t v3;
   uint64_t v4;
   uint64_t mem64[4];
   uint32_t memsize;
   uint32_t reserved[2];   /* never read nor written, might be removed in a future version */
};   /* typedef'd to XXH64_state_t */

# else

struct XXH32_state_s {
   unsigned total_len_32;
   unsigned large_len;
   unsigned v1;
   unsigned v2;
   unsigned v3;
   unsigned v4;
   unsigned mem32[4];
   unsigned memsize;
   unsigned reserved;   /* never read nor written, might be removed in a future version */
};   /* typedef'd to XXH32_state_t */

#   ifndef XXH_NO_LONG_LONG   /* remove 64-bit support */
struct XXH64_state_s {
   unsigned long long total_len;
   unsigned long long v1;
   unsigned long long v2;
   unsigned long long v3;
   unsigned long long v4;
   unsigned long long mem64[4];
   unsigned memsize;
   unsigned reserved[2];   /* never read nor written, might be removed in a future version */
};   /* typedef'd to XXH64_state_t */
#   endif

# endif


#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  include "xxhash.c"   /* include xxhash function bodies as `static`, for inlining */
#endif

#endif /* XXH_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif

#endif /* XXHASH_H_5627135585666179 */
@ -1,949 +0,0 @@
// Copyright (C) 2018-2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef QUICHE_H
#define QUICHE_H

#if defined(__cplusplus)
extern "C" {
#endif

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__)
#include <winsock2.h>
#include <ws2tcpip.h>
#include <time.h>
#else
#include <sys/socket.h>
#include <sys/time.h>
#endif

#ifdef __unix__
#include <sys/types.h>
#endif
#ifdef _MSC_VER
#include <BaseTsd.h>
#define ssize_t SSIZE_T
#endif

// QUIC transport API.
//

// The current QUIC wire version.
#define QUICHE_PROTOCOL_VERSION 0x00000001

// The maximum length of a connection ID.
#define QUICHE_MAX_CONN_ID_LEN 20

// The minimum length of Initial packets sent by a client.
#define QUICHE_MIN_CLIENT_INITIAL_LEN 1200

enum quiche_error {
    // There is no more work to do.
    QUICHE_ERR_DONE = -1,

    // The provided buffer is too short.
    QUICHE_ERR_BUFFER_TOO_SHORT = -2,

    // The provided packet cannot be parsed because its version is unknown.
    QUICHE_ERR_UNKNOWN_VERSION = -3,

    // The provided packet cannot be parsed because it contains an invalid
    // frame.
    QUICHE_ERR_INVALID_FRAME = -4,

    // The provided packet cannot be parsed.
    QUICHE_ERR_INVALID_PACKET = -5,

    // The operation cannot be completed because the connection is in an
    // invalid state.
    QUICHE_ERR_INVALID_STATE = -6,

    // The operation cannot be completed because the stream is in an
    // invalid state.
    QUICHE_ERR_INVALID_STREAM_STATE = -7,

    // The peer's transport params cannot be parsed.
    QUICHE_ERR_INVALID_TRANSPORT_PARAM = -8,

    // A cryptographic operation failed.
    QUICHE_ERR_CRYPTO_FAIL = -9,

    // The TLS handshake failed.
    QUICHE_ERR_TLS_FAIL = -10,

    // The peer violated the local flow control limits.
    QUICHE_ERR_FLOW_CONTROL = -11,

    // The peer violated the local stream limits.
    QUICHE_ERR_STREAM_LIMIT = -12,

    // The specified stream was stopped by the peer.
    QUICHE_ERR_STREAM_STOPPED = -15,

    // The specified stream was reset by the peer.
    QUICHE_ERR_STREAM_RESET = -16,

    // The received data exceeds the stream's final size.
    QUICHE_ERR_FINAL_SIZE = -13,

    // Error in congestion control.
    QUICHE_ERR_CONGESTION_CONTROL = -14,
};

// Returns a human-readable string with the quiche version number.
const char *quiche_version(void);

// Enables logging. |cb| will be called with log messages.
int quiche_enable_debug_logging(void (*cb)(const char *line, void *argp),
                                void *argp);

// Stores configuration shared between multiple connections.
typedef struct quiche_config quiche_config;

// Creates a config object with the given version.
quiche_config *quiche_config_new(uint32_t version);

// Configures the given certificate chain.
int quiche_config_load_cert_chain_from_pem_file(quiche_config *config,
                                                const char *path);

// Configures the given private key.
int quiche_config_load_priv_key_from_pem_file(quiche_config *config,
                                              const char *path);

// Specifies a file where trusted CA certificates are stored for the purposes of certificate verification.
int quiche_config_load_verify_locations_from_file(quiche_config *config,
                                                  const char *path);

// Specifies a directory where trusted CA certificates are stored for the purposes of certificate verification.
int quiche_config_load_verify_locations_from_directory(quiche_config *config,
                                                       const char *path);

// Configures whether to verify the peer's certificate.
void quiche_config_verify_peer(quiche_config *config, bool v);

// Configures whether to send GREASE.
void quiche_config_grease(quiche_config *config, bool v);

// Enables logging of secrets.
void quiche_config_log_keys(quiche_config *config);

// Enables sending or receiving early data.
void quiche_config_enable_early_data(quiche_config *config);

// Configures the list of supported application protocols.
int quiche_config_set_application_protos(quiche_config *config,
                                         const uint8_t *protos,
                                         size_t protos_len);

// Sets the `max_idle_timeout` transport parameter, in milliseconds; the
// default is no timeout.
void quiche_config_set_max_idle_timeout(quiche_config *config, uint64_t v);

// Sets the `max_udp_payload_size` transport parameter.
void quiche_config_set_max_recv_udp_payload_size(quiche_config *config, size_t v);

// Sets the maximum outgoing UDP payload size.
void quiche_config_set_max_send_udp_payload_size(quiche_config *config, size_t v);

// Sets the `initial_max_data` transport parameter.
void quiche_config_set_initial_max_data(quiche_config *config, uint64_t v);

// Sets the `initial_max_stream_data_bidi_local` transport parameter.
void quiche_config_set_initial_max_stream_data_bidi_local(quiche_config *config, uint64_t v);

// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
void quiche_config_set_initial_max_stream_data_bidi_remote(quiche_config *config, uint64_t v);

// Sets the `initial_max_stream_data_uni` transport parameter.
void quiche_config_set_initial_max_stream_data_uni(quiche_config *config, uint64_t v);

// Sets the `initial_max_streams_bidi` transport parameter.
void quiche_config_set_initial_max_streams_bidi(quiche_config *config, uint64_t v);

// Sets the `initial_max_streams_uni` transport parameter.
void quiche_config_set_initial_max_streams_uni(quiche_config *config, uint64_t v);

// Sets the `ack_delay_exponent` transport parameter.
void quiche_config_set_ack_delay_exponent(quiche_config *config, uint64_t v);

// Sets the `max_ack_delay` transport parameter.
void quiche_config_set_max_ack_delay(quiche_config *config, uint64_t v);

// Sets the `disable_active_migration` transport parameter.
void quiche_config_set_disable_active_migration(quiche_config *config, bool v);

enum quiche_cc_algorithm {
    QUICHE_CC_RENO = 0,
    QUICHE_CC_CUBIC = 1,
    QUICHE_CC_BBR = 2,
};

// Sets the congestion control algorithm used.
void quiche_config_set_cc_algorithm(quiche_config *config, enum quiche_cc_algorithm algo);

// Configures whether to use HyStart++.
void quiche_config_enable_hystart(quiche_config *config, bool v);

// Configures whether to enable pacing (enabled by default).
void quiche_config_enable_pacing(quiche_config *config, bool v);

// Configures whether to enable receiving DATAGRAM frames.
void quiche_config_enable_dgram(quiche_config *config, bool enabled,
                                size_t recv_queue_len,
                                size_t send_queue_len);

// Sets the maximum connection window.
void quiche_config_set_max_connection_window(quiche_config *config, uint64_t v);

// Sets the maximum stream window.
void quiche_config_set_max_stream_window(quiche_config *config, uint64_t v);

// Sets the limit of active connection IDs.
void quiche_config_set_active_connection_id_limit(quiche_config *config, uint64_t v);

// Sets the initial stateless reset token. |v| must contain 16 bytes, otherwise the behaviour is undefined.
void quiche_config_set_stateless_reset_token(quiche_config *config, const uint8_t *v);

// Frees the config object.
void quiche_config_free(quiche_config *config);

// Extracts version, type, source / destination connection ID and address
// verification token from the packet in |buf|.
int quiche_header_info(const uint8_t *buf, size_t buf_len, size_t dcil,
                       uint32_t *version, uint8_t *type,
                       uint8_t *scid, size_t *scid_len,
                       uint8_t *dcid, size_t *dcid_len,
                       uint8_t *token, size_t *token_len);

// A QUIC connection.
typedef struct quiche_conn quiche_conn;

// Creates a new server-side connection.
quiche_conn *quiche_accept(const uint8_t *scid, size_t scid_len,
                           const uint8_t *odcid, size_t odcid_len,
                           const struct sockaddr *local, size_t local_len,
                           const struct sockaddr *peer, size_t peer_len,
                           quiche_config *config);

// Creates a new client-side connection.
quiche_conn *quiche_connect(const char *server_name,
                            const uint8_t *scid, size_t scid_len,
                            const struct sockaddr *local, size_t local_len,
                            const struct sockaddr *peer, size_t peer_len,
                            quiche_config *config);
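
// Editor's note: a client-side setup sketch, not part of the original header.
// Socket setup and scid generation are the caller's job; "hq-29" is only an
// example ALPN entry, and the transport limits below are arbitrary.
static quiche_conn *sketch_dial(const char *host,
                                const uint8_t scid[QUICHE_MAX_CONN_ID_LEN],
                                const struct sockaddr *local, size_t local_len,
                                const struct sockaddr *peer, size_t peer_len)
{
    quiche_conn *conn = NULL;
    quiche_config *config = quiche_config_new(QUICHE_PROTOCOL_VERSION);
    if (config == NULL) return NULL;

    // length-prefixed ALPN list: one 5-byte entry
    quiche_config_set_application_protos(config,
                                         (const uint8_t *) "\x05hq-29", 6);
    quiche_config_set_max_idle_timeout(config, 5000);           // ms
    quiche_config_set_initial_max_data(config, 10 * 1024 * 1024);
    quiche_config_set_initial_max_stream_data_bidi_local(config, 1024 * 1024);
    quiche_config_set_initial_max_streams_bidi(config, 100);

    conn = quiche_connect(host, scid, QUICHE_MAX_CONN_ID_LEN,
                          local, local_len, peer, peer_len, config);
    quiche_config_free(config);   // the connection keeps its own settings
    return conn;
}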

// Writes a version negotiation packet.
ssize_t quiche_negotiate_version(const uint8_t *scid, size_t scid_len,
                                 const uint8_t *dcid, size_t dcid_len,
                                 uint8_t *out, size_t out_len);

// Writes a retry packet.
ssize_t quiche_retry(const uint8_t *scid, size_t scid_len,
                     const uint8_t *dcid, size_t dcid_len,
                     const uint8_t *new_scid, size_t new_scid_len,
                     const uint8_t *token, size_t token_len,
                     uint32_t version, uint8_t *out, size_t out_len);

// Returns true if the given protocol version is supported.
bool quiche_version_is_supported(uint32_t version);

quiche_conn *quiche_conn_new_with_tls(const uint8_t *scid, size_t scid_len,
                                      const uint8_t *odcid, size_t odcid_len,
                                      const struct sockaddr *local, size_t local_len,
                                      const struct sockaddr *peer, size_t peer_len,
                                      quiche_config *config, void *ssl,
                                      bool is_server);

// Enables keylog to the specified file path. Returns true on success.
bool quiche_conn_set_keylog_path(quiche_conn *conn, const char *path);

// Enables keylog to the specified file descriptor. Unix only.
void quiche_conn_set_keylog_fd(quiche_conn *conn, int fd);

// Enables qlog to the specified file path. Returns true on success.
bool quiche_conn_set_qlog_path(quiche_conn *conn, const char *path,
                               const char *log_title, const char *log_desc);

// Enables qlog to the specified file descriptor. Unix only.
void quiche_conn_set_qlog_fd(quiche_conn *conn, int fd, const char *log_title,
                             const char *log_desc);

// Configures the given session for resumption.
int quiche_conn_set_session(quiche_conn *conn, const uint8_t *buf, size_t buf_len);

typedef struct {
    // The remote address the packet was received from.
    struct sockaddr *from;
    socklen_t from_len;

    // The local address the packet was received on.
    struct sockaddr *to;
    socklen_t to_len;
} quiche_recv_info;

// Processes QUIC packets received from the peer.
ssize_t quiche_conn_recv(quiche_conn *conn, uint8_t *buf, size_t buf_len,
                         const quiche_recv_info *info);

typedef struct {
    // The local address the packet should be sent from.
    struct sockaddr_storage from;
    socklen_t from_len;

    // The remote address the packet should be sent to.
    struct sockaddr_storage to;
    socklen_t to_len;

    // The time to send the packet out.
    struct timespec at;
} quiche_send_info;

// Writes a single QUIC packet to be sent to the peer.
ssize_t quiche_conn_send(quiche_conn *conn, uint8_t *out, size_t out_len,
                         quiche_send_info *out_info);
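
// Editor's note: an I/O pump sketch, not part of the original header. The
// actual recvfrom()/sendto() calls are elided; `in` holds one datagram that
// was already read from the socket.
static void sketch_pump(quiche_conn *conn, uint8_t *in, size_t in_len,
                        const quiche_recv_info *rinfo)
{
    uint8_t out[1350];   // fits a typical QUIC UDP payload
    quiche_send_info sinfo;

    // feed the received datagram into the connection
    ssize_t done = quiche_conn_recv(conn, in, in_len, rinfo);
    if (done < 0) return;   // error (or nothing actionable)

    // drain every packet quiche wants on the wire
    while (1) {
        ssize_t written = quiche_conn_send(conn, out, sizeof(out), &sinfo);
        if (written == QUICHE_ERR_DONE) break;   // nothing left to send
        if (written < 0) break;                  // error
        // sendto(sock, out, (size_t) written, 0,
        //        (struct sockaddr *) &sinfo.to, sinfo.to_len);
    }
}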

// Returns the size of the send quantum, in bytes.
size_t quiche_conn_send_quantum(const quiche_conn *conn);

// Reads contiguous data from a stream.
ssize_t quiche_conn_stream_recv(quiche_conn *conn, uint64_t stream_id,
                                uint8_t *out, size_t buf_len, bool *fin);

// Writes data to a stream.
ssize_t quiche_conn_stream_send(quiche_conn *conn, uint64_t stream_id,
                                const uint8_t *buf, size_t buf_len, bool fin);

// The side of the stream to be shut down.
enum quiche_shutdown {
    QUICHE_SHUTDOWN_READ = 0,
    QUICHE_SHUTDOWN_WRITE = 1,
};

// Sets the priority for a stream.
int quiche_conn_stream_priority(quiche_conn *conn, uint64_t stream_id,
                                uint8_t urgency, bool incremental);

// Shuts down reading or writing from/to the specified stream.
int quiche_conn_stream_shutdown(quiche_conn *conn, uint64_t stream_id,
                                enum quiche_shutdown direction, uint64_t err);

// Returns the stream's send capacity in bytes.
ssize_t quiche_conn_stream_capacity(const quiche_conn *conn, uint64_t stream_id);

// Returns true if the stream has data that can be read.
bool quiche_conn_stream_readable(const quiche_conn *conn, uint64_t stream_id);

// Returns the next stream that has data to read, or -1 if no such stream is
// available.
int64_t quiche_conn_stream_readable_next(quiche_conn *conn);
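
// Editor's note: a stream-draining sketch, not part of the original header,
// typically run after quiche_conn_recv() succeeds.
static void sketch_read_streams(quiche_conn *conn)
{
    uint8_t buf[4096];
    int64_t sid;
    while ((sid = quiche_conn_stream_readable_next(conn)) >= 0) {
        bool fin = false;
        ssize_t got = quiche_conn_stream_recv(conn, (uint64_t) sid,
                                              buf, sizeof(buf), &fin);
        if (got < 0) continue;   // e.g. QUICHE_ERR_DONE for this stream
        // process `got` bytes from stream `sid`; `fin` marks end of stream
    }
}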
|
||||
|
||||
// Returns true if the stream has enough send capacity.
|
||||
//
|
||||
// On error a value lower than 0 is returned.
|
||||
int quiche_conn_stream_writable(quiche_conn *conn, uint64_t stream_id, size_t len);
|
||||
|
||||
// Returns the next stream that can be written to, or -1 if no such stream is
|
||||
// available.
|
||||
int64_t quiche_conn_stream_writable_next(quiche_conn *conn);
|
||||
|
||||
// Returns true if all the data has been read from the specified stream.
|
||||
bool quiche_conn_stream_finished(const quiche_conn *conn, uint64_t stream_id);
|
||||
|
||||
typedef struct quiche_stream_iter quiche_stream_iter;
|
||||
|
||||
// Returns an iterator over streams that have outstanding data to read.
|
||||
quiche_stream_iter *quiche_conn_readable(const quiche_conn *conn);
|
||||
|
||||
// Returns an iterator over streams that can be written to.
|
||||
quiche_stream_iter *quiche_conn_writable(const quiche_conn *conn);
|
||||
|
||||
// Returns the maximum possible size of egress UDP payloads.
|
||||
size_t quiche_conn_max_send_udp_payload_size(const quiche_conn *conn);
|
||||
|
||||
// Returns the amount of time until the next timeout event, in nanoseconds.
|
||||
uint64_t quiche_conn_timeout_as_nanos(const quiche_conn *conn);
|
||||
|
||||
// Returns the amount of time until the next timeout event, in milliseconds.
|
||||
uint64_t quiche_conn_timeout_as_millis(const quiche_conn *conn);
|
||||
|
||||
// Processes a timeout event.
|
||||
void quiche_conn_on_timeout(quiche_conn *conn);
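
// Usage sketch (illustrative, not part of the original header): re-arm a
// platform timer from quiche_conn_timeout_as_millis() after every recv/send,
// and run this when it fires so loss probes and retransmissions go out.
static inline void example_on_timer_fired(quiche_conn *conn, int sock) {
    quiche_conn_on_timeout(conn);
    example_flush_egress(conn, sock);
}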

// Closes the connection with the given error and reason.
int quiche_conn_close(quiche_conn *conn, bool app, uint64_t err,
                      const uint8_t *reason, size_t reason_len);

// Returns a string uniquely representing the connection.
void quiche_conn_trace_id(const quiche_conn *conn, const uint8_t **out, size_t *out_len);

// Returns the source connection ID.
void quiche_conn_source_id(const quiche_conn *conn, const uint8_t **out, size_t *out_len);

// Returns the destination connection ID.
void quiche_conn_destination_id(const quiche_conn *conn, const uint8_t **out, size_t *out_len);

// Returns the negotiated ALPN protocol.
void quiche_conn_application_proto(const quiche_conn *conn, const uint8_t **out,
                                   size_t *out_len);

// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
void quiche_conn_peer_cert(const quiche_conn *conn, const uint8_t **out, size_t *out_len);

// Returns the serialized cryptographic session for the connection.
void quiche_conn_session(const quiche_conn *conn, const uint8_t **out, size_t *out_len);
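
// Usage sketch (illustrative, not part of the original header): pairing
// quiche_conn_session() with quiche_conn_set_session() for resumption.
// Whether an empty buffer signals "no session yet" is an assumption here;
// persisting the bytes between runs is left to the caller.
static inline int example_carry_session(quiche_conn *old_conn,
                                        quiche_conn *new_conn) {
    const uint8_t *session;
    size_t session_len = 0;
    quiche_conn_session(old_conn, &session, &session_len);
    if (session_len == 0) return -1;
    return quiche_conn_set_session(new_conn, session, session_len);
}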

// Returns true if the connection handshake is complete.
bool quiche_conn_is_established(const quiche_conn *conn);

// Returns true if the connection has a pending handshake that has progressed
// enough to send or receive early data.
bool quiche_conn_is_in_early_data(const quiche_conn *conn);

// Returns whether there is stream or DATAGRAM data available to read.
bool quiche_conn_is_readable(const quiche_conn *conn);

// Returns true if the connection is draining.
bool quiche_conn_is_draining(const quiche_conn *conn);

// Returns the number of bidirectional streams that can be created
// before the peer's stream count limit is reached.
uint64_t quiche_conn_peer_streams_left_bidi(const quiche_conn *conn);

// Returns the number of unidirectional streams that can be created
// before the peer's stream count limit is reached.
uint64_t quiche_conn_peer_streams_left_uni(const quiche_conn *conn);

// Returns true if the connection is closed.
bool quiche_conn_is_closed(const quiche_conn *conn);

// Returns true if the connection was closed due to the idle timeout.
bool quiche_conn_is_timed_out(const quiche_conn *conn);

// Returns true if a connection error was received, and updates the provided
// parameters accordingly.
bool quiche_conn_peer_error(const quiche_conn *conn,
                            bool *is_app,
                            uint64_t *error_code,
                            const uint8_t **reason,
                            size_t *reason_len);

// Returns true if a connection error was queued or sent, and updates the provided
// parameters accordingly.
bool quiche_conn_local_error(const quiche_conn *conn,
                             bool *is_app,
                             uint64_t *error_code,
                             const uint8_t **reason,
                             size_t *reason_len);

// Initializes the stream's application data.
//
// Stream data can only be initialized once. Additional calls to this method
// will fail.
//
// Note that the application is responsible for freeing the data.
int quiche_conn_stream_init_application_data(quiche_conn *conn,
                                             uint64_t stream_id,
                                             void *data);

// Returns the stream's application data, if any was initialized.
void *quiche_conn_stream_application_data(quiche_conn *conn, uint64_t stream_id);

// Fetches the next stream from the given iterator. Returns false if there are
// no more elements in the iterator.
bool quiche_stream_iter_next(quiche_stream_iter *iter, uint64_t *stream_id);

// Frees the given stream iterator object.
void quiche_stream_iter_free(quiche_stream_iter *iter);
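
// Usage sketch (illustrative, not part of the original header): draining all
// readable streams with the iterator API. `handle` is a hypothetical callback.
static inline void example_read_streams(quiche_conn *conn,
                                        void (*handle)(uint64_t stream_id,
                                                       const uint8_t *data,
                                                       size_t len, bool fin)) {
    quiche_stream_iter *it = quiche_conn_readable(conn);
    uint64_t stream_id;
    while (quiche_stream_iter_next(it, &stream_id)) {
        uint8_t out[4096];
        bool fin = false;
        ssize_t len = quiche_conn_stream_recv(conn, stream_id, out,
                                              sizeof(out), &fin);
        if (len >= 0) handle(stream_id, out, (size_t)len, fin);
    }
    quiche_stream_iter_free(it);
}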

typedef struct {
    // The number of QUIC packets received on this connection.
    size_t recv;

    // The number of QUIC packets sent on this connection.
    size_t sent;

    // The number of QUIC packets that were lost.
    size_t lost;

    // The number of sent QUIC packets with retransmitted data.
    size_t retrans;

    // The number of sent bytes.
    uint64_t sent_bytes;

    // The number of received bytes.
    uint64_t recv_bytes;

    // The number of bytes lost.
    uint64_t lost_bytes;

    // The number of stream bytes retransmitted.
    uint64_t stream_retrans_bytes;

    // The number of known paths for the connection.
    size_t paths_count;

    // The maximum idle timeout.
    uint64_t peer_max_idle_timeout;

    // The maximum UDP payload size.
    uint64_t peer_max_udp_payload_size;

    // The initial flow control maximum data for the connection.
    uint64_t peer_initial_max_data;

    // The initial flow control maximum data for local bidirectional streams.
    uint64_t peer_initial_max_stream_data_bidi_local;

    // The initial flow control maximum data for remote bidirectional streams.
    uint64_t peer_initial_max_stream_data_bidi_remote;

    // The initial flow control maximum data for unidirectional streams.
    uint64_t peer_initial_max_stream_data_uni;

    // The initial maximum bidirectional streams.
    uint64_t peer_initial_max_streams_bidi;

    // The initial maximum unidirectional streams.
    uint64_t peer_initial_max_streams_uni;

    // The ACK delay exponent.
    uint64_t peer_ack_delay_exponent;

    // The max ACK delay.
    uint64_t peer_max_ack_delay;

    // Whether active migration is disabled.
    bool peer_disable_active_migration;

    // The active connection ID limit.
    uint64_t peer_active_conn_id_limit;

    // DATAGRAM frame extension parameter, if any.
    ssize_t peer_max_datagram_frame_size;
} quiche_stats;

// Collects and returns statistics about the connection.
void quiche_conn_stats(const quiche_conn *conn, quiche_stats *out);

typedef struct {
    // The local address used by this path.
    struct sockaddr_storage local_addr;
    socklen_t local_addr_len;

    // The peer address seen by this path.
    struct sockaddr_storage peer_addr;
    socklen_t peer_addr_len;

    // The validation state of the path.
    ssize_t validation_state;

    // Whether this path is active.
    bool active;

    // The number of QUIC packets received on this path.
    size_t recv;

    // The number of QUIC packets sent on this path.
    size_t sent;

    // The number of QUIC packets that were lost on this path.
    size_t lost;

    // The number of sent QUIC packets with retransmitted data on this path.
    size_t retrans;

    // The estimated round-trip time of the path (in nanoseconds).
    uint64_t rtt;

    // The size of the path's congestion window in bytes.
    size_t cwnd;

    // The number of sent bytes on this path.
    uint64_t sent_bytes;

    // The number of received bytes on this path.
    uint64_t recv_bytes;

    // The number of bytes lost on this path.
    uint64_t lost_bytes;

    // The number of stream bytes retransmitted on this path.
    uint64_t stream_retrans_bytes;

    // The current PMTU for the path.
    size_t pmtu;

    // The most recent data delivery rate estimate in bytes/s.
    uint64_t delivery_rate;
} quiche_path_stats;


// Collects and returns statistics about the specified path for the connection.
//
// The `idx` argument represents the path's index (also see the `paths_count`
// field of `quiche_stats`).
int quiche_conn_path_stats(const quiche_conn *conn, size_t idx, quiche_path_stats *out);
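
// Usage sketch (illustrative, not part of the original header): dumping
// connection-wide stats, then per-path stats indexed up to paths_count.
// Assumes <stdio.h> and <inttypes.h> are available for the logging calls.
static inline void example_dump_stats(const quiche_conn *conn) {
    quiche_stats stats;
    quiche_conn_stats(conn, &stats);
    fprintf(stderr, "recv=%zu sent=%zu lost=%zu\n",
            stats.recv, stats.sent, stats.lost);

    for (size_t i = 0; i < stats.paths_count; i++) {
        quiche_path_stats ps;
        if (quiche_conn_path_stats(conn, i, &ps) == 0) {
            fprintf(stderr, "path %zu: rtt=%" PRIu64 "ns cwnd=%zu\n",
                    i, ps.rtt, ps.cwnd);
        }
    }
}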

// Returns the maximum DATAGRAM payload that can be sent.
ssize_t quiche_conn_dgram_max_writable_len(const quiche_conn *conn);

// Returns the length of the first stored DATAGRAM.
ssize_t quiche_conn_dgram_recv_front_len(const quiche_conn *conn);

// Returns the number of items in the DATAGRAM receive queue.
ssize_t quiche_conn_dgram_recv_queue_len(const quiche_conn *conn);

// Returns the total size of all items in the DATAGRAM receive queue.
ssize_t quiche_conn_dgram_recv_queue_byte_size(const quiche_conn *conn);

// Returns the number of items in the DATAGRAM send queue.
ssize_t quiche_conn_dgram_send_queue_len(const quiche_conn *conn);

// Returns the total size of all items in the DATAGRAM send queue.
ssize_t quiche_conn_dgram_send_queue_byte_size(const quiche_conn *conn);

// Reads the first received DATAGRAM.
ssize_t quiche_conn_dgram_recv(quiche_conn *conn, uint8_t *buf,
                               size_t buf_len);

// Sends data in a DATAGRAM frame.
ssize_t quiche_conn_dgram_send(quiche_conn *conn, const uint8_t *buf,
                               size_t buf_len);
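
// Usage sketch (illustrative, not part of the original header): checking the
// writable limit before queueing a DATAGRAM, so oversized payloads fail early.
static inline ssize_t example_try_send_dgram(quiche_conn *conn,
                                             const uint8_t *payload, size_t len) {
    ssize_t max = quiche_conn_dgram_max_writable_len(conn);
    if (max < 0 || (size_t)max < len) return -1; // unsupported by peer, or too big
    return quiche_conn_dgram_send(conn, payload, len);
}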

// Purges queued outgoing DATAGRAMs matching the predicate.
void quiche_conn_dgram_purge_outgoing(quiche_conn *conn,
                                      bool (*f)(uint8_t *, size_t));

// Schedule an ack-eliciting packet on the active path.
ssize_t quiche_conn_send_ack_eliciting(quiche_conn *conn);

// Schedule an ack-eliciting packet on the specified path.
ssize_t quiche_conn_send_ack_eliciting_on_path(quiche_conn *conn,
                                               const struct sockaddr *local, size_t local_len,
                                               const struct sockaddr *peer, size_t peer_len);

// Frees the connection object.
void quiche_conn_free(quiche_conn *conn);


// HTTP/3 API
//

// List of ALPN tokens of supported HTTP/3 versions.
#define QUICHE_H3_APPLICATION_PROTOCOL "\x02h3\x05h3-29\x05h3-28\x05h3-27"

enum quiche_h3_error {
    // There is no error or no work to do.
    QUICHE_H3_ERR_DONE = -1,

    // The provided buffer is too short.
    QUICHE_H3_ERR_BUFFER_TOO_SHORT = -2,

    // Internal error in the HTTP/3 stack.
    QUICHE_H3_ERR_INTERNAL_ERROR = -3,

    // Endpoint detected that the peer is exhibiting behavior that causes
    // excessive load.
    QUICHE_H3_ERR_EXCESSIVE_LOAD = -4,

    // A Stream ID or Push ID greater than the current maximum was
    // used incorrectly, such as exceeding a limit, reducing a limit,
    // or being reused.
    QUICHE_H3_ERR_ID_ERROR = -5,

    // The endpoint detected that its peer created a stream that it will not
    // accept.
    QUICHE_H3_ERR_STREAM_CREATION_ERROR = -6,

    // A required critical stream was closed.
    QUICHE_H3_ERR_CLOSED_CRITICAL_STREAM = -7,

    // No SETTINGS frame at beginning of control stream.
    QUICHE_H3_ERR_MISSING_SETTINGS = -8,

    // A frame was received which is not permitted in the current state.
    QUICHE_H3_ERR_FRAME_UNEXPECTED = -9,

    // Frame violated layout or size rules.
    QUICHE_H3_ERR_FRAME_ERROR = -10,

    // QPACK Header block decompression failure.
    QUICHE_H3_ERR_QPACK_DECOMPRESSION_FAILED = -11,

    // -12 was previously used for TransportError, skip it

    // The underlying QUIC stream (or connection) doesn't have enough capacity
    // for the operation to complete. The application should retry later on.
    QUICHE_H3_ERR_STREAM_BLOCKED = -13,

    // Error in the payload of a SETTINGS frame.
    QUICHE_H3_ERR_SETTINGS_ERROR = -14,

    // Server rejected request.
    QUICHE_H3_ERR_REQUEST_REJECTED = -15,

    // Request or its response cancelled.
    QUICHE_H3_ERR_REQUEST_CANCELLED = -16,

    // Client's request stream terminated without containing a fully-formed
    // request.
    QUICHE_H3_ERR_REQUEST_INCOMPLETE = -17,

    // An HTTP message was malformed and cannot be processed.
    QUICHE_H3_ERR_MESSAGE_ERROR = -18,

    // The TCP connection established in response to a CONNECT request was
    // reset or abnormally closed.
    QUICHE_H3_ERR_CONNECT_ERROR = -19,

    // The requested operation cannot be served over HTTP/3. Peer should retry
    // over HTTP/1.1.
    QUICHE_H3_ERR_VERSION_FALLBACK = -20,

    // The following QUICHE_H3_TRANSPORT_ERR_* errors are propagated
    // from the QUIC transport layer.

    // See QUICHE_ERR_DONE.
    QUICHE_H3_TRANSPORT_ERR_DONE = QUICHE_ERR_DONE - 1000,

    // See QUICHE_ERR_BUFFER_TOO_SHORT.
    QUICHE_H3_TRANSPORT_ERR_BUFFER_TOO_SHORT = QUICHE_ERR_BUFFER_TOO_SHORT - 1000,

    // See QUICHE_ERR_UNKNOWN_VERSION.
    QUICHE_H3_TRANSPORT_ERR_UNKNOWN_VERSION = QUICHE_ERR_UNKNOWN_VERSION - 1000,

    // See QUICHE_ERR_INVALID_FRAME.
    QUICHE_H3_TRANSPORT_ERR_INVALID_FRAME = QUICHE_ERR_INVALID_FRAME - 1000,

    // See QUICHE_ERR_INVALID_PACKET.
    QUICHE_H3_TRANSPORT_ERR_INVALID_PACKET = QUICHE_ERR_INVALID_PACKET - 1000,

    // See QUICHE_ERR_INVALID_STATE.
    QUICHE_H3_TRANSPORT_ERR_INVALID_STATE = QUICHE_ERR_INVALID_STATE - 1000,

    // See QUICHE_ERR_INVALID_STREAM_STATE.
    QUICHE_H3_TRANSPORT_ERR_INVALID_STREAM_STATE = QUICHE_ERR_INVALID_STREAM_STATE - 1000,

    // See QUICHE_ERR_INVALID_TRANSPORT_PARAM.
    QUICHE_H3_TRANSPORT_ERR_INVALID_TRANSPORT_PARAM = QUICHE_ERR_INVALID_TRANSPORT_PARAM - 1000,

    // See QUICHE_ERR_CRYPTO_FAIL.
    QUICHE_H3_TRANSPORT_ERR_CRYPTO_FAIL = QUICHE_ERR_CRYPTO_FAIL - 1000,

    // See QUICHE_ERR_TLS_FAIL.
    QUICHE_H3_TRANSPORT_ERR_TLS_FAIL = QUICHE_ERR_TLS_FAIL - 1000,

    // See QUICHE_ERR_FLOW_CONTROL.
    QUICHE_H3_TRANSPORT_ERR_FLOW_CONTROL = QUICHE_ERR_FLOW_CONTROL - 1000,

    // See QUICHE_ERR_STREAM_LIMIT.
    QUICHE_H3_TRANSPORT_ERR_STREAM_LIMIT = QUICHE_ERR_STREAM_LIMIT - 1000,

    // See QUICHE_ERR_STREAM_STOPPED.
    QUICHE_H3_TRANSPORT_ERR_STREAM_STOPPED = QUICHE_ERR_STREAM_STOPPED - 1000,

    // See QUICHE_ERR_STREAM_RESET.
    QUICHE_H3_TRANSPORT_ERR_STREAM_RESET = QUICHE_ERR_STREAM_RESET - 1000,

    // See QUICHE_ERR_FINAL_SIZE.
    QUICHE_H3_TRANSPORT_ERR_FINAL_SIZE = QUICHE_ERR_FINAL_SIZE - 1000,

    // See QUICHE_ERR_CONGESTION_CONTROL.
    QUICHE_H3_TRANSPORT_ERR_CONGESTION_CONTROL = QUICHE_ERR_CONGESTION_CONTROL - 1000,
};

// Stores configuration shared between multiple connections.
typedef struct quiche_h3_config quiche_h3_config;

// Creates an HTTP/3 config object with default settings values.
quiche_h3_config *quiche_h3_config_new(void);

// Sets the `SETTINGS_MAX_FIELD_SECTION_SIZE` setting.
void quiche_h3_config_set_max_field_section_size(quiche_h3_config *config, uint64_t v);

// Sets the `SETTINGS_QPACK_MAX_TABLE_CAPACITY` setting.
void quiche_h3_config_set_qpack_max_table_capacity(quiche_h3_config *config, uint64_t v);

// Sets the `SETTINGS_QPACK_BLOCKED_STREAMS` setting.
void quiche_h3_config_set_qpack_blocked_streams(quiche_h3_config *config, uint64_t v);

// Sets the `SETTINGS_ENABLE_CONNECT_PROTOCOL` setting.
void quiche_h3_config_enable_extended_connect(quiche_h3_config *config, bool enabled);

// Frees the HTTP/3 config object.
void quiche_h3_config_free(quiche_h3_config *config);

// An HTTP/3 connection.
typedef struct quiche_h3_conn quiche_h3_conn;

// Creates a new server-side connection.
quiche_h3_conn *quiche_h3_accept(quiche_conn *quiche_conn,
                                 quiche_h3_config *config);

// Creates a new HTTP/3 connection using the provided QUIC connection.
quiche_h3_conn *quiche_h3_conn_new_with_transport(quiche_conn *quiche_conn,
                                                  quiche_h3_config *config);

enum quiche_h3_event_type {
    QUICHE_H3_EVENT_HEADERS,
    QUICHE_H3_EVENT_DATA,
    QUICHE_H3_EVENT_FINISHED,
    QUICHE_H3_EVENT_DATAGRAM,
    QUICHE_H3_EVENT_GOAWAY,
    QUICHE_H3_EVENT_RESET,
    QUICHE_H3_EVENT_PRIORITY_UPDATE,
};

typedef struct quiche_h3_event quiche_h3_event;

// Processes HTTP/3 data received from the peer.
int64_t quiche_h3_conn_poll(quiche_h3_conn *conn, quiche_conn *quic_conn,
                            quiche_h3_event **ev);

// Returns the type of the event.
enum quiche_h3_event_type quiche_h3_event_type(quiche_h3_event *ev);
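
// Usage sketch (illustrative, not part of the original header): the HTTP/3
// event loop. quiche_h3_conn_poll() yields the stream ID the event refers to,
// or a negative value (QUICHE_H3_ERR_DONE once nothing is pending).
static inline void example_h3_poll(quiche_h3_conn *h3, quiche_conn *quic) {
    quiche_h3_event *ev;
    while (1) {
        int64_t sid = quiche_h3_conn_poll(h3, quic, &ev);
        if (sid < 0) break; // QUICHE_H3_ERR_DONE or a real error
        switch (quiche_h3_event_type(ev)) {
        case QUICHE_H3_EVENT_HEADERS:  /* iterate headers, see below */ break;
        case QUICHE_H3_EVENT_DATA:     /* call quiche_h3_recv_body() */ break;
        case QUICHE_H3_EVENT_FINISHED: /* no more data on this stream */ break;
        default: break;
        }
        quiche_h3_event_free(ev);
    }
}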

// Iterates over the headers in the event.
//
// The `cb` callback will be called for each header in `ev`. `cb` should check
// the validity of pseudo-headers and headers. If `cb` returns any value other
// than `0`, processing will be interrupted and the value is returned to the
// caller.
int quiche_h3_event_for_each_header(quiche_h3_event *ev,
                                    int (*cb)(uint8_t *name, size_t name_len,
                                              uint8_t *value, size_t value_len,
                                              void *argp),
                                    void *argp);
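
// Usage sketch (illustrative, not part of the original header): a callback
// printing each header; buffers are not NUL-terminated, hence the precision
// specifiers. Returning non-zero would stop the iteration early.
static inline int example_print_header(uint8_t *name, size_t name_len,
                                       uint8_t *value, size_t value_len,
                                       void *argp) {
    (void)argp;
    fprintf(stderr, "%.*s: %.*s\n",
            (int)name_len, name, (int)value_len, value);
    return 0;
}
// Invoked as: quiche_h3_event_for_each_header(ev, example_print_header, NULL);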

// Iterates over the peer's HTTP/3 settings.
//
// The `cb` callback will be called for each setting in `conn`.
// If `cb` returns any value other than `0`, processing will be interrupted and
// the value is returned to the caller.
int quiche_h3_for_each_setting(quiche_h3_conn *conn,
                               int (*cb)(uint64_t identifier,
                                         uint64_t value, void *argp),
                               void *argp);

// Check whether data will follow the headers on the stream.
bool quiche_h3_event_headers_has_body(quiche_h3_event *ev);

// Check whether or not extended CONNECT is enabled by the peer.
bool quiche_h3_extended_connect_enabled_by_peer(quiche_h3_conn *conn);

// Frees the HTTP/3 event object.
void quiche_h3_event_free(quiche_h3_event *ev);

typedef struct {
    const uint8_t *name;
    size_t name_len;

    const uint8_t *value;
    size_t value_len;
} quiche_h3_header;

// Extensible Priorities parameters.
typedef struct {
    uint8_t urgency;
    bool incremental;
} quiche_h3_priority;

// Sends an HTTP/3 request.
int64_t quiche_h3_send_request(quiche_h3_conn *conn, quiche_conn *quic_conn,
                               quiche_h3_header *headers, size_t headers_len,
                               bool fin);
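
// Usage sketch (illustrative, not part of the original header): sending a
// simple GET request. `authority` is a hypothetical host string; <string.h>
// is assumed included for strlen().
static inline int64_t example_h3_get(quiche_h3_conn *h3, quiche_conn *quic,
                                     const char *authority) {
    quiche_h3_header headers[] = {
        { (const uint8_t *)":method", 7, (const uint8_t *)"GET", 3 },
        { (const uint8_t *)":scheme", 7, (const uint8_t *)"https", 5 },
        { (const uint8_t *)":authority", 10,
          (const uint8_t *)authority, strlen(authority) },
        { (const uint8_t *)":path", 5, (const uint8_t *)"/", 1 },
    };
    return quiche_h3_send_request(h3, quic, headers,
                                  sizeof(headers) / sizeof(headers[0]), true);
}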

// Sends an HTTP/3 response on the specified stream with default priority.
int quiche_h3_send_response(quiche_h3_conn *conn, quiche_conn *quic_conn,
                            uint64_t stream_id, quiche_h3_header *headers,
                            size_t headers_len, bool fin);

// Sends an HTTP/3 response on the specified stream with specified priority.
int quiche_h3_send_response_with_priority(quiche_h3_conn *conn,
                                          quiche_conn *quic_conn, uint64_t stream_id,
                                          quiche_h3_header *headers, size_t headers_len,
                                          quiche_h3_priority *priority, bool fin);

// Sends an HTTP/3 body chunk on the given stream.
ssize_t quiche_h3_send_body(quiche_h3_conn *conn, quiche_conn *quic_conn,
                            uint64_t stream_id, uint8_t *body, size_t body_len,
                            bool fin);

// Reads request or response body data into the provided buffer.
ssize_t quiche_h3_recv_body(quiche_h3_conn *conn, quiche_conn *quic_conn,
                            uint64_t stream_id, uint8_t *out, size_t out_len);

// Try to parse an Extensible Priority field value.
int quiche_h3_parse_extensible_priority(uint8_t *priority,
                                        size_t priority_len,
                                        quiche_h3_priority *parsed);

// Sends a PRIORITY_UPDATE frame on the control stream with specified
// request stream ID and priority.
int quiche_h3_send_priority_update_for_request(quiche_h3_conn *conn,
                                               quiche_conn *quic_conn,
                                               uint64_t stream_id,
                                               quiche_h3_priority *priority);

// Take the last received PRIORITY_UPDATE frame for a stream.
//
// The `cb` callback will be called once. `cb` should check the validity of
// priority field value contents. If `cb` returns any value other than `0`,
// processing will be interrupted and the value is returned to the caller.
int quiche_h3_take_last_priority_update(quiche_h3_conn *conn,
                                        uint64_t prioritized_element_id,
                                        int (*cb)(uint8_t *priority_field_value,
                                                  uint64_t priority_field_value_len,
                                                  void *argp),
                                        void *argp);

// Returns whether the peer enabled HTTP/3 DATAGRAM frame support.
bool quiche_h3_dgram_enabled_by_peer(quiche_h3_conn *conn,
                                     quiche_conn *quic_conn);

// Writes data to the DATAGRAM send queue.
ssize_t quiche_h3_send_dgram(quiche_h3_conn *conn, quiche_conn *quic_conn,
                             uint64_t flow_id, uint8_t *data, size_t data_len);

// Reads data from the DATAGRAM receive queue.
ssize_t quiche_h3_recv_dgram(quiche_h3_conn *conn, quiche_conn *quic_conn,
                             uint64_t *flow_id, size_t *flow_id_len,
                             uint8_t *out, size_t out_len);

// Frees the HTTP/3 connection object.
void quiche_h3_conn_free(quiche_h3_conn *conn);

#if defined(__cplusplus)
} // extern C
#endif

#endif // QUICHE_H

@ -1,357 -0,0 @@

# ################################################################
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
# ################################################################

# Modules
ZSTD_LIB_COMPRESSION ?= 1
ZSTD_LIB_DECOMPRESSION ?= 1
ZSTD_LIB_DICTBUILDER ?= 1
ZSTD_LIB_DEPRECATED ?= 0

# Input variables for libzstd.mk
ifeq ($(ZSTD_LIB_COMPRESSION), 0)
  ZSTD_LIB_DICTBUILDER = 0
  ZSTD_LIB_DEPRECATED = 0
endif

ifeq ($(ZSTD_LIB_DECOMPRESSION), 0)
  ZSTD_LEGACY_SUPPORT = 0
  ZSTD_LIB_DEPRECATED = 0
endif

include libzstd.mk

ZSTD_FILES := $(ZSTD_COMMON_FILES) $(ZSTD_LEGACY_FILES)

ifneq ($(ZSTD_LIB_COMPRESSION), 0)
  ZSTD_FILES += $(ZSTD_COMPRESS_FILES)
endif

ifneq ($(ZSTD_LIB_DECOMPRESSION), 0)
  ZSTD_FILES += $(ZSTD_DECOMPRESS_FILES)
endif

ifneq ($(ZSTD_LIB_DEPRECATED), 0)
  ZSTD_FILES += $(ZSTD_DEPRECATED_FILES)
endif

ifneq ($(ZSTD_LIB_DICTBUILDER), 0)
  ZSTD_FILES += $(ZSTD_DICTBUILDER_FILES)
endif

ZSTD_LOCAL_SRC := $(notdir $(ZSTD_FILES))
ZSTD_LOCAL_OBJ0 := $(ZSTD_LOCAL_SRC:.c=.o)
ZSTD_LOCAL_OBJ := $(ZSTD_LOCAL_OBJ0:.S=.o)

VERSION := $(ZSTD_VERSION)

# Note: by default, the static library is built single-threaded and dynamic library is built
# multi-threaded. It is possible to force multi or single threaded builds by appending
# -mt or -nomt to the build target (like lib-mt for multi-threaded, lib-nomt for single-threaded).
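
# Example invocations (illustrative, not part of the original Makefile):
#   make lib        # static + shared libraries with the defaults above
#   make lib-mt     # force multi-threaded builds of both
#   make lib-nomt   # force single-threaded builds of both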
.PHONY: default
default: lib-release

CPPFLAGS_DYNLIB += -DZSTD_MULTITHREAD # dynamic library build defaults to multi-threaded
LDFLAGS_DYNLIB += -pthread
CPPFLAGS_STATLIB += # static library build defaults to single-threaded


ifeq ($(findstring GCC,$(CCVER)),GCC)
decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize
endif


# macOS linker doesn't support -soname, and uses a different extension
# see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html
ifeq ($(UNAME), Darwin)
  SHARED_EXT = dylib
  SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT)
  SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT)
  SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER)
else
  ifeq ($(UNAME), AIX)
    SONAME_FLAGS =
  else
    SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR)
  endif
  SHARED_EXT = so
  SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR)
  SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER)
endif


.PHONY: all
all: lib


.PHONY: libzstd.a  # must be run every time
libzstd.a: CPPFLAGS += $(CPPFLAGS_STATLIB)

SET_CACHE_DIRECTORY = \
	+$(MAKE) --no-print-directory $@ \
	BUILD_DIR=obj/$(HASH_DIR) \
	CPPFLAGS="$(CPPFLAGS)" \
	CFLAGS="$(CFLAGS)" \
	LDFLAGS="$(LDFLAGS)"

ifndef BUILD_DIR
# determine BUILD_DIR from compilation flags

libzstd.a:
	$(SET_CACHE_DIRECTORY)

else
# BUILD_DIR is defined

ZSTD_STATLIB_DIR := $(BUILD_DIR)/static
ZSTD_STATLIB := $(ZSTD_STATLIB_DIR)/libzstd.a
ZSTD_STATLIB_OBJ := $(addprefix $(ZSTD_STATLIB_DIR)/,$(ZSTD_LOCAL_OBJ))
$(ZSTD_STATLIB): ARFLAGS = rcs
$(ZSTD_STATLIB): | $(ZSTD_STATLIB_DIR)
$(ZSTD_STATLIB): $(ZSTD_STATLIB_OBJ)
# Check for multithread flag at target execution time
	$(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\
	@echo compiling multi-threaded static library $(LIBVER),\
	@echo compiling single-threaded static library $(LIBVER))
	$(AR) $(ARFLAGS) $@ $^

libzstd.a: $(ZSTD_STATLIB)
	cp -f $< $@

endif

ifneq (,$(filter Windows%,$(TARGET_SYSTEM)))

LIBZSTD = dll/libzstd.dll
$(LIBZSTD): $(ZSTD_FILES)
	@echo compiling dynamic library $(LIBVER)
	$(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -Wl,--out-implib,dll/libzstd.dll.a -shared $^ -o $@

else  # not Windows

LIBZSTD = libzstd.$(SHARED_EXT_VER)
.PHONY: $(LIBZSTD)  # must be run every time
$(LIBZSTD): CPPFLAGS += $(CPPFLAGS_DYNLIB)
$(LIBZSTD): CFLAGS += -fPIC -fvisibility=hidden
$(LIBZSTD): LDFLAGS += -shared $(LDFLAGS_DYNLIB)

ifndef BUILD_DIR
# determine BUILD_DIR from compilation flags

$(LIBZSTD):
	$(SET_CACHE_DIRECTORY)

else
# BUILD_DIR is defined

ZSTD_DYNLIB_DIR := $(BUILD_DIR)/dynamic
ZSTD_DYNLIB := $(ZSTD_DYNLIB_DIR)/$(LIBZSTD)
ZSTD_DYNLIB_OBJ := $(addprefix $(ZSTD_DYNLIB_DIR)/,$(ZSTD_LOCAL_OBJ))

$(ZSTD_DYNLIB): | $(ZSTD_DYNLIB_DIR)
$(ZSTD_DYNLIB): $(ZSTD_DYNLIB_OBJ)
# Check for multithread flag at target execution time
	$(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\
	@echo compiling multi-threaded dynamic library $(LIBVER),\
	@echo compiling single-threaded dynamic library $(LIBVER))
	$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@
	@echo creating versioned links
	ln -sf $@ libzstd.$(SHARED_EXT_MAJOR)
	ln -sf $@ libzstd.$(SHARED_EXT)

$(LIBZSTD): $(ZSTD_DYNLIB)
	cp -f $< $@

endif  # ifndef BUILD_DIR
endif  # if windows

.PHONY: libzstd
libzstd : $(LIBZSTD)

.PHONY: lib
lib : libzstd.a libzstd


# note : do not define lib-mt or lib-release as .PHONY
# make does not consider implicit pattern rule for .PHONY target

%-mt : CPPFLAGS_DYNLIB := -DZSTD_MULTITHREAD
%-mt : CPPFLAGS_STATLIB := -DZSTD_MULTITHREAD
%-mt : LDFLAGS_DYNLIB := -pthread
%-mt : %
	@echo multi-threaded build completed

%-nomt : CPPFLAGS_DYNLIB :=
%-nomt : LDFLAGS_DYNLIB :=
%-nomt : CPPFLAGS_STATLIB :=
%-nomt : %
	@echo single-threaded build completed

%-release : DEBUGFLAGS :=
%-release : %
	@echo release build completed


# Generate .h dependencies automatically

DEPFLAGS = -MT $@ -MMD -MP -MF

$(ZSTD_DYNLIB_DIR)/%.o : %.c $(ZSTD_DYNLIB_DIR)/%.d | $(ZSTD_DYNLIB_DIR)
	@echo CC $@
	$(COMPILE.c) $(DEPFLAGS) $(ZSTD_DYNLIB_DIR)/$*.d $(OUTPUT_OPTION) $<

$(ZSTD_STATLIB_DIR)/%.o : %.c $(ZSTD_STATLIB_DIR)/%.d | $(ZSTD_STATLIB_DIR)
	@echo CC $@
	$(COMPILE.c) $(DEPFLAGS) $(ZSTD_STATLIB_DIR)/$*.d $(OUTPUT_OPTION) $<

$(ZSTD_DYNLIB_DIR)/%.o : %.S | $(ZSTD_DYNLIB_DIR)
	@echo AS $@
	$(COMPILE.S) $(OUTPUT_OPTION) $<

$(ZSTD_STATLIB_DIR)/%.o : %.S | $(ZSTD_STATLIB_DIR)
	@echo AS $@
	$(COMPILE.S) $(OUTPUT_OPTION) $<

MKDIR ?= mkdir
$(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATLIB_DIR):
	$(MKDIR) -p $@

DEPFILES := $(ZSTD_DYNLIB_OBJ:.o=.d) $(ZSTD_STATLIB_OBJ:.o=.d)
$(DEPFILES):

include $(wildcard $(DEPFILES))


# Special case : building library in single-thread mode _and_ without zstdmt_compress.c
ZSTDMT_FILES = compress/zstdmt_compress.c
ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(ZSTD_FILES))
libzstd-nomt: CFLAGS += -fPIC -fvisibility=hidden
libzstd-nomt: LDFLAGS += -shared
libzstd-nomt: $(ZSTD_NOMT_FILES)
	@echo compiling single-thread dynamic library $(LIBVER)
	@echo files : $(ZSTD_NOMT_FILES)
	$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@

.PHONY: clean
clean:
	$(RM) -r *.dSYM   # macOS-specific
	$(RM) core *.o *.a *.gcda *.$(SHARED_EXT) *.$(SHARED_EXT).* libzstd.pc
	$(RM) dll/libzstd.dll dll/libzstd.lib libzstd-nomt*
	$(RM) -r obj/*
	@echo Cleaning library completed

#-----------------------------------------------------------------------------
# make install is validated only for below listed environments
#-----------------------------------------------------------------------------
ifneq (,$(filter $(UNAME),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX))

lib: libzstd.pc

HAS_EXPLICIT_EXEC_PREFIX := $(if $(or $(EXEC_PREFIX),$(exec_prefix)),1,)

DESTDIR ?=
# directory variables : GNU conventions prefer lowercase
# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html
# support both lower and uppercase (BSD), use uppercase in script
prefix ?= /usr/local
PREFIX ?= $(prefix)
exec_prefix ?= $(PREFIX)
EXEC_PREFIX ?= $(exec_prefix)
libdir ?= $(EXEC_PREFIX)/lib
LIBDIR ?= $(libdir)
includedir ?= $(PREFIX)/include
INCLUDEDIR ?= $(includedir)

PCINCDIR := $(patsubst $(PREFIX)%,%,$(INCLUDEDIR))
PCLIBDIR := $(patsubst $(EXEC_PREFIX)%,%,$(LIBDIR))

# If we successfully stripped off a prefix, we'll add a reference to the
# relevant pc variable.
PCINCPREFIX := $(if $(findstring $(INCLUDEDIR),$(PCINCDIR)),,$${prefix})
PCLIBPREFIX := $(if $(findstring $(LIBDIR),$(PCLIBDIR)),,$${exec_prefix})

# If no explicit EXEC_PREFIX was set by the caller, write it out as a reference
# to PREFIX, rather than as a resolved value.
PCEXEC_PREFIX := $(if $(HAS_EXPLICIT_EXEC_PREFIX),$(EXEC_PREFIX),$${prefix})

ifneq (,$(filter $(UNAME),FreeBSD NetBSD DragonFly))
  PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig
else
  PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig
endif

ifneq (,$(filter $(UNAME),SunOS))
  INSTALL ?= ginstall
else
  INSTALL ?= install
endif

INSTALL_PROGRAM ?= $(INSTALL)
INSTALL_DATA ?= $(INSTALL) -m 644


libzstd.pc: libzstd.pc.in
	@echo creating pkgconfig
	@sed \
	    -e 's|@PREFIX@|$(PREFIX)|' \
	    -e 's|@EXEC_PREFIX@|$(PCEXEC_PREFIX)|' \
	    -e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \
	    -e 's|@LIBDIR@|$(PCLIBPREFIX)$(PCLIBDIR)|' \
	    -e 's|@VERSION@|$(VERSION)|' \
	    -e 's|@LIBS_PRIVATE@|$(LDFLAGS_DYNLIB)|' \
	    $< >$@

.PHONY: install
install: install-pc install-static install-shared install-includes
	@echo zstd static and shared library installed

.PHONY: install-pc
install-pc: libzstd.pc
	[ -e $(DESTDIR)$(PKGCONFIGDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(PKGCONFIGDIR)/
	$(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/

.PHONY: install-static
install-static:
	# only generate libzstd.a if it's not already present
	[ -e libzstd.a ] || $(MAKE) libzstd.a-release
	[ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/
	@echo Installing static library
	$(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR)

.PHONY: install-shared
install-shared:
	# only generate libzstd.so if it's not already present
	[ -e $(LIBZSTD) ] || $(MAKE) libzstd-release
	[ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/
	@echo Installing shared library
	$(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR)
	ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR)
	ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT)

.PHONY: install-includes
install-includes:
	[ -e $(DESTDIR)$(INCLUDEDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(INCLUDEDIR)/
	@echo Installing includes
	$(INSTALL_DATA) zstd.h $(DESTDIR)$(INCLUDEDIR)
	$(INSTALL_DATA) zstd_errors.h $(DESTDIR)$(INCLUDEDIR)
	$(INSTALL_DATA) zdict.h $(DESTDIR)$(INCLUDEDIR)

.PHONY: uninstall
uninstall:
	$(RM) $(DESTDIR)$(LIBDIR)/libzstd.a
	$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT)
	$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR)
	$(RM) $(DESTDIR)$(LIBDIR)/$(LIBZSTD)
	$(RM) $(DESTDIR)$(PKGCONFIGDIR)/libzstd.pc
	$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd.h
	$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd_errors.h
	$(RM) $(DESTDIR)$(INCLUDEDIR)/zdict.h
	@echo zstd libraries successfully uninstalled

endif

@ -1,55 -0,0 @@

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This file provides custom allocation primitives
 */

#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */

#include "mem.h"  /* MEM_STATIC */
#define ZSTD_STATIC_LINKING_ONLY
#include "../zstd.h"  /* ZSTD_customMem */

#ifndef ZSTD_ALLOCATIONS_H
#define ZSTD_ALLOCATIONS_H

/* custom memory allocation functions */

MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
{
    if (customMem.customAlloc)
        return customMem.customAlloc(customMem.opaque, size);
    return ZSTD_malloc(size);
}

MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
{
    if (customMem.customAlloc) {
        /* calloc implemented as malloc+memset;
         * not as efficient as calloc, but next best guess for custom malloc */
        void* const ptr = customMem.customAlloc(customMem.opaque, size);
        ZSTD_memset(ptr, 0, size);
        return ptr;
    }
    return ZSTD_calloc(1, size);
}

MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
{
    if (ptr!=NULL) {
        if (customMem.customFree)
            customMem.customFree(customMem.opaque, ptr);
        else
            ZSTD_free(ptr);
    }
}
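
/* Usage sketch (illustrative, not part of the original file): a ZSTD_customMem
 * routing allocations through a hypothetical counting allocator. The callback
 * signatures (ZSTD_allocFunction / ZSTD_freeFunction) come from zstd.h;
 * <stdlib.h> is assumed available for malloc()/free().
 */
static void* counting_alloc(void* opaque, size_t size)
{
    size_t* const counter = (size_t*)opaque;
    *counter += 1;                 /* track how many allocations were made */
    return malloc(size);
}

static void counting_free(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

/* Example use:
 *   static size_t allocs = 0;
 *   ZSTD_customMem const cmem = { counting_alloc, counting_free, &allocs };
 *   void* const p = ZSTD_customMalloc(64, cmem);   // allocs == 1
 *   ZSTD_customFree(p, cmem);
 */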

#endif /* ZSTD_ALLOCATIONS_H */

@ -1,175 -0,0 @@

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_BITS_H
#define ZSTD_BITS_H

#include "mem.h"

MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val)
{
    assert(val != 0);
    {
        static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
                                                30, 22, 20, 15, 25, 17, 4, 8,
                                                31, 27, 13, 23, 21, 19, 16, 7,
                                                26, 12, 18, 6, 11, 5, 10, 9};
        return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27];
    }
}
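
/* How the fallback works (explanatory note, not part of the original file):
 * (val & -val) isolates the lowest set bit, producing a power of two 2^k.
 * Multiplying by the De Bruijn constant 0x077CB531 places a unique 5-bit
 * pattern in the top bits, so the >> 27 yields an index whose table entry is
 * exactly k. Example: val = 0b1000 -> val & -val = 8, 8 * 0x077CB531 has top
 * bits 00111 = 7, and DeBruijnBytePos[7] = 3, the number of trailing zeros.
 */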

MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val)
{
    assert(val != 0);
#   if defined(_MSC_VER)
#       if STATIC_BMI2 == 1
            return (unsigned)_tzcnt_u32(val);
#       else
            if (val != 0) {
                unsigned long r;
                _BitScanForward(&r, val);
                return (unsigned)r;
            } else {
                /* Should not reach this code path */
                __assume(0);
            }
#       endif
#   elif defined(__GNUC__) && (__GNUC__ >= 4)
        return (unsigned)__builtin_ctz(val);
#   else
        return ZSTD_countTrailingZeros32_fallback(val);
#   endif
}

MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) {
    assert(val != 0);
    {
        static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29,
                                            11, 14, 16, 18, 22, 25, 3, 30,
                                            8, 12, 20, 28, 15, 17, 24, 7,
                                            19, 27, 23, 6, 26, 5, 4, 31};
        val |= val >> 1;
        val |= val >> 2;
        val |= val >> 4;
        val |= val >> 8;
        val |= val >> 16;
        return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27];
    }
}

MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val)
{
    assert(val != 0);
#   if defined(_MSC_VER)
#       if STATIC_BMI2 == 1
            return (unsigned)_lzcnt_u32(val);
#       else
            if (val != 0) {
                unsigned long r;
                _BitScanReverse(&r, val);
                return (unsigned)(31 - r);
            } else {
                /* Should not reach this code path */
                __assume(0);
            }
#       endif
#   elif defined(__GNUC__) && (__GNUC__ >= 4)
        return (unsigned)__builtin_clz(val);
#   else
        return ZSTD_countLeadingZeros32_fallback(val);
#   endif
}

MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val)
{
    assert(val != 0);
#   if defined(_MSC_VER) && defined(_WIN64)
#       if STATIC_BMI2 == 1
            return (unsigned)_tzcnt_u64(val);
#       else
            if (val != 0) {
                unsigned long r;
                _BitScanForward64(&r, val);
                return (unsigned)r;
            } else {
                /* Should not reach this code path */
                __assume(0);
            }
#       endif
#   elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__)
        return (unsigned)__builtin_ctzll(val);
#   else
        {
            U32 mostSignificantWord = (U32)(val >> 32);
            U32 leastSignificantWord = (U32)val;
            if (leastSignificantWord == 0) {
                return 32 + ZSTD_countTrailingZeros32(mostSignificantWord);
            } else {
                return ZSTD_countTrailingZeros32(leastSignificantWord);
            }
        }
#   endif
}

MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val)
{
    assert(val != 0);
#   if defined(_MSC_VER) && defined(_WIN64)
#       if STATIC_BMI2 == 1
            return (unsigned)_lzcnt_u64(val);
#       else
            if (val != 0) {
                unsigned long r;
                _BitScanReverse64(&r, val);
                return (unsigned)(63 - r);
            } else {
                /* Should not reach this code path */
                __assume(0);
            }
#       endif
#   elif defined(__GNUC__) && (__GNUC__ >= 4)
        return (unsigned)(__builtin_clzll(val));
#   else
        {
            U32 mostSignificantWord = (U32)(val >> 32);
            U32 leastSignificantWord = (U32)val;
            if (mostSignificantWord == 0) {
                return 32 + ZSTD_countLeadingZeros32(leastSignificantWord);
            } else {
                return ZSTD_countLeadingZeros32(mostSignificantWord);
            }
        }
#   endif
}

MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
            return ZSTD_countTrailingZeros64((U64)val) >> 3;
        } else {
            return ZSTD_countTrailingZeros32((U32)val) >> 3;
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
            return ZSTD_countLeadingZeros64((U64)val) >> 3;
        } else {
            return ZSTD_countLeadingZeros32((U32)val) >> 3;
        }
    }
}

MEM_STATIC unsigned ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
{
    assert(val != 0);
    return 31 - ZSTD_countLeadingZeros32(val);
}

#endif /* ZSTD_BITS_H */

@ -1,437 -0,0 @@

/* ******************************************************************
 * bitstream
 * Part of FSE library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif
/*
 * This API consists of small unitary functions, which must be inlined for best performance.
 * Since link-time-optimization is not available for all compilers,
 * these functions are defined into a .h to be included.
 */

/*-****************************************
 *  Dependencies
 ******************************************/
#include "mem.h"             /* unaligned access routines */
#include "compiler.h"        /* UNLIKELY() */
#include "debug.h"           /* assert(), DEBUGLOG(), RAWLOG() */
#include "error_private.h"   /* error codes and messages */
#include "bits.h"            /* ZSTD_highbit32 */


/*=========================================
 *  Target specific
 =========================================*/
#ifndef ZSTD_NO_INTRINSICS
#  if (defined(__BMI__) || defined(__BMI2__)) && defined(__GNUC__)
#    include <immintrin.h>   /* support for bextr (experimental)/bzhi */
#  elif defined(__ICCARM__)
#    include <intrinsics.h>
#  endif
#endif

#define STREAM_ACCUMULATOR_MIN_32  25
#define STREAM_ACCUMULATOR_MIN_64  57
#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))


/*-******************************************
 *  bitStream encoding API (write forward)
 ********************************************/
/* bitStream can mix input from multiple sources.
 * A critical property of these streams is that they encode and decode in **reverse** direction.
 * So the first bit sequence you add will be the last to be read, like a LIFO stack.
 */
typedef struct {
    size_t bitContainer;
    unsigned bitPos;
    char* startPtr;
    char* ptr;
    char* endPtr;
} BIT_CStream_t;

MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);

/* Start with initCStream, providing the size of buffer to write into.
 * bitStream will never write outside of this buffer.
 * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
 *
 * bits are first added to a local register.
 * Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
 * Writing data into memory is an explicit operation, performed by the flushBits function.
 * Hence keep track of how many bits are potentially stored into local register to avoid register overflow.
 * After a flushBits, a maximum of 7 bits might still be stored into local register.
 *
 * Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
 *
 * Last operation is to close the bitStream.
 * The function returns the final size of CStream in bytes.
 * If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
 */
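
/* Usage sketch (illustrative, not part of the original file): writing two
 * small fields and closing the stream. Error handling via ERR_isError() is
 * omitted for brevity.
 */
MEM_STATIC size_t example_encode_two_fields(void* dst, size_t dstCapacity)
{
    BIT_CStream_t bitC;
    size_t const initErr = BIT_initCStream(&bitC, dst, dstCapacity);
    if (initErr != 0) return initErr;      /* dstCapacity too small */
    BIT_addBits(&bitC, 5, 3);              /* value 5 stored on 3 bits */
    BIT_addBits(&bitC, 300, 9);            /* value 300 stored on 9 bits */
    BIT_flushBits(&bitC);                  /* commit local register to memory */
    return BIT_closeCStream(&bitC);        /* final size in bytes, 0 = overflow */
}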


/*-********************************************
 *  bitStream decoding API (read backward)
 **********************************************/
typedef struct {
    size_t   bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
    const char* limitPtr;
} BIT_DStream_t;

typedef enum { BIT_DStream_unfinished = 0,
               BIT_DStream_endOfBuffer = 1,
               BIT_DStream_completed = 2,
               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */

MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);


/* Start by invoking BIT_initDStream().
 * A chunk of the bitStream is then stored into a local register.
 * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
 * You can then retrieve bitFields stored into the local register, **in reverse order**.
 * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
 * A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
 * Otherwise, it can be less than that, so proceed accordingly.
 * Checking if DStream has reached its end can be performed with BIT_endOfDStream().
 */
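
/* Usage sketch (illustrative, not part of the original file): reading fields
 * back in reverse order of writing, reloading the register between reads.
 */
MEM_STATIC size_t example_decode_two_fields(const void* src, size_t srcSize)
{
    BIT_DStream_t bitD;
    size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
    if (ERR_isError(initResult)) return initResult;
    {   size_t const second = BIT_readBits(&bitD, 9);  /* written last, read first */
        size_t const first  = BIT_readBits(&bitD, 3);
        BIT_reloadDStream(&bitD);                      /* refill local register */
        (void)first; (void)second;
    }
    return BIT_endOfDStream(&bitD);   /* non-zero once fully consumed */
}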


/*-****************************************
 *  unsafe API
 ******************************************/
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */

MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
/* unsafe version; does not check buffer overflow */

MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */

/*=====    Local Constants   =====*/
static const unsigned BIT_mask[] = {
    0,          1,         3,         7,         0xF,       0x1F,
    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
    0x3FFFFFFF, 0x7FFFFFFF};   /* up to 31 bits */
#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))

/*-**************************************************************
 *  bitStream encoding
 ****************************************************************/
/*! BIT_initCStream() :
 *  `dstCapacity` must be > sizeof(size_t)
 *  @return : 0 if success,
 *            otherwise an error code (can be tested using ERR_isError()) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
                                  void* startPtr, size_t dstCapacity)
{
    bitC->bitContainer = 0;
    bitC->bitPos = 0;
    bitC->startPtr = (char*)startPtr;
    bitC->ptr = bitC->startPtr;
    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
    return 0;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS)
    return _bzhi_u64(bitContainer, nbBits);
#else
    assert(nbBits < BIT_MASK_SIZE);
    return bitContainer & BIT_mask[nbBits];
#endif
}

/*! BIT_addBits() :
 *  can add up to 31 bits into `bitC`.
 *  Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
                            size_t value, unsigned nbBits)
{
    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
    assert(nbBits < BIT_MASK_SIZE);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_addBitsFast() :
 *  works only if `value` is _clean_,
 *  meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
                                size_t value, unsigned nbBits)
{
    assert((value>>nbBits) == 0);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= value << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_flushBitsFast() :
 *  assumption : bitContainer has not overflowed
 *  unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_flushBits() :
 *  assumption : bitContainer has not overflowed
 *  safe version; checks for buffer overflow, and prevents it.
 *  note : does not signal buffer overflow.
 *  overflow will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_closeCStream() :
 *  @return : size of CStream, in bytes,
 *            or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
    BIT_flushBits(bitC);
    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}


/*-********************************************************
 *  bitStream decoding
 **********************************************************/
/*! BIT_initDStream() :
 *  Initialize a BIT_DStream_t.
 * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
 * `srcSize` must be the *exact* size of the bitStream, in bytes.
 *  @return : size of stream (== srcSize), or an errorCode if a problem is detected
 */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    bitD->start = (const char*)srcBuffer;
    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);

    if (srcSize >= sizeof(bitD->bitContainer)) {  /* normal case */
        bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
    } else {
        bitD->ptr = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
                ZSTD_FALLTHROUGH;

        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
                ZSTD_FALLTHROUGH;

        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
                ZSTD_FALLTHROUGH;

        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
                ZSTD_FALLTHROUGH;

        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
                ZSTD_FALLTHROUGH;

        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
                ZSTD_FALLTHROUGH;

        default: break;
        }
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
          if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
        }
        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
    }

    return srcSize;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
    return bitContainer >> start;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
    U32 const regMask = sizeof(bitContainer)*8 - 1;
    /* if start > regMask, bitstream is corrupted, and result is undefined */
    assert(nbBits < BIT_MASK_SIZE);
    /* On x86, the compiler transforms & ((1 << nbBits) - 1) into a bzhi
     * instruction, which is better than accessing memory. When the bmi2
     * instruction is not present, we consider such cpus old (pre-Haswell,
     * 2013) and their performance is not of that importance.
     */
#if defined(__x86_64__) || defined(_M_X86)
    return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
#else
    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
#endif
}

/*! BIT_lookBits() :
 *  Provides next n bits from local register.
 *  local register is not modified.
 *  On 32-bits, maxNbBits==24.
 *  On 64-bits, maxNbBits==56.
 * @return : value extracted */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
    /* arbitrate between double-shift and shift+mask */
#if 1
    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
     * bitstream is likely corrupted, and result is undefined */
    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
#else
    /* this code path is slower on my os-x laptop */
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
#endif
}

/*! BIT_lookBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    assert(nbBits >= 1);
    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}

MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

/*! BIT_readBits() :
 *  Read (consume) next n bits from local register and update.
 *  Pay attention to not read more than nbBits contained into local register.
 * @return : extracted value. */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
    size_t const value = BIT_lookBits(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_readBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
    size_t const value = BIT_lookBitsFast(bitD, nbBits);
    assert(nbBits >= 1);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_reloadDStreamFast() :
 *  Similar to BIT_reloadDStream(), but with two differences:
 *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
 *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
 *     point you must use BIT_reloadDStream() to reload.
 */
MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
{
    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
        return BIT_DStream_overflow;
    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
    bitD->ptr -= bitD->bitsConsumed >> 3;
|
||||
bitD->bitsConsumed &= 7;
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
return BIT_DStream_unfinished;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream() :
|
||||
* Refill `bitD` from buffer previously set in BIT_initDStream() .
|
||||
* This function is safe, it guarantees it will not read beyond src buffer.
|
||||
* @return : status of `BIT_DStream_t` internal register.
|
||||
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
|
||||
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
|
||||
{
|
||||
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
|
||||
return BIT_DStream_overflow;
|
||||
|
||||
if (bitD->ptr >= bitD->limitPtr) {
|
||||
return BIT_reloadDStreamFast(bitD);
|
||||
}
|
||||
if (bitD->ptr == bitD->start) {
|
||||
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
|
||||
return BIT_DStream_completed;
|
||||
}
|
||||
/* start < ptr < limitPtr */
|
||||
{ U32 nbBytes = bitD->bitsConsumed >> 3;
|
||||
BIT_DStream_status result = BIT_DStream_unfinished;
|
||||
if (bitD->ptr - nbBytes < bitD->start) {
|
||||
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
|
||||
result = BIT_DStream_endOfBuffer;
|
||||
}
|
||||
bitD->ptr -= nbBytes;
|
||||
bitD->bitsConsumed -= nbBytes*8;
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/*! BIT_endOfDStream() :
|
||||
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
|
||||
*/
|
||||
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
|
||||
{
|
||||
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
|
||||
}
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* BITSTREAM_H_MODULE */
|
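/* Editor's usage sketch (not part of the original header): a round trip
 * through the CStream/DStream API above. Assumes bitstream.h is included;
 * BIT_initCStream() and BIT_addBits() are declared earlier in this header.
 * Note the decoder returns values in reverse order of writing. */
static int BIT_roundTripExample(void)
{
    BYTE buffer[16];
    size_t streamSize;

    {   BIT_CStream_t bitC;
        if (BIT_initCStream(&bitC, buffer, sizeof(buffer))) return 1;
        BIT_addBits(&bitC, 5, 3);     /* write value 5 on 3 bits */
        BIT_addBits(&bitC, 170, 8);   /* write value 170 on 8 bits */
        BIT_flushBits(&bitC);
        streamSize = BIT_closeCStream(&bitC);
        if (streamSize == 0) return 1;   /* dstBuffer too small */
    }

    {   BIT_DStream_t bitD;
        if (ERR_isError(BIT_initDStream(&bitD, buffer, streamSize))) return 1;
        {   size_t const second = BIT_readBits(&bitD, 8);   /* reads 170 : last written, first read */
            size_t const first  = BIT_readBits(&bitD, 3);   /* reads 5 */
            (void)first; (void)second;
        }
        return !BIT_endOfDStream(&bitD);   /* 0 when every bit was consumed */
    }
}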
@ -1,354 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

#include "portability_macros.h"

/*-*******************************************************
*  Compiler specifics
*********************************************************/
/* force inlining */

#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#else

#define INLINE_KEYWORD
#define FORCE_INLINE_ATTR

#endif

/**
  On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC).
  This explicitly marks such functions as __cdecl so that the code will still compile
  if a CC other than __cdecl has been made the default.
*/
#if defined(_MSC_VER)
#  define WIN_CDECL __cdecl
#else
#  define WIN_CDECL
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compiler's
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
#  define UNUSED_ATTR __attribute__((unused))
#else
#  define UNUSED_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  if defined(__GNUC__) || defined(__ICCARM__)
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif


/* target attribute */
#if defined(__GNUC__) || defined(__ICCARM__)
#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
#  define TARGET_ATTRIBUTE(target)
#endif

/* Target attribute for BMI2 dynamic dispatch.
 * Enable lzcnt, bmi, and bmi2.
 * We test for bmi1 & bmi2. lzcnt is included in bmi1.
 */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")

/* prefetch
 * can be disabled, by declaring NO_PREFETCH build macro */
#if defined(NO_PREFETCH)
#  define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */
#  define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))   /* _mm_prefetch() is not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#    define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#    define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#  elif defined(__aarch64__)
#    define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
#    define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
#  else
#    define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */
#    define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */
#  endif
#endif  /* NO_PREFETCH */

#define CACHELINE_SIZE 64

#define PREFETCH_AREA(p, s)  {                              \
    const char* const _ptr = (const char*)(p);              \
    size_t const _size = (size_t)(s);                       \
    size_t _pos;                                            \
    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {        \
        PREFETCH_L2(_ptr + _pos);                           \
    }                                                       \
}

/* vectorization
 * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
 * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
#  else
#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
#  endif
#else
#  define DONT_VECTORIZE
#endif

/* Tell the compiler that a branch is likely or unlikely.
 * Only use these macros if it causes the compiler to generate better code.
 * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
 * and clang, please do.
 */
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif

#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
#  define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
#else
#  define ZSTD_UNREACHABLE { assert(0); }
#endif

/* disable warnings */
#ifdef _MSC_VER    /* Visual Studio */
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
#endif

/* Like DYNAMIC_BMI2, but for compile-time determination of BMI2 support */
#ifndef STATIC_BMI2
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
#    ifdef __AVX2__   /* MSVC does not have a BMI2-specific flag, but every CPU that supports AVX2 also supports BMI2 */
#      define STATIC_BMI2 1
#    endif
#  elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__)
#    define STATIC_BMI2 1
#  endif
#endif

#ifndef STATIC_BMI2
#define STATIC_BMI2 0
#endif

/* compile time determination of SIMD support */
#if !defined(ZSTD_NO_INTRINSICS)
#  if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
#    define ZSTD_ARCH_X86_SSE2
#  endif
#  if defined(__ARM_NEON) || defined(_M_ARM64)
#    define ZSTD_ARCH_ARM_NEON
#  endif
#
#  if defined(ZSTD_ARCH_X86_SSE2)
#    include <emmintrin.h>
#  elif defined(ZSTD_ARCH_ARM_NEON)
#    include <arm_neon.h>
#  endif
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
#  define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
#  define ZSTD_HAS_C_ATTRIBUTE(x) 0
#endif

/* Only use C++ attributes in C++. Some compilers report support for C++
 * attributes when compiling with C.
 */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
#  define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#  define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
#endif

/* Define ZSTD_FALLTHROUGH macro for annotating switch cases with the 'fallthrough' attribute.
 * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
 * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * - Else: __attribute__((__fallthrough__))
 */
#ifndef ZSTD_FALLTHROUGH
#  if ZSTD_HAS_C_ATTRIBUTE(fallthrough)
#    define ZSTD_FALLTHROUGH [[fallthrough]]
#  elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough)
#    define ZSTD_FALLTHROUGH [[fallthrough]]
#  elif __has_attribute(__fallthrough__)
/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon
 * gcc complains about: a label can only be part of a statement and a declaration is not a statement.
 */
#    define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__))
#  else
#    define ZSTD_FALLTHROUGH
#  endif
#endif

/*-**************************************************************
*  Alignment check
*****************************************************************/

/* this test was initially positioned in mem.h,
 * but this file is removed (or replaced) for linux kernel
 * so it's now hosted in compiler.h,
 * which remains valid for both user & kernel spaces.
 */

#ifndef ZSTD_ALIGNOF
#  if defined(__GNUC__) || defined(_MSC_VER)
/* covers gcc, clang & MSVC */
/* note : this section must come first, before C11,
 * due to a limitation in the kernel source generator */
#    define ZSTD_ALIGNOF(T) __alignof(T)

#  elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
/* C11 support */
#    include <stdalign.h>
#    define ZSTD_ALIGNOF(T) alignof(T)

#  else
/* No known support for alignof() - imperfect backup */
#    define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))

#  endif
#endif /* ZSTD_ALIGNOF */

/*-**************************************************************
*  Sanitizer
*****************************************************************/

/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
 * abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__
#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
#endif
#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
#endif
#endif

#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h>   /* size_t */
#define ZSTD_DEPS_NEED_STDINT
#include "zstd_deps.h"   /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h>   /* size_t */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif

#endif /* ZSTD_COMPILER_H */
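/* Editor's usage sketch (not part of the original header): how the hint
 * macros above combine at a call site. The function and its workload are
 * hypothetical. */
static size_t sumNonZero(const unsigned* table, size_t n)
{
    size_t total = 0;
    size_t i;
    PREFETCH_AREA(table, n * sizeof(*table));   /* warm L2 one cache line at a time */
    for (i = 0; i < n; i++) {
        if (UNLIKELY(table[i] == 0)) continue;  /* hint: zero entries are rare */
        total += table[i];
    }
    return total;
}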
@ -1,213 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMMON_CPU_H
#define ZSTD_COMMON_CPU_H

/**
 * Implementation taken from folly/CpuId.h
 * https://github.com/facebook/folly/blob/master/folly/CpuId.h
 */

#include "mem.h"

#ifdef _MSC_VER
#include <intrin.h>
#endif

typedef struct {
    U32 f1c;
    U32 f1d;
    U32 f7b;
    U32 f7c;
} ZSTD_cpuid_t;

MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
    U32 f1c = 0;
    U32 f1d = 0;
    U32 f7b = 0;
    U32 f7c = 0;
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
    int reg[4];
    __cpuid((int*)reg, 0);
    {
        int const n = reg[0];
        if (n >= 1) {
            __cpuid((int*)reg, 1);
            f1c = (U32)reg[2];
            f1d = (U32)reg[3];
        }
        if (n >= 7) {
            __cpuidex((int*)reg, 7, 0);
            f7b = (U32)reg[1];
            f7c = (U32)reg[2];
        }
    }
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
    /* The following block is like the normal cpuid branch below, but gcc
     * reserves ebx for use as its PIC register, so we must specially
     * handle the save and restore to avoid clobbering the register.
     */
    U32 n;
    __asm__(
        "pushl %%ebx\n\t"
        "cpuid\n\t"
        "popl %%ebx\n\t"
        : "=a"(n)
        : "a"(0)
        : "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "popl %%ebx\n\t"
            : "=a"(f1a), "=c"(f1c), "=d"(f1d)
            : "a"(1));
    }
    if (n >= 7) {
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "movl %%ebx, %%eax\n\t"
            "popl %%ebx"
            : "=a"(f7b), "=c"(f7c)
            : "a"(7), "c"(0)
            : "edx");
    }
#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
    U32 n;
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
    }
    if (n >= 7) {
        U32 f7a;
        __asm__("cpuid"
                : "=a"(f7a), "=b"(f7b), "=c"(f7c)
                : "a"(7), "c"(0)
                : "edx");
    }
#endif
    {
        ZSTD_cpuid_t cpuid;
        cpuid.f1c = f1c;
        cpuid.f1d = f1d;
        cpuid.f7b = f7b;
        cpuid.f7c = f7c;
        return cpuid;
    }
}

#define X(name, r, bit)                                                 \
    MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {        \
        return ((cpuid.r) & (1U << bit)) != 0;                          \
    }

/* cpuid(1): Processor Info and Feature Bits. */
#define C(name, bit) X(name, f1c, bit)
  C(sse3, 0)
  C(pclmuldq, 1)
  C(dtes64, 2)
  C(monitor, 3)
  C(dscpl, 4)
  C(vmx, 5)
  C(smx, 6)
  C(eist, 7)
  C(tm2, 8)
  C(ssse3, 9)
  C(cnxtid, 10)
  C(fma, 12)
  C(cx16, 13)
  C(xtpr, 14)
  C(pdcm, 15)
  C(pcid, 17)
  C(dca, 18)
  C(sse41, 19)
  C(sse42, 20)
  C(x2apic, 21)
  C(movbe, 22)
  C(popcnt, 23)
  C(tscdeadline, 24)
  C(aes, 25)
  C(xsave, 26)
  C(osxsave, 27)
  C(avx, 28)
  C(f16c, 29)
  C(rdrand, 30)
#undef C
#define D(name, bit) X(name, f1d, bit)
  D(fpu, 0)
  D(vme, 1)
  D(de, 2)
  D(pse, 3)
  D(tsc, 4)
  D(msr, 5)
  D(pae, 6)
  D(mce, 7)
  D(cx8, 8)
  D(apic, 9)
  D(sep, 11)
  D(mtrr, 12)
  D(pge, 13)
  D(mca, 14)
  D(cmov, 15)
  D(pat, 16)
  D(pse36, 17)
  D(psn, 18)
  D(clfsh, 19)
  D(ds, 21)
  D(acpi, 22)
  D(mmx, 23)
  D(fxsr, 24)
  D(sse, 25)
  D(sse2, 26)
  D(ss, 27)
  D(htt, 28)
  D(tm, 29)
  D(pbe, 31)
#undef D

/* cpuid(7): Extended Features. */
#define B(name, bit) X(name, f7b, bit)
  B(bmi1, 3)
  B(hle, 4)
  B(avx2, 5)
  B(smep, 7)
  B(bmi2, 8)
  B(erms, 9)
  B(invpcid, 10)
  B(rtm, 11)
  B(mpx, 14)
  B(avx512f, 16)
  B(avx512dq, 17)
  B(rdseed, 18)
  B(adx, 19)
  B(smap, 20)
  B(avx512ifma, 21)
  B(pcommit, 22)
  B(clflushopt, 23)
  B(clwb, 24)
  B(avx512pf, 26)
  B(avx512er, 27)
  B(avx512cd, 28)
  B(sha, 29)
  B(avx512bw, 30)
  B(avx512vl, 31)
#undef B
#define C(name, bit) X(name, f7c, bit)
  C(prefetchwt1, 0)
  C(avx512vbmi, 1)
#undef C

#undef X

#endif /* ZSTD_COMMON_CPU_H */
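/* Editor's usage sketch (not part of the original header): query the CPU
 * once, then branch on the generated feature predicates, as zstd's dynamic
 * BMI2 dispatch does. */
static int supportsDynamicBmi2(void)
{
    ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);   /* lzcnt is implied by bmi1 */
}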
@ -1,24 +0,0 @@
/* ******************************************************************
 * debug
 * Part of FSE library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */


/*
 * This module only hosts one global variable,
 * which can be used to dynamically influence the verbosity of traces,
 * such as DEBUGLOG and RAWLOG
 */

#include "debug.h"

int g_debuglevel = DEBUGLEVEL;
@ -1,107 +0,0 @@
/* ******************************************************************
 * debug
 * Part of FSE library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */


/*
 * The purpose of this header is to enable debug functions.
 * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
 * and DEBUG_STATIC_ASSERT() for compile-time.
 *
 * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
 *
 * Level 1 enables assert() only.
 * Starting at level 2, traces can be generated and pushed to stderr.
 * The higher the level, the more verbose the traces.
 *
 * It's possible to dynamically adjust the level using the variable g_debuglevel,
 * which is only declared if DEBUGLEVEL>=2,
 * and is a global variable, not multi-thread protected (use with care)
 */

#ifndef DEBUG_H_12987983217
#define DEBUG_H_12987983217

#if defined (__cplusplus)
extern "C" {
#endif


/* static assert is triggered at compile time, leaving no runtime artefact.
 * static assert only works with compile-time constants.
 * Also, this variant can only be used inside a function. */
#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])


/* DEBUGLEVEL is expected to be defined externally,
 * typically through the compiler command line.
 * Value must be a number. */
#ifndef DEBUGLEVEL
#  define DEBUGLEVEL 0
#endif


/* recommended values for DEBUGLEVEL :
 * 0 : release mode, no debug, all run-time checks disabled
 * 1 : enables assert() only, no display
 * 2 : reserved, for currently active debug path
 * 3 : events once per object lifetime (CCtx, CDict, etc.)
 * 4 : events once per frame
 * 5 : events once per block
 * 6 : events once per sequence (verbose)
 * 7+: events at every position (*very* verbose)
 *
 * It's generally inconvenient to output traces > 5.
 * In that case, it's possible to selectively trigger high verbosity levels
 * by modifying g_debuglevel.
 */

#if (DEBUGLEVEL>=1)
#  define ZSTD_DEPS_NEED_ASSERT
#  include "zstd_deps.h"
#else
#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
#    define assert(condition) ((void)0)   /* disable assert (default) */
#  endif
#endif

#if (DEBUGLEVEL>=2)
#  define ZSTD_DEPS_NEED_IO
#  include "zstd_deps.h"
extern int g_debuglevel; /* the variable is only declared,
                            it actually lives in debug.c,
                            and is shared by the whole process.
                            It's not thread-safe.
                            It's useful when enabling very verbose levels
                            on selective conditions (such as position in src) */

#  define RAWLOG(l, ...)   {                                     \
                if (l<=g_debuglevel) {                           \
                    ZSTD_DEBUG_PRINT(__VA_ARGS__);               \
            }   }
#  define DEBUGLOG(l, ...) {                                     \
                if (l<=g_debuglevel) {                           \
                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
                    ZSTD_DEBUG_PRINT(" \n");                     \
            }   }
#else
#  define RAWLOG(l, ...)   {}    /* disabled */
#  define DEBUGLOG(l, ...) {}    /* disabled */
#endif


#if defined (__cplusplus)
}
#endif

#endif /* DEBUG_H_12987983217 */
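/* Editor's usage sketch (not part of the original header): with DEBUGLEVEL>=4
 * defined on the compiler command line, this emits one trace per frame; at
 * lower levels both macros compile away to empty blocks. The function is
 * hypothetical. */
static void traceFrameExample(size_t frameSize)
{
    assert(frameSize > 0);                                    /* active when DEBUGLEVEL >= 1 */
    DEBUGLOG(4, "decoding frame of %u bytes", (unsigned)frameSize);
}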
@ -1,340 +0,0 @@
/* ******************************************************************
 * Common functions of New Generation Entropy library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* *************************************
*  Dependencies
***************************************/
#include "mem.h"
#include "error_private.h"        /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY   /* FSE_MIN_TABLELOG */
#include "fse.h"
#include "huf.h"
#include "bits.h"                 /* ZSTD_highbit32, ZSTD_countTrailingZeros32 */


/*=== Version ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*=== Error Management ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }

unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                           const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    ip += 3;
                } else {
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11. */
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3);   /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
        }
        {
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = ZSTD_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    return ip-istart;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_readNCount_body_default(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif

size_t FSE_readNCount_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
    }
#endif
    (void)bmi2;
    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

size_t FSE_readNCount(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}


/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src`, or an error code.
    Note : Needed by HUF_readCTable() and HUF_readDTableX?().
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
}

FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize); */   /* is not necessary, even though some analyzers complain ... */

    if (iSize >= 128) {   /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = ZSTD_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << ZSTD_highbit32(rest);
            U32 const lastWeight = ZSTD_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);   /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
}
#endif

size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize,
                     int flags)
{
#if DYNAMIC_BMI2
    if (flags & HUF_flags_bmi2) {
        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
    }
#endif
    (void)flags;
    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
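/* Editor's usage sketch (not part of the original file): decoding a
 * normalized-count header with the public entry point defined above. The
 * caller and buffer are hypothetical; 256 entries cover maxSymbolValue == 255. */
static size_t readNCountExample(const void* header, size_t headerSize)
{
    short normalizedCounter[256];
    unsigned maxSymbolValue = 255;   /* in: table capacity; out: largest symbol present */
    unsigned tableLog;
    size_t const headerUsed = FSE_readNCount(normalizedCounter, &maxSymbolValue, &tableLog,
                                             header, headerSize);
    if (FSE_isError(headerUsed)) return headerUsed;
    /* normalizedCounter[0..maxSymbolValue] now sums to 2^tableLog
     * (each -1 "low probability" entry counting as 1) */
    return headerUsed;
}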
@ -1,63 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* The purpose of this file is to have a single list of error strings embedded in binary */

#include "error_private.h"

const char* ERR_getErrorString(ERR_enum code)
{
#ifdef ZSTD_STRIP_ERROR_STRINGS
    (void)code;
    return "Error strings stripped";
#else
    static const char* const notErrorCode = "Unspecified error code";
    switch( code )
    {
    case PREFIX(no_error): return "No error detected";
    case PREFIX(GENERIC): return "Error (generic)";
    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
    case PREFIX(version_unsupported): return "Version not supported";
    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
    case PREFIX(corruption_detected): return "Data corruption detected";
    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
    case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification";
    case PREFIX(parameter_unsupported): return "Unsupported parameter";
    case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters";
    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
    case PREFIX(init_missing): return "Context should be init first";
    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
    case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected";
    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
    case PREFIX(srcSize_wrong): return "Src size is incorrect";
    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
    case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full";
    case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty";
    /* following error codes are not stable and may be removed or changed in a future version */
    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
    case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
    case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
    case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code";
    case PREFIX(externalSequences_invalid): return "External sequences are not valid";
    case PREFIX(maxCode):
    default: return notErrorCode;
    }
#endif
}
@ -1,159 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Note : this module is expected to remain private, do not expose it */

#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/* ****************************************
*  Dependencies
******************************************/
#include "../zstd_errors.h"   /* enum list */
#include "compiler.h"
#include "debug.h"
#include "zstd_deps.h"        /* size_t */


/* ****************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
#  define ERR_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define ERR_STATIC static inline
#elif defined(_MSC_VER)
#  define ERR_STATIC static __inline
#else
#  define ERR_STATIC static   /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/*-****************************************
*  Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name


/*-****************************************
*  Error codes handling
******************************************/
#undef ERROR   /* already defined on Visual Studio */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }

/* check and forward error code */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }


/*-****************************************
*  Error Strings
******************************************/

const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    return ERR_getErrorString(ERR_getErrorCode(code));
}

/**
 * Ignore: this is an internal helper.
 *
 * This is a helper function to help force C99-correctness during compilation.
 * Under strict compilation modes, variadic macro arguments can't be empty.
 * However, variadic function arguments can be. Using a function therefore lets
 * us statically check that at least one (string) argument was passed,
 * independent of the compilation flags.
 */
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...) {
    (void)format;
}

/**
 * Ignore: this is an internal helper.
 *
 * We want to force this function invocation to be syntactically correct, but
 * we don't want to force runtime evaluation of its arguments.
 */
#define _FORCE_HAS_FORMAT_STRING(...)              \
    if (0) {                                       \
        _force_has_format_string(__VA_ARGS__);     \
    }

#define ERR_QUOTE(str) #str

/**
 * Return the specified error if the condition evaluates to true.
 *
 * In debug modes, prints additional information.
 * In order to do that (particularly, printing the conditional that failed),
 * this can't just wrap RETURN_ERROR().
 */
#define RETURN_ERROR_IF(cond, err, ...)                                        \
    if (cond) {                                                                \
        RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s",              \
               __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err)));    \
        _FORCE_HAS_FORMAT_STRING(__VA_ARGS__);                                 \
        RAWLOG(3, ": " __VA_ARGS__);                                           \
        RAWLOG(3, "\n");                                                       \
        return ERROR(err);                                                     \
    }

/**
 * Unconditionally return the specified error.
 *
 * In debug modes, prints additional information.
 */
#define RETURN_ERROR(err, ...)                                                 \
    do {                                                                       \
        RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s",   \
               __FILE__, __LINE__, ERR_QUOTE(ERROR(err)));                     \
        _FORCE_HAS_FORMAT_STRING(__VA_ARGS__);                                 \
        RAWLOG(3, ": " __VA_ARGS__);                                           \
        RAWLOG(3, "\n");                                                       \
        return ERROR(err);                                                     \
    } while(0);

/**
 * If the provided expression evaluates to an error code, returns that error code.
 *
 * In debug modes, prints additional information.
 */
#define FORWARD_IF_ERROR(err, ...)                                                 \
    do {                                                                           \
        size_t const err_code = (err);                                             \
        if (ERR_isError(err_code)) {                                               \
            RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s",                 \
                   __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
            _FORCE_HAS_FORMAT_STRING(__VA_ARGS__);                                 \
            RAWLOG(3, ": " __VA_ARGS__);                                           \
            RAWLOG(3, "\n");                                                       \
            return err_code;                                                       \
        }                                                                          \
    } while(0);

#if defined (__cplusplus)
}
#endif

#endif /* ERROR_H_MODULE */
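/* Editor's usage sketch (not part of the original header): the error macros
 * above in a typical internal function; `reserveOutput` is hypothetical.
 * On the non-error path a size is returned directly, since error codes are
 * encoded as sizes close to (size_t)-1. */
static size_t reserveOutput(void* dst, size_t dstCapacity, size_t needed)
{
    RETURN_ERROR_IF(dst == NULL, dstBuffer_null, "no destination provided");
    RETURN_ERROR_IF(dstCapacity < needed, dstSize_tooSmall,
                    "need %u bytes, only %u available", (unsigned)needed, (unsigned)dstCapacity);
    return needed;
}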
@ -1,639 +0,0 @@
|
||||
/* ******************************************************************
|
||||
* FSE : Finite State Entropy codec
|
||||
* Public Prototypes declaration
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
*
|
||||
* You can contact the author at :
|
||||
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
****************************************************************** */
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef FSE_H
|
||||
#define FSE_H
|
||||
|
||||
|
||||
/*-*****************************************
|
||||
* Dependencies
|
||||
******************************************/
|
||||
#include "zstd_deps.h" /* size_t, ptrdiff_t */
|
||||
|
||||
|
||||
/*-*****************************************
|
||||
* FSE_PUBLIC_API : control library symbols visibility
|
||||
******************************************/
|
||||
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
|
||||
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
|
||||
# define FSE_PUBLIC_API __declspec(dllexport)
|
||||
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
|
||||
# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
|
||||
#else
|
||||
# define FSE_PUBLIC_API
|
||||
#endif
|
||||
|
||||
/*------ Version ------*/
|
||||
#define FSE_VERSION_MAJOR 0
|
||||
#define FSE_VERSION_MINOR 9
|
||||
#define FSE_VERSION_RELEASE 0
|
||||
|
||||
#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
|
||||
#define FSE_QUOTE(str) #str
|
||||
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
|
||||
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
|
||||
|
||||
#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
|
||||
FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
|
||||
|
||||
|
||||
/*-*****************************************
|
||||
* Tool functions
|
||||
******************************************/
|
||||
FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
|
||||
|
||||
/* Error Management */
|
||||
FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
|
||||
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
|
||||
|
||||
|
||||
/*-*****************************************
*  FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrences from source[] into table count[] (see hist.h)
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using an external method.
*/

/* *** COMPRESSION *** */

/*! FSE_optimalTableLog():
    dynamically downsize 'tableLog' when conditions are met.
    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
    @return : recommended tableLog (necessarily <= 'maxTableLog') */
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_normalizeCount():
    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
    useLowProbCount is a boolean parameter which trades off compressed size for
    faster header decoding. When it is set to 1, the compressed data will be slightly
    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
    is a good default, since header deserialization makes a big speed difference.
    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
    @return : tableLog,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);

/*! FSE_NCountWriteBound():
    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
    Typically useful for allocation purpose. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount():
    Compactly save 'normalizedCounter' into 'buffer'.
    @return : size of the compressed table,
              or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
                                 const short* normalizedCounter,
                                 unsigned maxSymbolValue, unsigned tableLog);

/*! Constructor and Destructor of FSE_CTable.
    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */

/*! FSE_buildCTable():
    Builds `ct`, which must be already allocated, using FSE_createCTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable():
    Compress `src` using `ct` into `dst` which must be already allocated.
    @return : size of compressed data (<= `dstCapacity`),
              or 0 if compressed data could not fit into `dst`,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrences of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table,
called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/

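/*! Usage sketch (editor's illustration, not part of the original header) :
    a minimal end-to-end pass over the detailed compression API above.
    It assumes a histogram function such as HIST_count() from "hist.h",
    and FSE_CTABLE_SIZE_U32() from the FSE_STATIC_LINKING_ONLY section further below.

    static size_t example_fse_compress(void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
    {
        unsigned count[256];
        short norm[256];
        unsigned maxSymbolValue = 255;
        unsigned char* op = (unsigned char*)dst;
        unsigned char* const oend = op + dstCapacity;

        size_t const maxCount = HIST_count(count, &maxSymbolValue, src, srcSize);   // 1. histogram
        if (FSE_isError(maxCount)) return maxCount;
        if (maxCount == srcSize) return 0;            // single-symbol input : use RLE instead

        {   unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
            size_t const nl = FSE_normalizeCount(norm, tableLog, count, srcSize,
                                                 maxSymbolValue, srcSize >= 2048);  // 2. normalize
            if (FSE_isError(nl)) return nl;
            {   size_t const hSize = FSE_writeNCount(op, (size_t)(oend-op),
                                                     norm, maxSymbolValue, tableLog);   // 3. save header
                if (FSE_isError(hSize)) return hSize;
                op += hSize;
            }
            {   FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];   // worst-case static allocation
                size_t const br = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);  // 4. build
                if (FSE_isError(br)) return br;
                {   size_t const cSize = FSE_compress_usingCTable(op, (size_t)(oend-op),
                                                                  src, srcSize, ct);    // 5. encode
                    if (FSE_isError(cSize) || cSize==0) return cSize;
                    op += cSize;
        }   }   }
        return (size_t)(op - (unsigned char*)dst);
    }
*/
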
/* *** DECOMPRESSION *** */

/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
    maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
                           const void* rBuffer, size_t rBuffSize);

/*! FSE_readNCount_bmi2():
 * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
 */
FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
                           const void* rBuffer, size_t rBuffSize, int bmi2);
typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */

/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
 If block is uncompressed, use memcpy() instead
 If block is a single repeated byte, use memset() instead )

The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().

The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().

`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/

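/*! Usage sketch (editor's illustration, not part of the original header) :
    reading back a table header saved with FSE_writeNCount().

    short norm[256];
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, rBuffer, rBuffSize);
    if (FSE_isError(hSize)) { ... }   // invalid or truncated header
    // 'norm', 'maxSymbolValue' and 'tableLog' can now feed FSE_buildDTable_wksp(),
    // declared in the FSE_STATIC_LINKING_ONLY section below.
*/
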
#endif  /* FSE_H */


#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
#include "bitstream.h"


/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))

/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
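/*! Usage sketch (editor's illustration, not part of the original header) :
    static allocation with the macros above. Both arrays are plain 'unsigned',
    hence already satisfy the alignment the tables require.

    FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];   // any tableLog <= 12, symbols <= 255
    FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
*/
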
/* *****************************************
 *  FSE advanced API
 ***************************************** */

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which used `minus==2` */

size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to always compress the same symbolValue */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
 * See FSE_buildCTable_wksp() for breakdown of workspace usage.
 */
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */

#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`.
 * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */

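/*! Usage sketch (editor's illustration, not part of the original header) :
    one-shot decompression with an external workspace, sized by the macro above.
    FSE_MAX_TABLELOG is 12 with the default FSE_MAX_MEMORY_USAGE of 14.

    unsigned wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(12, 255)];
    size_t const dSize = FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize,
                                                  12, wksp, sizeof(wksp), 0);   // bmi2=0 : portable path
    if (FSE_isError(dSize)) { ... }   // corrupted input, tableLog too large, or workspace too small
*/
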
typedef enum {
    FSE_repeat_none,   /**< Cannot use the previous table */
    FSE_repeat_check,  /**< Can use the previous table but it must be checked */
    FSE_repeat_valid   /**< Can use the previous table and it is assumed to be valid */
} FSE_repeat;

/* *****************************************
*  FSE symbol compression API
*******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their bodies are included in the next section.
*/
typedef struct {
    ptrdiff_t   value;
    const void* stateTable;
    const void* symbolTT;
    unsigned    stateLog;
} FSE_CState_t;

static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);

static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);

static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);

/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.

A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.

You will need a few variables to track your CStream. They are :

FSE_CTable    ct;         // Provided by FSE_buildCTable()
BIT_CStream_t bitStream;  // bitStream tracking structure
FSE_CState_t  state;      // State tracking structure (can have several)

The first thing to do is to init bitStream and state.
    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
    FSE_initCState(&state, ct);

Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
    FSE_encodeSymbol(&bitStream, &state, symbol);

At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
    BIT_addBits(&bitStream, bitField, nbBits);

The above methods don't commit data to memory, they just store it into local register, for speed.
Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
    BIT_flushBits(&bitStream);

Your last FSE encoding operation shall be to flush your last state value(s).
    FSE_flushCState(&bitStream, &state);

Finally, you must close the bitStream.
The function returns the size of CStream in bytes.
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
    size_t size = BIT_closeCStream(&bitStream);
*/

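/*! Usage sketch (editor's illustration, not part of the original header) :
    encoding a whole buffer with two interleaved states, per the tutorial above.
    Symbols are consumed from the end of 'src', since decoding runs in reverse.

    static size_t example_encode2(void* dst, size_t dstCapacity,
                                  const unsigned char* src, size_t srcSize,
                                  const FSE_CTable* ct)
    {
        BIT_CStream_t bitC;
        FSE_CState_t s1, s2;
        size_t n = srcSize;

        {   size_t const initError = BIT_initCStream(&bitC, dst, dstCapacity);
            if (FSE_isError(initError)) return initError;   // dstCapacity too small
        }
        FSE_initCState(&s1, ct);
        FSE_initCState(&s2, ct);

        while (n > 1) {
            FSE_encodeSymbol(&bitC, &s1, src[--n]);
            FSE_encodeSymbol(&bitC, &s2, src[--n]);
            BIT_flushBits(&bitC);                 // commit the local register to memory
        }
        if (n) { FSE_encodeSymbol(&bitC, &s1, src[--n]); BIT_flushBits(&bitC); }

        FSE_flushCState(&bitC, &s2);              // flush order must mirror the decoder's init order
        FSE_flushCState(&bitC, &s1);
        return BIT_closeCStream(&bitC);           // 0 == not compressible
    }
*/
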
/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;

static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);

static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);

static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);

/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.

You will need a few variables to track your bitStream. They are :

BIT_DStream_t DStream;    // Stream context
FSE_DState_t  DState;     // State context. Multiple ones are possible
FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()

The first thing to do is to init the bitStream.
    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);

You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);

You can then decode your data, symbol after symbol.
For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);

You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
Note : maximum allowed nbBits is 25, for 32-bits compatibility
    size_t bitField = BIT_readBits(&DStream, nbBits);

All above operations only read from local register (which size depends on size_t).
Refueling the register from memory is manually performed by the reload method.
    endSignal = BIT_reloadDStream(&DStream);

BIT_reloadDStream() result tells if there is still some more data to read from DStream.
BIT_DStream_unfinished : there is still some data left into the DStream.
BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.

When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
to properly detect the exact end of stream.
After each decoded symbol, check if DStream is fully consumed using this simple test :
    BIT_reloadDStream(&DStream) >= BIT_DStream_completed

When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
    BIT_endOfDStream(&DStream);
Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
    FSE_endOfDState(&DState);
*/

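/*! Usage sketch (editor's illustration, not part of the original header) :
    a minimal single-state decode loop following the tutorial above. Production
    code interleaves several states; see FSE_decompress_usingDTable_generic()
    in fse_decompress.c for the optimized 4-symbols-per-iteration variant.

    static size_t example_decode1(unsigned char* dst, size_t dstCapacity,
                                  const void* cSrc, size_t cSrcSize,
                                  const FSE_DTable* dt)
    {
        BIT_DStream_t bitD;
        FSE_DState_t state;
        size_t written = 0;

        {   size_t const initError = BIT_initDStream(&bitD, cSrc, cSrcSize);
            if (FSE_isError(initError)) return initError;
        }
        FSE_initDState(&state, &bitD, dt);

        while (written < dstCapacity) {
            dst[written++] = FSE_decodeSymbol(&state, &bitD);
            if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) break;   // stream exhausted
        }
        // a complete decoder also verifies FSE_endOfDState(&state) and BIT_endOfDStream(&bitD) here
        return written;
    }
*/
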
/* *****************************************
*  FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */

/* *****************************************
*  Implementation of inlined functions
*******************************************/
typedef struct {
    int deltaFindState;
    U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */

MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
    const void* ptr = ct;
    const U16* u16ptr = (const U16*) ptr;
    const U32 tableLog = MEM_read16(ptr);
    statePtr->value = (ptrdiff_t)1<<tableLog;
    statePtr->stateTable = u16ptr+2;
    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
    statePtr->stateLog = tableLog;
}


/*! FSE_initCState2() :
*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
*   uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
    FSE_initCState(statePtr, ct);
    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
        const U16* stateTable = (const U16*)(statePtr->stateTable);
        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
    }
}

MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
{
    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    const U16* const stateTable = (const U16*)(statePtr->stateTable);
    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    BIT_addBits(bitC, statePtr->value, nbBitsOut);
    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}

MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
    BIT_flushBits(bitC);
}


/* FSE_getMaxNbBits() :
 * Approximate maximum cost of a symbol, in bits.
 * Fractional costs get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
 * note 1 : assume symbolValue is valid (<= maxSymbolValue)
 * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
{
    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
}

/* FSE_bitCost() :
 * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
 * note 1 : assume symbolValue is valid (<= maxSymbolValue)
 * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
{
    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
    U32 const threshold = (minNbBits+1) << 16;
    assert(tableLog < 16);
    assert(accuracyLog < 31-tableLog);   /* ensure enough room for renormalization double shift */
    {   U32 const tableSize = 1 << tableLog;
        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
        U32 const bitMultiplier = 1 << accuracyLog;
        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
        assert(normalizedDeltaFromThreshold <= bitMultiplier);
        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
    }
}

/* ======    Decompression    ====== */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct
{
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBits(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSE_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}

#ifndef FSE_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
#  define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
#  define FSE_DEFAULT_MEMORY_USAGE 13
#endif
#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
#endif

/*!FSE_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
#  define FSE_MAX_SYMBOL_VALUE 255
#endif

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t


#endif   /* !FSE_COMMONDEFS_ONLY */

/* ***************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)


#endif /* FSE_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif
@ -1,311 +0,0 @@
/* ******************************************************************
 * FSE : Finite State Entropy decoder
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 *  You can contact the author at :
 *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */


/* **************************************************************
*  Includes
****************************************************************/
#include "debug.h"      /* assert */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"
#include "bits.h"       /* ZSTD_highbit32 */


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */

/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)

static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
    U16* symbolNext = (U16*)workSpace;
    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSE_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    if (highThreshold == tableSize - 1) {
        size_t const tableMask = tableSize-1;
        size_t const step = FSE_TABLESTEP(tableSize);
        /* First lay down the symbols in order.
         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
         * misses since small blocks generally have small table logs, so nearly
         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
         * our buffer to handle the over-write.
         */
        {   U64 const add = 0x0101010101010101ull;
            size_t pos = 0;
            U64 sv = 0;
            U32 s;
            for (s=0; s<maxSV1; ++s, sv += add) {
                int i;
                int const n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8) {
                    MEM_write64(spread + pos + i, sv);
                }
                pos += n;
            }
        }
        /* Now we spread those positions across the table.
         * The benefit of doing it in two stages is that we avoid the
         * variable size inner loop, which caused lots of branch misses.
         * Now we can run through all the positions without any branch misses.
         * We unroll the loop twice, since that is what empirically worked best.
         */
        {   size_t position = 0;
            size_t s;
            size_t const unroll = 2;
            assert(tableSize % unroll == 0);   /* FSE_MIN_TABLELOG is 5 */
            for (s = 0; s < (size_t)tableSize; s += unroll) {
                size_t u;
                for (u = 0; u < unroll; ++u) {
                    size_t const uPosition = (position + (u * step)) & tableMask;
                    tableDecode[uPosition].symbol = spread[s + u];
                }
                position = (position + (unroll * step)) & tableMask;
            }
            assert(position == 0);
        }
    } else {
        U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}

size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
}

#ifndef FSE_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/

FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;

    /* Init */
    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; Ends at exactly BIT_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state1);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state2);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state1);
            break;
    }   }

    return op-ostart;
}

typedef struct {
    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
    FSE_DTable dtable[1];   /* Dynamically sized */
} FSE_DecompressWksp;


FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
        void* dst, size_t dstCapacity,
        const void* cSrc, size_t cSrcSize,
        unsigned maxLog, void* workSpace, size_t wkspSize,
        int bmi2)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;

    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);

    /* normal FSE decoding mode */
    {   size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
        if (FSE_isError(NCountLength)) return NCountLength;
        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
        assert(NCountLength <= cSrcSize);
        ip += NCountLength;
        cSrcSize -= NCountLength;
    }

    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
    assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
    workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);

    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );

    {
        const void* ptr = wksp->dtable;
        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
        const U32 fastMode = DTableH->fastMode;

        /* select fast mode (static) */
        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
    }
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
}
#endif

size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
    }
#endif
    (void)bmi2;
    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}

#endif /* FSE_COMMONDEFS_ONLY */
@ -1,273 +0,0 @@
/* ******************************************************************
 * huff0 huffman codec,
 * part of Finite State Entropy library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef HUF_H_298734234
#define HUF_H_298734234

/* *** Dependencies *** */
#include "zstd_deps.h"    /* size_t */
#include "mem.h"          /* U32 */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"

/* *** Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024)       /**< maximum input size for a single block compressed with HUF_compress */
size_t HUF_compressBound(size_t size);       /**< maximum compressed size (worst case) */

/* Error Management */
unsigned    HUF_isError(size_t code);        /**< tells if a return value is an error code */
const char* HUF_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */


#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))

/* *** Constants *** */
#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
#define HUF_SYMBOLVALUE_MAX  255

#define HUF_TABLELOG_ABSOLUTEMAX  12  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
#  error "HUF_TABLELOG_MAX is too large !"
#endif

/* ****************************************
*  Static allocation
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* static allocation of HUF's Compression Table */
/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
typedef size_t HUF_CElt;   /* consider it an incomplete type */
#define HUF_CTABLE_SIZE_ST(maxSymbolValue)   ((maxSymbolValue)+2)   /* Use tables of size_t, for proper alignment */
#define HUF_CTABLE_SIZE(maxSymbolValue)      (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
    HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */

/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
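/*! Usage sketch (editor's illustration, not part of the original header) :
    static table allocation with the macros above.

    HUF_CREATE_STATIC_CTABLE(hufCTable, HUF_SYMBOLVALUE_MAX);   // HUF_CElt hufCTable[257]
    HUF_CREATE_STATIC_DTABLEX2(hufDTable, HUF_TABLELOG_MAX);    // first cell encodes maxTableLog
*/
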
/* ****************************************
*  Advanced decompression functions
******************************************/

/**
 * Huffman flags bitset.
 * For all flags, 0 is the default value.
 */
typedef enum {
    /**
     * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
     * Otherwise: Ignored.
     */
    HUF_flags_bmi2 = (1 << 0),
    /**
     * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
     * If unset: Use heuristic to find the table depth.
     */
    HUF_flags_optimalDepth = (1 << 1),
    /**
     * If set: If the previous table can encode the input, always reuse the previous table.
     * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
     */
    HUF_flags_preferRepeat = (1 << 2),
    /**
     * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
     * If unset: Always histogram the entire input.
     */
    HUF_flags_suspectUncompressible = (1 << 3),
    /**
     * If set: Don't use assembly implementations
     * If unset: Allow using assembly implementations
     */
    HUF_flags_disableAsm = (1 << 4),
    /**
     * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
     * If unset: Use the fast decoding loop when possible.
     */
    HUF_flags_disableFast = (1 << 5)
} HUF_flags_e;
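/*! Usage sketch (editor's illustration, not part of the original header) :
    flags are OR-ed together; 0 keeps every default.

    int const flags = HUF_flags_bmi2 | HUF_flags_suspectUncompressible;
    // pass 'flags' to any entry point below that takes an 'int flags' argument
*/
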
/* ****************************************
 *  HUF detailed API
 * ****************************************/
#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra

/*! HUF_compress() does the following:
 *  1. count symbol occurrences from source[] into table count[] using FSE_count() (exposed within "fse.h")
 *  2. (optional) refine tableLog using HUF_optimalTableLog()
 *  3. build Huffman table from count using HUF_buildCTable()
 *  4. save Huffman table to memory buffer using HUF_writeCTable()
 *  5. encode the data stream using HUF_compress4X_usingCTable()
 *
 *  The following API allows targeting specific sub-functions for advanced tasks.
 *  For example, it's possible to compress several blocks using the same 'CTable',
 *  or to save and regenerate 'CTable' using external methods.
 */
unsigned HUF_minTableLog(unsigned symbolCardinality);
unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
                             size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags);   /* table is used as scratch space for building and testing tables, not a return value */
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);

typedef enum {
    HUF_repeat_none,  /**< Cannot use the previous table */
    HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
    HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
} HUF_repeat;

/** HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid.
 *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                             const void* src, size_t srcSize,
                             unsigned maxSymbolValue, unsigned tableLog,
                             void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                             HUF_CElt* hufTable, HUF_repeat* repeat, int flags);

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
 */
#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree,
                             const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
                             void* workSpace, size_t wkspSize);
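/*! Usage sketch (editor's illustration, not part of the original header) :
    the five compression steps listed above, using only declarations from this header.
    The histogram is assumed to come from HIST_count() in "hist.h" (not declared here).

    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    U64 wksp[HUF_WORKSPACE_SIZE_U64];
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);

    HIST_count(count, &maxSymbolValue, src, srcSize);                                  // 1. histogram
    {   unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize,
                                        maxSymbolValue, wksp, sizeof(wksp),
                                        ctable, count, 0);                             // 2. depth
        size_t const br = HUF_buildCTable_wksp(ctable, count, maxSymbolValue, tableLog,
                                        wksp, sizeof(wksp));                           // 3. build
        if (HUF_isError(br)) { ... }
        {   size_t const hSize = HUF_writeCTable_wksp(dst, dstCapacity, ctable,
                                        maxSymbolValue, tableLog, wksp, sizeof(wksp)); // 4. save
            size_t const cSize = HUF_compress4X_usingCTable((char*)dst + hSize,
                                        dstCapacity - hSize, src, srcSize, ctable, 0); // 5. encode
            ...
    }   }
*/
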
/*! HUF_readStats() :
 *  Read compact Huffman tree, saved by HUF_writeCTable().
 * `huffWeight` is destination buffer.
 * @return : size read from `src` , or an error Code .
 *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize);

/*! HUF_readStats_wksp() :
 *  Same as HUF_readStats() but takes an external workspace which must be
 *  4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
 *  If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
 */
#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                          const void* src, size_t srcSize,
                          void* workspace, size_t wkspSize,
                          int flags);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);

/** HUF_getNbBitsFromCTable() :
 *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
 *  Note 1 : is not inlined, as HUF_CElt definition is private */
U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);

/*
 * HUF_decompress() does the following:
 * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
 * 2. build Huffman table from save, using HUF_readDTableX?()
 * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
 */

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
 *  Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

/**
 *  The minimum workspace size for the `workSpace` used in
 *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
 *
 *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
 *  Buffer overflow errors may potentially occur if code modifications result in
 *  a required workspace size greater than that specified in the following
 *  macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))


/* ====================== */
|
||||
/* single stream variants */
|
||||
/* ====================== */
|
||||
|
||||
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
|
||||
/** HUF_compress1X_repeat() :
|
||||
* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
|
||||
* If it uses hufTable it does not modify hufTable or repeat.
|
||||
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
|
||||
* If preferRepeat then the old table will always be used if valid.
|
||||
* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
|
||||
size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
|
||||
const void* src, size_t srcSize,
|
||||
unsigned maxSymbolValue, unsigned tableLog,
|
||||
void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
|
||||
HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
|
||||
|
||||
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
|
||||
#ifndef HUF_FORCE_DECOMPRESS_X1
|
||||
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */
|
||||
#endif
|
||||
|
||||
/* BMI2 variants.
|
||||
* If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
|
||||
*/
|
||||
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
|
||||
#ifndef HUF_FORCE_DECOMPRESS_X2
|
||||
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
|
||||
#endif
|
||||
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
|
||||
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
|
||||
#ifndef HUF_FORCE_DECOMPRESS_X2
|
||||
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
|
||||
#endif
|
||||
#ifndef HUF_FORCE_DECOMPRESS_X1
|
||||
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
|
||||
#endif
|
||||
|
||||
#endif /* HUF_H_298734234 */
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
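/* Illustrative sketch, not part of the upstream sources: how the workspace macros
 * above are meant to be consumed. The U32 array guarantees the 4-byte alignment the
 * API requires, and the _U32 macro sizes it so sizeof(workspace) equals
 * HUF_DECOMPRESS_WORKSPACE_SIZE bytes. `dctx` is assumed to be a HUF_DTable prepared
 * by the caller; `dst`/`cSrc` and their sizes are hypothetical caller buffers. */
#if 0   /* illustration only */
static size_t example_huf_decompress1X(HUF_DTable* dctx,
                                       void* dst, size_t dstCapacity,
                                       const void* cSrc, size_t cSrcSize)
{
    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    /* returns the regenerated size, or an error code (check with HUF_isError) */
    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstCapacity,
                                      cSrc, cSrcSize,
                                      workspace, sizeof(workspace), /* flags */ 0);
}
#endif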
@ -1,435 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include "compiler.h"   /* __has_builtin */
#include "debug.h"      /* DEBUG_STATIC_ASSERT */
#include "zstd_deps.h"  /* ZSTD_memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#  include <stdlib.h>   /* _byteswap_ulong */
#  include <intrin.h>   /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if defined(_AIX)
#  include <inttypes.h>
# else
#  include <stdint.h>  /* intptr_t */
# endif
  typedef uint8_t  BYTE;
  typedef uint8_t  U8;
  typedef int8_t   S8;
  typedef uint16_t U16;
  typedef int16_t  S16;
  typedef uint32_t U32;
  typedef int32_t  S32;
  typedef uint64_t U64;
  typedef int64_t  S64;
#else
# include <limits.h>
# if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
# endif
  typedef unsigned char      BYTE;
  typedef unsigned char      U8;
  typedef signed char        S8;
# if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
# endif
  typedef unsigned short     U16;
  typedef signed short       S16;
# if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
# endif
  typedef unsigned int       U32;
  typedef signed int         S32;
  /* note : there are no limits defined for long long type in C90.
   * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long U64;
  typedef signed long long   S64;
#endif


/*-**************************************************************
*  Memory I/O API
*****************************************************************/
/*=== Static platform detection ===*/
MEM_STATIC unsigned MEM_32bits(void);
MEM_STATIC unsigned MEM_64bits(void);
MEM_STATIC unsigned MEM_isLittleEndian(void);

/*=== Native unaligned read/write ===*/
MEM_STATIC U16 MEM_read16(const void* memPtr);
MEM_STATIC U32 MEM_read32(const void* memPtr);
MEM_STATIC U64 MEM_read64(const void* memPtr);
MEM_STATIC size_t MEM_readST(const void* memPtr);

MEM_STATIC void MEM_write16(void* memPtr, U16 value);
MEM_STATIC void MEM_write32(void* memPtr, U32 value);
MEM_STATIC void MEM_write64(void* memPtr, U64 value);

/*=== Little endian unaligned read/write ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr);
MEM_STATIC U32 MEM_readLE24(const void* memPtr);
MEM_STATIC U32 MEM_readLE32(const void* memPtr);
MEM_STATIC U64 MEM_readLE64(const void* memPtr);
MEM_STATIC size_t MEM_readLEST(const void* memPtr);

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);

/*=== Big endian unaligned read/write ===*/
MEM_STATIC U32 MEM_readBE32(const void* memPtr);
MEM_STATIC U64 MEM_readBE64(const void* memPtr);
MEM_STATIC size_t MEM_readBEST(const void* memPtr);

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);

/*=== Byteswap ===*/
MEM_STATIC U32 MEM_swap32(U32 in);
MEM_STATIC U64 MEM_swap64(U64 in);
MEM_STATIC size_t MEM_swapST(size_t in);


/*-**************************************************************
*  Memory I/O Implementation
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
 * Method 0 : always use `memcpy()`. Safe and portable.
 * Method 1 : use a compiler extension to allow unaligned access.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 * Default  : method 1 if supported, else method 0
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# ifdef __GNUC__
#  define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
    return 1;
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return 0;
#elif defined(__clang__) && __LITTLE_ENDIAN__
    return 1;
#elif defined(__clang__) && __BIG_ENDIAN__
    return 0;
#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86)
    return 1;
#elif defined(__DMC__) && defined(_M_IX86)
    return 1;
#else
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
#endif
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates the C standard, by lying on structure alignment.
   Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

typedef __attribute__((aligned(1))) U16 unalign16;
typedef __attribute__((aligned(1))) U32 unalign32;
typedef __attribute__((aligned(1))) U64 unalign64;
typedef __attribute__((aligned(1))) size_t unalignArch;

MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

MEM_STATIC U32 MEM_swap32_fallback(U32 in)
{
    return ((in << 24) & 0xff000000 ) |
           ((in <<  8) & 0x00ff0000 ) |
           ((in >>  8) & 0x0000ff00 ) |
           ((in >> 24) & 0x000000ff );
}

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return MEM_swap32_fallback(in);
#endif
}

MEM_STATIC U64 MEM_swap64_fallback(U64 in)
{
    return ((in << 56) & 0xff00000000000000ULL) |
           ((in << 40) & 0x00ff000000000000ULL) |
           ((in << 24) & 0x0000ff0000000000ULL) |
           ((in <<  8) & 0x000000ff00000000ULL) |
           ((in >>  8) & 0x00000000ff000000ULL) |
           ((in >> 24) & 0x0000000000ff0000ULL) |
           ((in >> 40) & 0x000000000000ff00ULL) |
           ((in >> 56) & 0x00000000000000ffULL);
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return MEM_swap64_fallback(in);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}

/* code only tested on 32 and 64 bits systems */
MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */
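/* Illustrative sketch, not part of the upstream sources: the little-endian helpers
 * above define an endianness-independent byte layout. A value written with
 * MEM_writeLE32() reads back identically with MEM_readLE32() on both little- and
 * big-endian hosts, because each side byteswaps exactly when the host is big-endian. */
#if 0   /* illustration only */
static int example_le_roundtrip(void)
{
    BYTE buf[4];
    MEM_writeLE32(buf, 0x12345678U);          /* buf = {0x78,0x56,0x34,0x12} on any host */
    return MEM_readLE32(buf) == 0x12345678U;  /* always 1 */
}
#endif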
@ -1,371 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ======   Dependencies   ======= */
#include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
#include "zstd_deps.h"  /* size_t */
#include "debug.h"      /* assert */
#include "pool.h"

/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


#ifdef ZSTD_MULTITHREAD

#include "threading.h"  /* pthread adaptation */

/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
    POOL_function function;
    void *opaque;
} POOL_job;

struct POOL_ctx_s {
    ZSTD_customMem customMem;
    /* Keep track of the threads */
    ZSTD_pthread_t* threads;
    size_t threadCapacity;
    size_t threadLimit;

    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variables for poppers to wait on when the queue is empty */
    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};

/* POOL_thread() :
 * Work thread for the thread pool.
 * Waits for jobs and executes them.
 * @returns : NULL on failure else non-null.
 */
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        while ( ctx->queueEmpty
            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
            if (ctx->shutdown) {
                /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),
                 * a few threads will be shutdown while !queueEmpty,
                 * but enough threads will remain active to finish the queue */
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                return opaque;
            }
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);

            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
            ctx->numThreadsBusy--;
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        }
    }  /* for (;;) */
    assert(0);  /* Unreachable */
}

/* ZSTD_createThreadPool() : public access point */
POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
    return POOL_create (numThreads, 0);
}

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem)
{
    POOL_ctx* ctx;
    /* Check parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate
     * empty and full queues.
     */
    ctx->queueSize = queueSize + 1;
    ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    {
        int error = 0;
        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
        if (error) { POOL_free(ctx); return NULL; }
    }
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    ctx->threadCapacity = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                ctx->threadCapacity = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->threadCapacity = numThreads;
        ctx->threadLimit = numThreads;
    }
    return ctx;
}

/*! POOL_join() :
    Shutdown the queue, wake any sleeping threads, and join all of the threads.
*/
static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->threadCapacity; ++i) {
            ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    ZSTD_customFree(ctx->queue, ctx->customMem);
    ZSTD_customFree(ctx->threads, ctx->customMem);
    ZSTD_customFree(ctx, ctx->customMem);
}

/*! POOL_joinJobs() :
 *  Waits for all queued jobs to finish executing.
 */
void POOL_joinJobs(POOL_ctx* ctx) {
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    while (!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}

void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
    POOL_free (pool);
}

size_t POOL_sizeof(const POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
         + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
}


/* @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
{
    if (numThreads <= ctx->threadCapacity) {
        if (!numThreads) return 1;
        ctx->threadLimit = numThreads;
        return 0;
    }
    /* numThreads > threadCapacity */
    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
        if (!threadPool) return 1;
        /* replace existing thread pool */
        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
        ZSTD_customFree(ctx->threads, ctx->customMem);
        ctx->threads = threadPool;
        /* Initialize additional threads */
        {   size_t threadId;
            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
                    ctx->threadCapacity = threadId;
                    return 1;
    }   }   }   }
    /* successfully expanded */
    ctx->threadCapacity = numThreads;
    ctx->threadLimit = numThreads;
    return 0;
}

/* @return : 0 on success, 1 on error */
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
{
    int result;
    if (ctx==NULL) return 1;
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    result = POOL_resize_internal(ctx, numThreads);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return result;
}

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * When queueSize is 1 (pool was created with an intended queueSize of 0),
 * then a queue is empty if there is a thread free _and_ no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return (ctx->numThreadsBusy == ctx->threadLimit) ||
               !ctx->queueEmpty;
    }
}


static void
POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
    POOL_job job;
    job.function = function;
    job.opaque = opaque;
    assert(ctx != NULL);
    if (ctx->shutdown) return;

    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}


int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    if (isQueueFull(ctx)) {
        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        return 0;
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return 1;
}


#else  /* ZSTD_MULTITHREAD not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
static POOL_ctx g_poolCtx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx*
POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
{
    (void)numThreads;
    (void)queueSize;
    (void)customMem;
    return &g_poolCtx;
}

void POOL_free(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

void POOL_joinJobs(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
    (void)ctx; (void)numThreads;
    return 0;
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}

size_t POOL_sizeof(const POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_poolCtx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */
@ -1,90 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef POOL_H
#define POOL_H

#if defined (__cplusplus)
extern "C" {
#endif


#include "zstd_deps.h"
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
#include "../zstd.h"

typedef struct POOL_ctx_s POOL_ctx;

/*! POOL_create() :
 *  Create a thread pool with at most `numThreads` threads.
 * `numThreads` must be at least 1.
 *  The maximum number of queued jobs before blocking is `queueSize`.
 * @return : POOL_ctx pointer on success, else NULL.
 */
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem);

/*! POOL_free() :
 *  Free a thread pool returned by POOL_create().
 */
void POOL_free(POOL_ctx* ctx);


/*! POOL_joinJobs() :
 *  Waits for all queued jobs to finish executing.
 */
void POOL_joinJobs(POOL_ctx* ctx);

/*! POOL_resize() :
 *  Expands or shrinks pool's number of threads.
 *  This is more efficient than releasing + creating a new context,
 *  since it tries to preserve and reuse existing threads.
 * `numThreads` must be at least 1.
 * @return : 0 when resize was successful,
 *           !0 (typically 1) if there is an error.
 *    note : only numThreads can be resized, queueSize remains unchanged.
 */
int POOL_resize(POOL_ctx* ctx, size_t numThreads);

/*! POOL_sizeof() :
 * @return threadpool memory usage
 *  note : compatible with NULL (returns 0 in this case)
 */
size_t POOL_sizeof(const POOL_ctx* ctx);

/*! POOL_function :
 *  The function type that can be added to a thread pool.
 */
typedef void (*POOL_function)(void*);

/*! POOL_add() :
 *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
 *  Possibly blocks until there is room in the queue.
 *  Note : The function may be executed asynchronously,
 *         therefore, `opaque` must live until function has been completed.
 */
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);


/*! POOL_tryAdd() :
 *  Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
 *  Returns immediately even if not (does not block).
 * @return : 1 if successful, 0 if not.
 */
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);


#if defined (__cplusplus)
}
#endif

#endif
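/* Illustrative sketch, not part of the upstream sources: minimal use of the POOL_*
 * API declared above. It follows the documented contract: `opaque` must stay alive
 * until the job has completed, POOL_add() may block while the queue is full, and
 * POOL_tryAdd() returns 0 instead of blocking. */
#if 0   /* illustration only */
static void example_job(void* opaque)
{
    int* const n = (int*)opaque;
    *n += 1;   /* each job owns its slot exclusively: no sharing, no data race */
}

static void example_pool_usage(void)
{
    int slots[8] = {0};
    POOL_ctx* const pool = POOL_create(/* numThreads */ 4, /* queueSize */ 8);
    if (pool != NULL) {
        int i;
        for (i = 0; i < 8; i++)
            POOL_add(pool, example_job, &slots[i]);  /* may block until a slot frees up */
        POOL_joinJobs(pool);   /* wait until every queued job has run */
        POOL_free(pool);       /* joins and releases the worker threads */
    }
}
#endif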
@ -1,156 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_PORTABILITY_MACROS_H
#define ZSTD_PORTABILITY_MACROS_H

/**
 * This header file contains macro definitions to support portability.
 * This header is shared between C and ASM code, so it MUST only
 * contain macro definitions. It MUST not contain any C code.
 *
 * This header ONLY defines macros to detect platforms/feature support.
 */


/* compat. with non-clang compilers */
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_feature
# define __has_feature(x) 0
#endif

/* detects whether we are being compiled under msan */
#ifndef ZSTD_MEMORY_SANITIZER
# if __has_feature(memory_sanitizer)
#  define ZSTD_MEMORY_SANITIZER 1
# else
#  define ZSTD_MEMORY_SANITIZER 0
# endif
#endif

/* detects whether we are being compiled under asan */
#ifndef ZSTD_ADDRESS_SANITIZER
# if __has_feature(address_sanitizer)
#  define ZSTD_ADDRESS_SANITIZER 1
# elif defined(__SANITIZE_ADDRESS__)
#  define ZSTD_ADDRESS_SANITIZER 1
# else
#  define ZSTD_ADDRESS_SANITIZER 0
# endif
#endif

/* detects whether we are being compiled under dfsan */
#ifndef ZSTD_DATAFLOW_SANITIZER
# if __has_feature(dataflow_sanitizer)
#  define ZSTD_DATAFLOW_SANITIZER 1
# else
#  define ZSTD_DATAFLOW_SANITIZER 0
# endif
#endif

/* Mark the internal assembly functions as hidden */
#ifdef __ELF__
# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
#else
# define ZSTD_HIDE_ASM_FUNCTION(func)
#endif

/* Enable runtime BMI2 dispatch based on the CPU.
 * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
 */
#ifndef DYNAMIC_BMI2
#if ((defined(__clang__) && __has_attribute(__target__)) \
  || (defined(__GNUC__) \
      && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
  && (defined(__x86_64__) || defined(_M_X64)) \
  && !defined(__BMI2__)
#  define DYNAMIC_BMI2 1
#else
#  define DYNAMIC_BMI2 0
#endif
#endif

/**
 * Only enable assembly for GNUC-compatible compilers,
 * because other platforms may not support GAS assembly syntax.
 *
 * Only enable assembly for Linux / MacOS; other platforms may
 * work, but they haven't been tested. This could likely be
 * extended to BSD systems.
 *
 * Disable assembly when MSAN is enabled, because MSAN requires
 * 100% of code to be instrumented to work.
 */
#if defined(__GNUC__)
# if defined(__linux__) || defined(__linux) || defined(__APPLE__)
#  if ZSTD_MEMORY_SANITIZER
#   define ZSTD_ASM_SUPPORTED 0
#  elif ZSTD_DATAFLOW_SANITIZER
#   define ZSTD_ASM_SUPPORTED 0
#  else
#   define ZSTD_ASM_SUPPORTED 1
#  endif
# else
#  define ZSTD_ASM_SUPPORTED 0
# endif
#else
# define ZSTD_ASM_SUPPORTED 0
#endif

/**
 * Determines whether we should enable assembly for x86-64
 * with BMI2.
 *
 * Enable if all of the following conditions hold:
 * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
 * - Assembly is supported
 * - We are compiling for x86-64 and either:
 *   - DYNAMIC_BMI2 is enabled
 *   - BMI2 is supported at compile time
 */
#if !defined(ZSTD_DISABLE_ASM) &&  \
    ZSTD_ASM_SUPPORTED &&          \
    defined(__x86_64__) &&         \
    (DYNAMIC_BMI2 || defined(__BMI2__))
# define ZSTD_ENABLE_ASM_X86_64_BMI2 1
#else
# define ZSTD_ENABLE_ASM_X86_64_BMI2 0
#endif

/*
 * For x86 ELF targets, add .note.gnu.property section for Intel CET in
 * assembly sources when CET is enabled.
 *
 * Additionally, any function that may be called indirectly must begin
 * with ZSTD_CET_ENDBRANCH.
 */
#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
    && defined(__has_include)
# if __has_include(<cet.h>)
#  include <cet.h>
#  define ZSTD_CET_ENDBRANCH _CET_ENDBR
# endif
#endif

#ifndef ZSTD_CET_ENDBRANCH
# define ZSTD_CET_ENDBRANCH
#endif

#endif /* ZSTD_PORTABILITY_MACROS_H */
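/* Illustrative sketch, not part of the upstream sources: one way a detection macro
 * like DYNAMIC_BMI2 is typically consumed. When it is 1, a translation unit can
 * compile a BMI2-targeted variant of a hot function alongside the portable one and
 * pick between them at runtime. The `fn_default`/`fn_bmi2` names and the `bmi2`
 * flag (assumed to come from a one-time CPU probe) are hypothetical. */
#if 0   /* illustration only */
size_t fn_default(const void* src, size_t srcSize);   /* portable variant */
#if DYNAMIC_BMI2
__attribute__((target("bmi2")))
size_t fn_bmi2(const void* src, size_t srcSize);      /* BMI2-targeted variant */
#endif

static size_t fn_dispatch(const void* src, size_t srcSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) return fn_bmi2(src, srcSize);           /* runtime CPU feature says BMI2 is available */
#else
    (void)bmi2;
#endif
    return fn_default(src, srcSize);
}
#endif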
@ -1,176 +0,0 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/**
 * This file contains wrappers for systems which do not support pthreads
 */

#include "threading.h"

/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper
 */


/* ===  Dependencies  === */
#include <process.h>
#include <errno.h>


/* ===  Implementation  === */

typedef struct {
    void* (*start_routine)(void*);
    void* arg;
    int initialized;
    ZSTD_pthread_cond_t initialized_cond;
    ZSTD_pthread_mutex_t initialized_mutex;
} ZSTD_thread_params_t;

static unsigned __stdcall worker(void *arg)
{
    void* (*start_routine)(void*);
    void* thread_arg;

    /* Initialize thread_arg and start_routine, then signal the main thread
     * that it doesn't need to wait any longer.
     */
    {
        ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg;
        thread_arg = thread_param->arg;
        start_routine = thread_param->start_routine;

        /* Signal main thread that we are running and do not depend on its memory anymore */
        ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex);
        thread_param->initialized = 1;
        ZSTD_pthread_cond_signal(&thread_param->initialized_cond);
        ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex);
    }

    start_routine(thread_arg);

    return 0;
}

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                        void* (*start_routine) (void*), void* arg)
{
    ZSTD_thread_params_t thread_param;
    (void)unused;

    thread_param.start_routine = start_routine;
    thread_param.arg = arg;
    thread_param.initialized = 0;
    *thread = NULL;

    /* Setup thread initialization synchronization */
    if (ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) {
        /* Should never happen on Windows */
        return -1;
    }
    if (ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) {
        /* Should never happen on Windows */
        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
        return -1;
    }

    /* Spawn thread */
    *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL);
    if (!*thread) {
        ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
        return errno;
    }

    /* Wait for thread to be initialized */
    ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex);
    while (!thread_param.initialized) {
        ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex);
    }
    ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex);
    ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
    ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);

    return 0;
}

int ZSTD_pthread_join(ZSTD_pthread_t thread)
{
    DWORD result;

    if (!thread) return 0;

    result = WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);

    switch (result) {
    case WAIT_OBJECT_0:
        return 0;
    case WAIT_ABANDONED:
        return EINVAL;
    default:
        return GetLastError();
    }
}

#endif /* ZSTD_MULTITHREAD && _WIN32 */

#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)

#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"

int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
{
    *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
    if (!*mutex)
        return 1;
    return pthread_mutex_init(*mutex, attr);
}

int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
{
    if (!*mutex)
        return 0;
    {
        int const ret = pthread_mutex_destroy(*mutex);
        ZSTD_free(*mutex);
        return ret;
    }
}

int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
{
    *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
    if (!*cond)
        return 1;
    return pthread_cond_init(*cond, attr);
}

int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
{
    if (!*cond)
        return 0;
    {
        int const ret = pthread_cond_destroy(*cond);
        ZSTD_free(*cond);
        return ret;
    }
}

#endif
@ -1,150 +0,0 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef THREADING_H_938743
#define THREADING_H_938743

#include "debug.h"

#if defined (__cplusplus)
extern "C" {
#endif

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper
 */
#ifdef WINVER
# undef WINVER
#endif
#define WINVER 0x0600

#ifdef _WIN32_WINNT
# undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0600

#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
#endif

#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
#include <windows.h>
#undef ERROR
#define ERROR(name) ZSTD_ERROR(name)


/* mutex */
#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)
#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))

/* condition variable */
#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))

/* ZSTD_pthread_create() and ZSTD_pthread_join() */
typedef HANDLE ZSTD_pthread_t;

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                        void* (*start_routine) (void*), void* arg);

int ZSTD_pthread_join(ZSTD_pthread_t thread);

/**
 * add here more wrappers as required
 */


#elif defined(ZSTD_MULTITHREAD)   /* posix assumed ; need a better detection method */
/* ===   POSIX Systems   === */
#  include <pthread.h>

#if DEBUGLEVEL < 1

#define ZSTD_pthread_mutex_t            pthread_mutex_t
#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))

#define ZSTD_pthread_cond_t             pthread_cond_t
#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))
#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a)            pthread_join((a),NULL)

#else /* DEBUGLEVEL >= 1 */

/* Debug implementation of threading.
 * In this implementation we use pointers for mutexes and condition variables.
 * This way, if we forget to init/destroy them the program will crash or ASAN
 * will report leaks.
 */

#define ZSTD_pthread_mutex_t            pthread_mutex_t*
int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock(*(a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock(*(a))

#define ZSTD_pthread_cond_t             pthread_cond_t*
int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait(*(a), *(b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal(*(a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast(*(a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a)            pthread_join((a),NULL)

#endif

#else  /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */

typedef int ZSTD_pthread_mutex_t;
#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)
#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))
#define ZSTD_pthread_mutex_lock(a)      ((void)(a))
#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))

typedef int ZSTD_pthread_cond_t;
#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))
#define ZSTD_pthread_cond_signal(a)     ((void)(a))
#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))

/* do not use ZSTD_pthread_t */

#endif /* ZSTD_MULTITHREAD */

#if defined (__cplusplus)
}
#endif

#endif /* THREADING_H_938743 */
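/* Illustrative sketch, not part of the upstream sources: the wrappers above are
 * designed so the same calling code compiles under all three configurations
 * (Win32 critical sections, POSIX pthreads, and single-threaded builds, where
 * the macros expand to no-ops that still return 0 on "init"). */
#if 0   /* illustration only */
static int example_locked_increment(void)
{
    ZSTD_pthread_mutex_t lock;
    int counter = 0;
    if (ZSTD_pthread_mutex_init(&lock, NULL)) return -1;  /* 0 means success, as with pthreads */
    ZSTD_pthread_mutex_lock(&lock);
    counter += 1;                     /* critical section */
    ZSTD_pthread_mutex_unlock(&lock);
    ZSTD_pthread_mutex_destroy(&lock);
    return counter;
}
#endif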
@ -1,24 +0,0 @@
/*
 * xxHash - Fast Hash algorithm
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - xxHash homepage: https://cyan4973.github.io/xxHash/
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/*
 * xxhash.c instantiates functions defined in xxhash.h
 */

#define XXH_STATIC_LINKING_ONLY   /* access advanced declarations */
#define XXH_IMPLEMENTATION        /* access definitions */

#include "xxhash.h"
File diff suppressed because it is too large
@ -1,48 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/*-*************************************
*  Dependencies
***************************************/
#define ZSTD_DEPS_NEED_MALLOC
#include "error_private.h"
#include "zstd_internal.h"


/*-****************************************
*  Version
******************************************/
unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }

const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }


/*-****************************************
*  ZSTD Error Management
******************************************/
#undef ZSTD_isError   /* defined within zstd_internal.h */
/*! ZSTD_isError() :
 *  tells if a return value is an error code
 *  symbol is required for external callers */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }

/*! ZSTD_getErrorName() :
 *  provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }

/*! ZSTD_getErrorCode() :
 *  converts a `size_t` function result into a proper ZSTD_ErrorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }

/*! ZSTD_getErrorString() :
 *  provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
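/* Illustrative sketch, not part of the upstream sources: the intended caller-side
 * pattern for the error helpers above. Most zstd entry points return a size_t that
 * is either a byte count or an encoded error code; the public ZSTD_compress() is
 * used here as a stand-in for any of them. */
#if 0   /* illustration only */
#include <stdio.h>

static void example_check(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, /* level */ 3);
    if (ZSTD_isError(r)) {
        fprintf(stderr, "zstd error: %s\n", ZSTD_getErrorName(r));  /* human-readable name */
    } else {
        fprintf(stderr, "compressed to %zu bytes\n", r);            /* r is a valid size */
    }
}
#endif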
@ -1,111 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This file provides common libc dependencies that zstd requires.
 * The purpose is to allow replacing this file with a custom implementation
 * to compile zstd without libc support.
 */

/* Need:
 * NULL
 * INT_MAX
 * UINT_MAX
 * ZSTD_memcpy()
 * ZSTD_memset()
 * ZSTD_memmove()
 */
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON

#include <limits.h>
#include <stddef.h>
#include <string.h>

#if defined(__GNUC__) && __GNUC__ >= 4
# define ZSTD_memcpy(d,s,l) __builtin_memcpy((d),(s),(l))
# define ZSTD_memmove(d,s,l) __builtin_memmove((d),(s),(l))
# define ZSTD_memset(p,v,l) __builtin_memset((p),(v),(l))
#else
# define ZSTD_memcpy(d,s,l) memcpy((d),(s),(l))
# define ZSTD_memmove(d,s,l) memmove((d),(s),(l))
# define ZSTD_memset(p,v,l) memset((p),(v),(l))
#endif

#endif /* ZSTD_DEPS_COMMON */

/* Need:
 * ZSTD_malloc()
 * ZSTD_free()
 * ZSTD_calloc()
 */
#ifdef ZSTD_DEPS_NEED_MALLOC
#ifndef ZSTD_DEPS_MALLOC
#define ZSTD_DEPS_MALLOC

#include <stdlib.h>

#define ZSTD_malloc(s) malloc(s)
#define ZSTD_calloc(n,s) calloc((n), (s))
#define ZSTD_free(p) free((p))

#endif /* ZSTD_DEPS_MALLOC */
#endif /* ZSTD_DEPS_NEED_MALLOC */

/*
 * Provides 64-bit math support.
 * Need:
 * U64 ZSTD_div64(U64 dividend, U32 divisor)
 */
#ifdef ZSTD_DEPS_NEED_MATH64
#ifndef ZSTD_DEPS_MATH64
#define ZSTD_DEPS_MATH64

#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))

#endif /* ZSTD_DEPS_MATH64 */
#endif /* ZSTD_DEPS_NEED_MATH64 */

/* Need:
 * assert()
 */
#ifdef ZSTD_DEPS_NEED_ASSERT
#ifndef ZSTD_DEPS_ASSERT
#define ZSTD_DEPS_ASSERT

#include <assert.h>

#endif /* ZSTD_DEPS_ASSERT */
#endif /* ZSTD_DEPS_NEED_ASSERT */

/* Need:
 * ZSTD_DEBUG_PRINT()
 */
#ifdef ZSTD_DEPS_NEED_IO
#ifndef ZSTD_DEPS_IO
#define ZSTD_DEPS_IO

#include <stdio.h>
#define ZSTD_DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)

#endif /* ZSTD_DEPS_IO */
#endif /* ZSTD_DEPS_NEED_IO */

/* Only requested when <stdint.h> is known to be present.
 * Need:
 * intptr_t
 */
#ifdef ZSTD_DEPS_NEED_STDINT
#ifndef ZSTD_DEPS_STDINT
#define ZSTD_DEPS_STDINT

#include <stdint.h>

#endif /* ZSTD_DEPS_STDINT */
#endif /* ZSTD_DEPS_NEED_STDINT */
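/* Illustrative sketch, not part of the upstream sources: the opt-in gating pattern
 * used by the header above. A translation unit defines the matching
 * ZSTD_DEPS_NEED_* macro before including the header to pull in that dependency
 * group; a freestanding build can instead ship a replacement zstd_deps.h. */
#if 0   /* illustration only */
#define ZSTD_DEPS_NEED_MALLOC   /* pulls in ZSTD_malloc/ZSTD_calloc/ZSTD_free */
#include "zstd_deps.h"

static void* example_alloc(size_t n)
{
    void* const p = ZSTD_malloc(n);   /* maps to malloc(n) unless the header was replaced */
    if (p) ZSTD_memset(p, 0, n);      /* always available from the common group */
    return p;
}
#endif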
@ -1,392 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/* this module contains definitions which must be identical
 * across compression, decompression and dictBuilder.
 * It also contains a few functions useful to at least 2 of them
 * and which benefit from being inlined */

/*-*************************************
*  Dependencies
***************************************/
#include "compiler.h"
#include "cpu.h"
#include "mem.h"
#include "debug.h"           /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "../zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
#endif
#include "xxhash.h"                 /* XXH_reset, update, digest */
#ifndef ZSTD_NO_TRACE
#  include "zstd_trace.h"
#else
#  define ZSTD_TRACE 0
#endif

#if defined (__cplusplus)
extern "C" {
#endif

/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError   /* for inlining */
#define FSE_isError  ERR_isError
#define HUF_isError  ERR_isError


/*-*************************************
*  shared macros
***************************************/
#undef MIN
#undef MAX
#define MIN(a,b)            ((a)<(b) ? (a) : (b))
#define MAX(a,b)            ((a)>(b) ? (a) : (b))
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))


/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3   /* number of repcodes */
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
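/* Annotation (not in the original header): these postfix-style macros are
 * meant to be read as units in expressions, e.g. `512 KB` expands to
 * `512 *(1 <<10)`, so `(size_t)(8 MB)` evaluates to 8*1048576. */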

#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */)   /* for a non-null block */
#define MIN_LITERALS_FOR_4_STREAMS 6

typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define LitHufLog 11
#define MaxLit ((1<<Litbits) - 1)
#define MaxML   52
#define MaxLL   35
#define DefaultMaxOff 28
#define MaxOff  31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define MaxMLBits 16
#define MaxLLBits 16

#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)

static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;


/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
#else
    ZSTD_memcpy(dst, src, 8);
#endif
}
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

/* Need to use memmove here since the literal buffer can now be located within
   the dst buffer. In circumstances where the op "catches up" to where the
   literal buffer is, there can be partial overlaps in this call on the final
   copy if the literal is being shifted by less than 16 bytes. */
static void ZSTD_copy16(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
#elif defined(ZSTD_ARCH_X86_SSE2)
    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
#elif defined(__clang__)
    ZSTD_memmove(dst, src, 16);
#else
    /* ZSTD_memmove is not inlined properly by gcc */
    BYTE copy16_buf[16];
    ZSTD_memcpy(copy16_buf, src, 16);
    ZSTD_memcpy(dst, copy16_buf, 16);
#endif
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }

#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16

typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;

/*! ZSTD_wildcopy() :
 *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 *           The src buffer must be before the dst buffer.
 */
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;

    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
        /* Handle short offset copies. */
        do {
            COPY8(op, ip)
        } while (op < oend);
    } else {
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
        /* Separate out the first COPY16() call because the copy length is
         * almost certain to be short, so the branches have different
         * probabilities. Since it is almost certain to be short, only do
         * one COPY16() in the first call. Then, do two calls per loop since
         * at that point it is more likely to have a high trip count.
         */
        ZSTD_copy16(op, ip);
        if (16 >= length) return;
        op += 16;
        ip += 16;
        do {
            COPY16(op, ip);
            COPY16(op, ip);
        }
        while (op < oend);
    }
}
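/* Minimal usage sketch (illustrative, not from the original source):
 * replicating an 8-byte pattern, the classic LZ77 "match longer than
 * offset" case. The overlap contract above requires dst - src >= 8, and
 * the destination needs WILDCOPY_OVERLENGTH slack because wildcopy may
 * write past `length` in whole 8/16-byte steps. */
MEM_STATIC void ZSTD_wildcopy_demo(void)   /* hypothetical helper */
{
    BYTE buf[32 + WILDCOPY_OVERLENGTH];
    ZSTD_memcpy(buf, "01234567", 8);
    /* diff == 8 < WILDCOPY_VECLEN, so the COPY8 overlap path runs; each
     * 8-byte copy reads bytes already finalized by the previous step. */
    ZSTD_wildcopy(buf + 8, buf, 24, ZSTD_overlap_src_before_dst);
    /* buf[0..32) now holds "01234567" repeated 4 times */
}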

MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t const length = MIN(dstCapacity, srcSize);
    if (length > 0) {
        ZSTD_memcpy(dst, src, length);
    }
    return length;
}

/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* when workspace is continuously too large
 * during at least this number of times,
 * context's memory usage is considered wasteful,
 * because it's sized to handle a worst case scenario which rarely happens.
 * In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Controls whether the input/output buffer is buffered or stable. */
typedef enum {
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
} ZSTD_bufferMode_e;


/*-*******************************************
*  Private declarations
*********************************************/
typedef struct seqDef_s {
    U32 offBase;   /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
    U16 litLength;
    U16 mlBase;    /* mlBase == matchLength - MINMATCH */
} seqDef;

/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
typedef enum {
    ZSTD_llt_none = 0,             /* no longLengthType */
    ZSTD_llt_literalLength = 1,    /* represents a long literal */
    ZSTD_llt_matchLength = 2       /* represents a long match */
} ZSTD_longLengthType_e;

typedef struct {
    seqDef* sequencesStart;
    seqDef* sequences;      /* ptr to end of sequences */
    BYTE* litStart;
    BYTE* lit;              /* ptr to end of literals */
    BYTE* llCode;
    BYTE* mlCode;
    BYTE* ofCode;
    size_t maxNbSeq;
    size_t maxNbLit;

    /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
     * the existing value of the litLength or matchLength by 0x10000.
     */
    ZSTD_longLengthType_e longLengthType;
    U32                   longLengthPos;  /* Index of the sequence to apply long length modification to */
} seqStore_t;

typedef struct {
    U32 litLength;
    U32 matchLength;
} ZSTD_sequenceLength;

/**
 * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
 * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
 */
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
{
    ZSTD_sequenceLength seqLen;
    seqLen.litLength = seq->litLength;
    seqLen.matchLength = seq->mlBase + MINMATCH;
    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
        if (seqStore->longLengthType == ZSTD_llt_literalLength) {
            seqLen.litLength += 0x10000;
        }
        if (seqStore->longLengthType == ZSTD_llt_matchLength) {
            seqLen.matchLength += 0x10000;
        }
    }
    return seqLen;
}

/**
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
 *       similarly, before using `decompressedBound`, check for errors using:
 *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
 */
typedef struct {
    size_t nbBlocks;
    size_t compressedSize;
    unsigned long long decompressedBound;
} ZSTD_frameSizeInfo;   /* decompress & legacy */

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
int ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */


/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */


typedef struct {
    blockType_e blockType;
    U32 lastBlock;
    U32 origSize;
} blockProperties_t;   /* declared here for decompress and fullbench */

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

/*! ZSTD_decodeSeqHeaders() :
 *  decode sequence header from src */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize);

/**
 * @returns true iff the CPU supports dynamic BMI2 dispatch.
 */
MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
{
    ZSTD_cpuid_t cpuid = ZSTD_cpuid();
    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
}

#if defined (__cplusplus)
}
#endif

#endif   /* ZSTD_CCOMMON_H_MODULE */
@ -1,163 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_TRACE_H
#define ZSTD_TRACE_H

#if defined (__cplusplus)
extern "C" {
#endif

#include <stddef.h>

/* weak symbol support
 * For now, enable conservatively:
 * - Only GNUC
 * - Only ELF
 * - Only x86-64, i386 and aarch64
 * Also, explicitly disable on platforms known not to work so they aren't
 * forgotten in the future.
 */
#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
    defined(__GNUC__) && defined(__ELF__) && \
    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \
    !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
    !defined(__CYGWIN__) && !defined(_AIX)
#  define ZSTD_HAVE_WEAK_SYMBOLS 1
#else
#  define ZSTD_HAVE_WEAK_SYMBOLS 0
#endif
#if ZSTD_HAVE_WEAK_SYMBOLS
#  define ZSTD_WEAK_ATTR __attribute__((__weak__))
#else
#  define ZSTD_WEAK_ATTR
#endif

/* Only enable tracing when weak symbols are available. */
#ifndef ZSTD_TRACE
#  define ZSTD_TRACE ZSTD_HAVE_WEAK_SYMBOLS
#endif

#if ZSTD_TRACE

struct ZSTD_CCtx_s;
struct ZSTD_DCtx_s;
struct ZSTD_CCtx_params_s;

typedef struct {
    /**
     * ZSTD_VERSION_NUMBER
     *
     * This is guaranteed to be the first member of ZSTD_trace.
     * Otherwise, this struct is not stable between versions. If
     * the version number does not match your expectation, you
     * should not interpret the rest of the struct.
     */
    unsigned version;
    /**
     * Non-zero if streaming (de)compression is used.
     */
    unsigned streaming;
    /**
     * The dictionary ID.
     */
    unsigned dictionaryID;
    /**
     * Is the dictionary cold?
     * Only set on decompression.
     */
    unsigned dictionaryIsCold;
    /**
     * The dictionary size or zero if no dictionary.
     */
    size_t dictionarySize;
    /**
     * The uncompressed size of the data.
     */
    size_t uncompressedSize;
    /**
     * The compressed size of the data.
     */
    size_t compressedSize;
    /**
     * The fully resolved CCtx parameters (NULL on decompression).
     */
    struct ZSTD_CCtx_params_s const* params;
    /**
     * The ZSTD_CCtx pointer (NULL on decompression).
     */
    struct ZSTD_CCtx_s const* cctx;
    /**
     * The ZSTD_DCtx pointer (NULL on compression).
     */
    struct ZSTD_DCtx_s const* dctx;
} ZSTD_Trace;

/**
 * A tracing context. It must be 0 when tracing is disabled.
 * Otherwise, any non-zero value returned by a tracing begin()
 * function is presented to any subsequent calls to end().
 *
 * Any non-zero value is treated as tracing is enabled and not
 * interpreted by the library.
 *
 * Two possible uses are:
 * * A timestamp for when the begin() function was called.
 * * A unique key identifying the (de)compression, like the
 *   address of the [dc]ctx pointer if you need to track
 *   more information than just a timestamp.
 */
typedef unsigned long long ZSTD_TraceCtx;

/**
 * Trace the beginning of a compression call.
 * @param cctx The cctx pointer for the compression.
 *             It can be used as a key to map begin() to end().
 * @returns Non-zero if tracing is enabled. The return value is
 *          passed to ZSTD_trace_compress_end().
 */
ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_compress_begin(
    struct ZSTD_CCtx_s const* cctx);

/**
 * Trace the end of a compression call.
 * @param ctx The return value of ZSTD_trace_compress_begin().
 * @param trace The zstd tracing info.
 */
ZSTD_WEAK_ATTR void ZSTD_trace_compress_end(
    ZSTD_TraceCtx ctx,
    ZSTD_Trace const* trace);

/**
 * Trace the beginning of a decompression call.
 * @param dctx The dctx pointer for the decompression.
 *             It can be used as a key to map begin() to end().
 * @returns Non-zero if tracing is enabled. The return value is
 *          passed to ZSTD_trace_decompress_end().
 */
ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_decompress_begin(
    struct ZSTD_DCtx_s const* dctx);

/**
 * Trace the end of a decompression call.
 * @param ctx The return value of ZSTD_trace_decompress_begin().
 * @param trace The zstd tracing info.
 */
ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end(
    ZSTD_TraceCtx ctx,
    ZSTD_Trace const* trace);
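/* Sketch (assumption: an application on a weak-symbol platform, with
 * <stdio.h> available): defining strong versions of these symbols in the
 * application overrides the weak stubs and enables tracing without
 * rebuilding the library. Shown commented-out, as it does not belong in
 * this header:
 *
 *   ZSTD_TraceCtx ZSTD_trace_compress_begin(struct ZSTD_CCtx_s const* cctx)
 *   {
 *       (void)cctx;
 *       return 1;   // any non-zero value enables tracing for this call
 *   }
 *
 *   void ZSTD_trace_compress_end(ZSTD_TraceCtx ctx, ZSTD_Trace const* trace)
 *   {
 *       if (ctx) fprintf(stderr, "zstd: %zu -> %zu bytes\n",
 *                        trace->uncompressedSize, trace->compressedSize);
 *   }
 */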

#endif /* ZSTD_TRACE */

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_TRACE_H */
@ -1,134 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CLEVELS_H
#define ZSTD_CLEVELS_H

#define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_compressionParameters  */
#include "../zstd.h"

/*-=====  Pre-defined compression levels  =====-*/

#define ZSTD_MAX_CLEVEL 22

#ifdef __GNUC__
__attribute__((__unused__))
#endif

static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - for any srcSize > 256 KB */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  3,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6 */
    { 21, 19, 20,  4,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 22, 23,  6,  5, 32, ZSTD_lazy2   },  /* level 12 */
    { 22, 22, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  3,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 17, 18,  5,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 16, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 16, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 16, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 16, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 16, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};
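/* Column key for the table above (annotation, matching the field order of
 * ZSTD_compressionParameters): W=windowLog, C=chainLog, H=hashLog,
 * S=searchLog, L=minMatch, TL/T=targetLength, strat=strategy.
 * E.g. default level 3, { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, selects a
 * 2 MB window (windowLog 21) with the double-fast strategy. */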


#endif /* ZSTD_CLEVELS_H */
@ -1,624 +0,0 @@
/* ******************************************************************
 * FSE : Finite State Entropy encoder
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* **************************************************************
*  Includes
****************************************************************/
#include "../common/compiler.h"
#include "../common/mem.h"        /* U32, U16, etc. */
#include "../common/debug.h"      /* assert, DEBUGLOG */
#include "hist.h"                 /* HIST_count_wksp */
#include "../common/bitstream.h"
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
#include "../common/zstd_deps.h"  /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
#include "../common/bits.h"       /* ZSTD_highbit32 */


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif
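/* Sketch of the template-emulation contract these checks enforce
 * (illustrative; the actual definitions are supplied elsewhere in the
 * build, e.g. along the lines of):
 *
 *   #define FSE_FUNCTION_TYPE      BYTE
 *   #define FSE_FUNCTION_EXTENSION
 *
 * so the "templated" table builders below are stamped out for that
 * symbol type. */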

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
 * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
 */
size_t FSE_buildCTable_wksp(FSE_CTable* ct,
                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
                            void* workSpace, size_t wkspSize)
{
    U32 const tableSize = 1 << tableLog;
    U32 const tableMask = tableSize - 1;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    U32 const step = FSE_TABLESTEP(tableSize);
    U32 const maxSV1 = maxSymbolValue+1;

    U16* cumul = (U16*)workSpace;   /* size = maxSV1 */
    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1));  /* size = tableSize */

    U32 highThreshold = tableSize-1;

    assert(((size_t)workSpace & 1) == 0);  /* Must be 2 bytes-aligned */
    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
    /* CTable header */
    tableU16[-2] = (U16) tableLog;
    tableU16[-1] = (U16) maxSymbolValue;
    assert(tableLog < 16);   /* required for threshold strategy to work */

    /* For explanations on how to distribute symbol values over the table :
     * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */

#ifdef __clang_analyzer__
    ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
#endif

    /* symbol start positions */
    {   U32 u;
        cumul[0] = 0;
        for (u=1; u <= maxSV1; u++) {
            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                cumul[u] = cumul[u-1] + 1;
                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
            } else {
                assert(normalizedCounter[u-1] >= 0);
                cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
                assert(cumul[u] >= cumul[u-1]);  /* no overflow */
        }   }
        cumul[maxSV1] = (U16)(tableSize+1);
    }

    /* Spread symbols */
    if (highThreshold == tableSize - 1) {
        /* Case for no low prob count symbols. Lay down 8 bytes at a time
         * to reduce branch misses since we are operating on a small block
         */
        BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
        {   U64 const add = 0x0101010101010101ull;
            size_t pos = 0;
            U64 sv = 0;
            U32 s;
            for (s=0; s<maxSV1; ++s, sv += add) {
                int i;
                int const n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8) {
                    MEM_write64(spread + pos + i, sv);
                }
                assert(n>=0);
                pos += (size_t)n;
            }
        }
        /* Spread symbols across the table. Lack of lowprob symbols means that
         * we don't need variable sized inner loop, so we can unroll the loop and
         * reduce branch misses.
         */
        {   size_t position = 0;
            size_t s;
            size_t const unroll = 2; /* Experimentally determined optimal unroll */
            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
            for (s = 0; s < (size_t)tableSize; s += unroll) {
                size_t u;
                for (u = 0; u < unroll; ++u) {
                    size_t const uPosition = (position + (u * step)) & tableMask;
                    tableSymbol[uPosition] = spread[s + u];
                }
                position = (position + (unroll * step)) & tableMask;
            }
            assert(position == 0);   /* Must have initialized all positions */
        }
    } else {
        U32 position = 0;
        U32 symbol;
        for (symbol=0; symbol<maxSV1; symbol++) {
            int nbOccurrences;
            int const freq = normalizedCounter[symbol];
            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                position = (position + step) & tableMask;
                while (position > highThreshold)
                    position = (position + step) & tableMask;   /* Low proba area */
        }   }
        assert(position==0);  /* Must have initialized all positions */
    }

    /* Build table */
    {   U32 u; for (u=0; u<tableSize; u++) {
        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
    }   }

    /* Build Symbol Transformation Table */
    {   unsigned total = 0;
        unsigned s;
        for (s=0; s<=maxSymbolValue; s++) {
            switch (normalizedCounter[s])
            {
            case  0:
                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
                break;

            case -1:
            case  1:
                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
                assert(total <= INT_MAX);
                symbolTT[s].deltaFindState = (int)(total - 1);
                total ++;
                break;
            default :
                assert(normalizedCounter[s] > 1);
                {   U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1);
                    U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
                    symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
                    total +=  (unsigned)normalizedCounter[s];
    }   }   }   }

#if 0  /* debug : symbol costs */
    DEBUGLOG(5, "\n --- table statistics : ");
    {   U32 symbol;
        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
                    symbol, normalizedCounter[symbol],
                    FSE_getMaxNbBits(symbolTT, symbol),
                    (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
    }   }
#endif

    return 0;
}



#ifndef FSE_COMMONDEFS_ONLY

/*-**************************************************************
*  FSE NCount encoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
                                   + 4 /* bitCount initialized at 4 */
                                   + 2 /* first two symbols may use one additional bit each */) / 8)
                                 + 1 /* round up to whole nb bytes */
                                 + 2 /* additional two bytes for bitstream flush */;
    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
}

static size_t
FSE_writeNCount_generic (void* header, size_t headerBufferSize,
                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
                         unsigned writeIsSafe)
{
    BYTE* const ostart = (BYTE*) header;
    BYTE* out = ostart;
    BYTE* const oend = ostart + headerBufferSize;
    int nbBits;
    const int tableSize = 1 << tableLog;
    int remaining;
    int threshold;
    U32 bitStream = 0;
    int bitCount = 0;
    unsigned symbol = 0;
    unsigned const alphabetSize = maxSymbolValue + 1;
    int previousIs0 = 0;

    /* Table Size */
    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
    bitCount  += 4;

    /* Init */
    remaining = tableSize+1;   /* +1 for extra accuracy */
    threshold = tableSize;
    nbBits = tableLog+1;

    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
        if (previousIs0) {
            unsigned start = symbol;
            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
            if (symbol == alphabetSize) break;   /* incorrect distribution */
            while (symbol >= start+24) {
                start+=24;
                bitStream += 0xFFFFU << bitCount;
                if ((!writeIsSafe) && (out > oend-2))
                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE) bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out+=2;
                bitStream>>=16;
            }
            while (symbol >= start+3) {
                start+=3;
                bitStream += 3 << bitCount;
                bitCount += 2;
            }
            bitStream += (symbol-start) << bitCount;
            bitCount += 2;
            if (bitCount>16) {
                if ((!writeIsSafe) && (out > oend - 2))
                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE)bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out += 2;
                bitStream >>= 16;
                bitCount -= 16;
        }   }
        {   int count = normalizedCounter[symbol++];
            int const max = (2*threshold-1) - remaining;
            remaining -= count < 0 ? -count : count;
            count++;   /* +1 for extra accuracy */
            if (count>=threshold)
                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
            bitStream += count << bitCount;
            bitCount  += nbBits;
            bitCount  -= (count<max);
            previousIs0  = (count==1);
            if (remaining<1) return ERROR(GENERIC);
            while (remaining<threshold) { nbBits--; threshold>>=1; }
        }
        if (bitCount>16) {
            if ((!writeIsSafe) && (out > oend - 2))
                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
            out[0] = (BYTE)bitStream;
            out[1] = (BYTE)(bitStream>>8);
            out += 2;
            bitStream >>= 16;
            bitCount -= 16;
    }   }

    if (remaining != 1)
        return ERROR(GENERIC);  /* incorrect normalized distribution */
    assert(symbol <= alphabetSize);

    /* flush remaining bitStream */
    if ((!writeIsSafe) && (out > oend - 2))
        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
    out[0] = (BYTE)bitStream;
    out[1] = (BYTE)(bitStream>>8);
    out+= (bitCount+7) /8;

    return (out-ostart);
}


size_t FSE_writeNCount (void* buffer, size_t bufferSize,
                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */

    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);

    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
}


/*-**************************************************************
*  FSE Compression Code
****************************************************************/

/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
    U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1;
    U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    return minBits;
}

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
    U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus;
    U32 tableLog = maxTableLog;
    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
    return tableLog;
}

unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}

/* Secondary normalization method.
   To be used when primary method fails. */

static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
    short const NOT_YET_ASSIGNED = -2;
    U32 s;
    U32 distributed = 0;
    U32 ToDistribute;

    /* Init */
    U32 const lowThreshold = (U32)(total >> tableLog);
    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));

    for (s=0; s<=maxSymbolValue; s++) {
        if (count[s] == 0) {
            norm[s]=0;
            continue;
        }
        if (count[s] <= lowThreshold) {
            norm[s] = lowProbCount;
            distributed++;
            total -= count[s];
            continue;
        }
        if (count[s] <= lowOne) {
            norm[s] = 1;
            distributed++;
            total -= count[s];
            continue;
        }

        norm[s]=NOT_YET_ASSIGNED;
    }
    ToDistribute = (1 << tableLog) - distributed;

    if (ToDistribute == 0)
        return 0;

    if ((total / ToDistribute) > lowOne) {
        /* risk of rounding to zero */
        lowOne = (U32)((total * 3) / (ToDistribute * 2));
        for (s=0; s<=maxSymbolValue; s++) {
            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
                norm[s] = 1;
                distributed++;
                total -= count[s];
                continue;
        }   }
        ToDistribute = (1 << tableLog) - distributed;
    }

    if (distributed == maxSymbolValue+1) {
        /* all values are pretty poor;
           probably incompressible data (should have already been detected);
           find max, then give all remaining points to max */
        U32 maxV = 0, maxC = 0;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
        norm[maxV] += (short)ToDistribute;
        return 0;
    }

    if (total == 0) {
        /* all of the symbols were low enough for the lowOne or lowThreshold */
        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
        return 0;
    }

    {   U64 const vStepLog = 62 - tableLog;
        U64 const mid = (1ULL << (vStepLog-1)) - 1;
        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
        U64 tmpTotal = mid;
        for (s=0; s<=maxSymbolValue; s++) {
            if (norm[s]==NOT_YET_ASSIGNED) {
                U64 const end = tmpTotal + (count[s] * rStep);
                U32 const sStart = (U32)(tmpTotal >> vStepLog);
                U32 const sEnd = (U32)(end >> vStepLog);
                U32 const weight = sEnd - sStart;
                if (weight < 1)
                    return ERROR(GENERIC);
                norm[s] = (short)weight;
                tmpTotal = end;
    }   }   }

    return 0;
}

size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                           const unsigned* count, size_t total,
                           unsigned maxSymbolValue, unsigned useLowProbCount)
{
    /* Sanity checks */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */

    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
        short const lowProbCount = useLowProbCount ? -1 : 1;
        U64 const scale = 62 - tableLog;
        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division ! */
        U64 const vStep = 1ULL<<(scale-20);
        int stillToDistribute = 1<<tableLog;
        unsigned s;
        unsigned largest=0;
        short largestP=0;
        U32 lowThreshold = (U32)(total >> tableLog);

        for (s=0; s<=maxSymbolValue; s++) {
            if (count[s] == total) return 0;   /* rle special case */
            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
            if (count[s] <= lowThreshold) {
                normalizedCounter[s] = lowProbCount;
                stillToDistribute--;
            } else {
                short proba = (short)((count[s]*step) >> scale);
                if (proba<8) {
                    U64 restToBeat = vStep * rtbTable[proba];
                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
                }
                if (proba > largestP) { largestP=proba; largest=s; }
                normalizedCounter[s] = proba;
                stillToDistribute -= proba;
        }   }
        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
            /* corner case, need another normalization method */
            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
            if (FSE_isError(errorCode)) return errorCode;
        }
        else normalizedCounter[largest] += (short)stillToDistribute;
    }

#if 0
    {   /* Print Table (debug) */
        U32 s;
        U32 nTotal = 0;
        for (s=0; s<=maxSymbolValue; s++)
            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
        for (s=0; s<=maxSymbolValue; s++)
            nTotal += abs(normalizedCounter[s]);
        if (nTotal != (1U<<tableLog))
            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
        getchar();
    }
#endif

    return tableLog;
}
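#if 0  /* End-to-end usage sketch (illustrative, not part of the original
        * file; name invented, error handling abbreviated): histogram ->
        * normalize -> serialize the NCount header. Note that
        * FSE_normalizeCount() returns the tableLog it actually used. */
static size_t demo_writeNCount(void* dst, size_t dstCap,
                               const unsigned* count, size_t total,
                               unsigned maxSymbolValue)
{
    short norm[FSE_MAX_SYMBOL_VALUE+1];
    unsigned const tableLog = FSE_optimalTableLog(0, total, maxSymbolValue);
    size_t const r = FSE_normalizeCount(norm, tableLog, count, total,
                                        maxSymbolValue, /* useLowProbCount */ 1);
    if (FSE_isError(r)) return r;
    return FSE_writeNCount(dst, dstCap, norm, maxSymbolValue, (unsigned)r);
}
#endif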

/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
    void* ptr = ct;
    U16* tableU16 = ( (U16*) ptr) + 2;
    void* FSCTptr = (U32*)ptr + 2;
    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;

    /* header */
    tableU16[-2] = (U16) 0;
    tableU16[-1] = (U16) symbolValue;

    /* Build table */
    tableU16[0] = 0;
    tableU16[1] = 0;   /* just in case */

    /* Build Symbol Transformation Table */
    symbolTT[symbolValue].deltaNbBits = 0;
    symbolTT[symbolValue].deltaFindState = 0;

    return 0;
}


static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct, const unsigned fast)
{
    const BYTE* const istart = (const BYTE*) src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip=iend;

    BIT_CStream_t bitC;
    FSE_CState_t CState1, CState2;

    /* init */
    if (srcSize <= 2) return 0;
    {   size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
        if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }

#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))

    if (srcSize & 1) {
        FSE_initCState2(&CState1, ct, *--ip);
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    } else {
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_initCState2(&CState1, ct, *--ip);
    }

    /* join to mod 4 */
    srcSize -= 2;
    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
        FSE_encodeSymbol(&bitC, &CState2, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    }

    /* 2 or 4 encoding per loop */
    while ( ip>istart ) {

        FSE_encodeSymbol(&bitC, &CState2, *--ip);

        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
            FSE_FLUSHBITS(&bitC);

        FSE_encodeSymbol(&bitC, &CState1, *--ip);

        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
            FSE_encodeSymbol(&bitC, &CState2, *--ip);
            FSE_encodeSymbol(&bitC, &CState1, *--ip);
        }

        FSE_FLUSHBITS(&bitC);
    }

    FSE_flushCState(&bitC, &CState2);
    FSE_flushCState(&bitC, &CState1);
    return BIT_closeCStream(&bitC);
}

size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct)
{
    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));

    if (fast)
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
    else
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}


size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

#endif   /* FSE_COMMONDEFS_ONLY */
@ -1,181 +0,0 @@
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
#include "../common/mem.h"             /* U32, BYTE, etc. */
#include "../common/debug.h"           /* assert, DEBUGLOG */
#include "../common/error_private.h"   /* ERROR */
#include "hist.h"


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}
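#if 0  /* usage sketch (illustrative, not part of the original file):
        * byte-frequency census of a buffer `src` of `srcSize` bytes */
    unsigned count[256];
    unsigned maxSymbolValue = 255;   /* in: upper bound; out: actual max */
    unsigned const mostFrequent =
        HIST_count_simple(count, &maxSymbolValue, src, srcSize);
    /* mostFrequent == srcSize would indicate a single-symbol (RLE) input */
#endif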
|
||||
typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
|
||||
|
||||
/* HIST_count_parallel_wksp() :
|
||||
* store histogram into 4 intermediate tables, recombined at the end.
|
||||
* this design makes better use of OoO cpus,
|
||||
* and is noticeably faster when some values are heavily repeated.
|
||||
* But it needs some additional workspace for intermediate tables.
|
||||
* `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
|
||||
* @return : largest histogram frequency,
|
||||
* or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
|
||||
static size_t HIST_count_parallel_wksp(
|
||||
unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* source, size_t sourceSize,
|
||||
HIST_checkInput_e check,
|
||||
U32* const workSpace)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)source;
|
||||
const BYTE* const iend = ip+sourceSize;
|
||||
size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
|
||||
unsigned max=0;
|
||||
U32* const Counting1 = workSpace;
|
||||
U32* const Counting2 = Counting1 + 256;
|
||||
U32* const Counting3 = Counting2 + 256;
|
||||
U32* const Counting4 = Counting3 + 256;
|
||||
|
||||
/* safety checks */
|
||||
assert(*maxSymbolValuePtr <= 255);
|
||||
if (!sourceSize) {
|
||||
ZSTD_memset(count, 0, countSize);
|
||||
*maxSymbolValuePtr = 0;
|
||||
return 0;
|
||||
}
|
||||
ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
|
||||
|
||||
/* by stripes of 16 bytes */
|
||||
{ U32 cached = MEM_read32(ip); ip += 4;
|
||||
while (ip < iend-15) {
|
||||
U32 c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
}
|
||||
ip-=4;
|
||||
}
|
||||
|
||||
/* finish last symbols */
|
||||
while (ip<iend) Counting1[*ip++]++;
|
||||
|
||||
{ U32 s;
|
||||
for (s=0; s<256; s++) {
|
||||
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
|
||||
if (Counting1[s] > max) max = Counting1[s];
|
||||
} }
|
||||
|
||||
{ unsigned maxSymbolValue = 255;
|
||||
while (!Counting1[maxSymbolValue]) maxSymbolValue--;
|
||||
if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
|
||||
*maxSymbolValuePtr = maxSymbolValue;
|
||||
ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
|
||||
}
|
||||
return (size_t)max;
|
||||
}
|
||||
|
||||
/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* source, size_t sourceSize,
                           void* workSpace, size_t workSpaceSize)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}

/* HIST_count_wksp() :
 * Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of >= HIST_WKSP_SIZE_U32 unsigned */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* source, size_t sourceSize,
                       void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    if (*maxSymbolValuePtr < 255)
        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
    *maxSymbolValuePtr = 255;
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}

#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
}

size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
}
#endif
@ -1,75 +0,0 @@
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
#include "../common/zstd_deps.h"   /* size_t */


/* --- simple histogram functions --- */

/*! HIST_count():
 *  Provides the precise count of each byte within a table 'count'.
 *  'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
 *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
 *  @return : count of the most frequent symbol (which isn't identified),
 *            or an error code, which can be tested using HIST_isError().
 *  note : if return == srcSize, there is only one symbol.
 */
size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize);

unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */


/* --- advanced histogram functions --- */

#define HIST_WKSP_SIZE_U32 1024
#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
/** HIST_count_wksp() :
 *  Same as HIST_count(), but using an externally provided scratch buffer.
 *  Benefit is this function will use very little stack space.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* src, size_t srcSize,
                       void* workSpace, size_t workSpaceSize);

/** HIST_countFast() :
 *  Same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
 *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
 */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* src, size_t srcSize);

/** HIST_countFast_wksp() :
 *  Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize,
                           void* workSpace, size_t workSpaceSize);

/*! HIST_count_simple() :
 *  Same as HIST_countFast(), this function is unsafe,
 *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
 *  It is also a bit slower for large inputs.
 *  However, it does not need any additional memory (not even on stack).
 * @return : count of the most frequent symbol.
 *  Note this function doesn't produce any error (i.e. it must succeed).
 */
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize);
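As a usage note for the declarations above, a hedged sketch of calling the workspace variant with a correctly sized, aligned buffer (the wrapper name and its zero-on-error policy are hypothetical):

    /* Count bytes of `src` into `count[0..255]`, returning the count of the
     * most frequent byte, or 0 on error (hypothetical policy). */
    size_t count_bytes(unsigned count[256], const void* src, size_t srcSize)
    {
        unsigned wksp[HIST_WKSP_SIZE_U32];   /* 4 KB scratch, naturally 4-byte aligned */
        unsigned maxSymbolValue = 255;
        size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
                                               src, srcSize,
                                               wksp, sizeof(wksp));
        return HIST_isError(largest) ? 0 : largest;
    }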
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,235 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_compress_literals.h"


/* **************************************************************
*  Debug Traces
****************************************************************/
#if DEBUGLEVEL >= 2

static size_t showHexa(const void* src, size_t srcSize)
{
    const BYTE* const ip = (const BYTE*)src;
    size_t u;
    for (u=0; u<srcSize; u++) {
        RAWLOG(5, " %02X", ip[u]); (void)ip;
    }
    RAWLOG(5, " \n");
    return srcSize;
}

#endif


/* **************************************************************
*  Literals compression - special cases
****************************************************************/
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

    DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);

    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    ZSTD_memcpy(ostart + flSize, src, srcSize);
    DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
    return srcSize + flSize;
}
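The `2 - 1 - 5` style comments above give the header field widths in bits: block type, size format, regenerated size. A hedged worked check of the flSize==2 case (assuming set_basic == 0, per zstd's symbolEncodingType_e):

    /* srcSize == 100 needs the 2-byte format: 2 bits type, 2 bits
     * size-format, 12 bits regenerated size, little-endian. */
    unsigned const srcSize = 100;
    unsigned const flSize  = 1 + (srcSize > 31) + (srcSize > 4095);    /* == 2 */
    unsigned const header  = 0 /* set_basic */ + (1u << 2) + (srcSize << 4);
    /* header == 0x0644: bits[0..1]=00 (basic), bits[2..3]=01, bits[4..15]=100 */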
static int allBytesIdentical(const void* src, size_t srcSize)
{
    assert(srcSize >= 1);
    assert(src != NULL);
    {   const BYTE b = ((const BYTE*)src)[0];
        size_t p;
        for (p=1; p<srcSize; p++) {
            if (((const BYTE*)src)[p] != b) return 0;
        }
        return 1;
    }
}

size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

    assert(dstCapacity >= 4); (void)dstCapacity;
    assert(allBytesIdentical(src, srcSize));

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    ostart[flSize] = *(const BYTE*)src;
    DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
    return flSize+1;
}

/* ZSTD_minLiteralsToCompress() :
 * returns minimal amount of literals
 * for literal compression to even be attempted.
 * Minimum is made tighter as compression strategy increases.
 */
static size_t
ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
{
    assert((int)strategy >= 0);
    assert((int)strategy <= 9);
    /* btultra2 : min 8 bytes;
     * then 2x larger for each successive compression strategy
     * max threshold 64 bytes */
    {   int const shift = MIN(9-(int)strategy, 3);
        size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
        DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
        return mintc;
    }
}
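Plugging strategies into the formula above (assuming the usual ZSTD_strategy numbering where ZSTD_fast == 1 and ZSTD_btultra2 == 9, and no valid repeat table):

    /* strategy 9 (btultra2): shift = MIN(9-9, 3) = 0 -> mintc = 8 << 0 = 8
     * strategy 7 (btopt)   : shift = MIN(9-7, 3) = 2 -> mintc = 8 << 2 = 32
     * strategy 1..6        : shift = 3               -> mintc = 8 << 3 = 64
     * huf_repeat == HUF_repeat_valid: mintc = 6, regardless of strategy. */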
size_t ZSTD_compressLiterals (
                  void* dst, size_t dstCapacity,
            const void* src, size_t srcSize,
                  void* entropyWorkspace, size_t entropyWorkspaceSize,
            const ZSTD_hufCTables_t* prevHuf,
                  ZSTD_hufCTables_t* nextHuf,
                  ZSTD_strategy strategy,
                  int disableLiteralCompression,
                  int suspectUncompressible,
                  int bmi2)
{
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE* const ostart = (BYTE*)dst;
    U32 singleStream = srcSize < 256;
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
                disableLiteralCompression, (U32)srcSize, dstCapacity);

    DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralCompression)
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    /* if too small, don't even attempt compression (speed opt) */
    if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
    {   HUF_repeat repeat = prevHuf->repeatMode;
        int const flags = 0
            | (bmi2 ? HUF_flags_bmi2 : 0)
            | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
            | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
            | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);

        typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
        huf_compress_f huf_compress;
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
        huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
        cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
                                src, srcSize,
                                HUF_SYMBOLVALUE_MAX, LitHufLog,
                                entropyWorkspace, entropyWorkspaceSize,
                                (HUF_CElt*)nextHuf->CTable,
                                &repeat, flags);
        DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
            DEBUGLOG(5, "reusing statistics from previous huffman block");
            hType = set_repeat;
        }
    }

    {   size_t const minGain = ZSTD_minGain(srcSize, strategy);
        if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
            return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }   }
    if (cLitSize==1) {
        /* A return value of 1 signals that the alphabet consists of a single symbol.
         * However, in some rare circumstances, it could be the compressed size (a single byte).
         * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
         * (it's also necessary to not generate statistics).
         * Therefore, in such a case, actively check that all bytes are identical. */
        if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
            return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
    }   }

    if (hType == set_compressed) {
        /* using a newly constructed table */
        nextHuf->repeatMode = HUF_repeat_check;
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default:  /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
    return lhSize+cLitSize;
}
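The `2 - 2 - 10 - 10` style comments above describe the compressed-literals header: type, size-format, regenerated size, compressed size. A hedged sketch of the inverse, decoding the 3-byte variant (hypothetical helper, not part of zstd's API):

    /* Decode the 24-bit (case 3) literals header built above. */
    static void decode_lh3(const unsigned char h[3],
                           unsigned* type, unsigned* litSize, unsigned* cLitSize)
    {
        unsigned const lhc = (unsigned)h[0] | ((unsigned)h[1] << 8) | ((unsigned)h[2] << 16);
        *type     =  lhc         & 3;       /* set_basic/rle/compressed/repeat */
        *litSize  = (lhc >>  4)  & 0x3FF;   /* 10-bit regenerated size */
        *cLitSize = (lhc >> 14)  & 0x3FF;   /* 10-bit compressed size */
    }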
@ -1,39 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPRESS_LITERALS_H
#define ZSTD_COMPRESS_LITERALS_H

#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */


size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

/* ZSTD_compressRleLiteralsBlock() :
 * Conditions :
 * - All bytes in @src are identical
 * - dstCapacity >= 4 */
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

/* ZSTD_compressLiterals():
 * @entropyWorkspace: must be aligned on 4-bytes boundaries
 * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
 * @suspectUncompressible: sampling checks, to potentially skip huffman coding
 */
size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                        const ZSTD_hufCTables_t* prevHuf,
                              ZSTD_hufCTables_t* nextHuf,
                              ZSTD_strategy strategy, int disableLiteralCompression,
                              int suspectUncompressible,
                              int bmi2);

#endif /* ZSTD_COMPRESS_LITERALS_H */
@ -1,442 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
 * Dependencies
 ***************************************/
#include "zstd_compress_sequences.h"

/**
 * -log2(x / 256) lookup table for x in [0, 256).
 * If x == 0: Return 0
 * Else: Return floor(-log2(x / 256) * 256)
 */
static unsigned const kInverseProbabilityLog256[256] = {
    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
    5,    4,    2,    1,
};
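The table above can be regenerated mechanically from its own doc comment; a hedged standalone generator (plain C, links with -lm):

    #include <math.h>
    #include <stdio.h>

    /* Prints floor(-log2(x/256) * 256) for x in [0,256), with 0 mapped to 0. */
    int main(void)
    {
        int x;
        for (x = 0; x < 256; x++) {
            unsigned const v = (x == 0) ? 0u : (unsigned)floor(-log2(x / 256.0) * 256.0);
            printf("%4u,%c", v, (x % 12 == 11) ? '\n' : ' ');
        }
        return 0;
    }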
static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
    void const* ptr = ctable;
    U16 const* u16ptr = (U16 const*)ptr;
    U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
    return maxSymbolValue;
}

/**
 * Returns true if we should use ncount=-1 else we should
 * use ncount=1 for low probability symbols instead.
 */
static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
{
    /* Heuristic: This should cover most blocks <= 16K and
     * start to fade out after 16K to about 32K depending on
     * compressibility.
     */
    return nbSeq >= 2048;
}

/**
 * Returns the cost in bytes of encoding the normalized count header.
 * Returns an error if any of the helper functions return an error.
 */
static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
                              size_t const nbSeq, unsigned const FSELog)
{
    BYTE wksp[FSE_NCOUNTBOUND];
    S16 norm[MaxSeq + 1];
    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}

/**
 * Returns the cost in bits of encoding the distribution described by count
 * using the entropy bound.
 */
static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
{
    unsigned cost = 0;
    unsigned s;

    assert(total > 0);
    for (s = 0; s <= max; ++s) {
        unsigned norm = (unsigned)((256 * count[s]) / total);
        if (count[s] != 0 && norm == 0)
            norm = 1;
        assert(count[s] < total);
        cost += count[s] * kInverseProbabilityLog256[norm];
    }
    return cost >> 8;
}
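In equation form, the loop above evaluates the Shannon bound with probabilities quantized to 256ths, using the lookup table in 8.8 fixed point (hence the final `>> 8`):

$$\text{cost} \approx \sum_{s=0}^{\max} \mathrm{count}[s]\cdot\left(-\log_2\frac{n_s}{256}\right), \qquad n_s = \max\!\left(1,\ \left\lfloor\frac{256\cdot \mathrm{count}[s]}{\mathrm{total}}\right\rfloor\right).$$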
/**
 * Returns the cost in bits of encoding the distribution in count using ctable.
 * Returns an error if ctable cannot represent all the symbols in count.
 */
size_t ZSTD_fseBitCost(
    FSE_CTable const* ctable,
    unsigned const* count,
    unsigned const max)
{
    unsigned const kAccuracyLog = 8;
    size_t cost = 0;
    unsigned s;
    FSE_CState_t cstate;
    FSE_initCState(&cstate, ctable);
    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
                    ZSTD_getFSEMaxSymbolValue(ctable), max);
        return ERROR(GENERIC);
    }
    for (s = 0; s <= max; ++s) {
        unsigned const tableLog = cstate.stateLog;
        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
        if (count[s] == 0)
            continue;
        if (bitCost >= badCost) {
            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
            return ERROR(GENERIC);
        }
        cost += (size_t)count[s] * bitCost;
    }
    return cost >> kAccuracyLog;
}

/**
 * Returns the cost in bits of encoding the distribution in count using the
 * table described by norm. The max symbol supported by norm is assumed >= max.
 * norm must be valid for every symbol with non-zero probability in count.
 */
size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
                             unsigned const* count, unsigned const max)
{
    unsigned const shift = 8 - accuracyLog;
    size_t cost = 0;
    unsigned s;
    assert(accuracyLog <= 8);
    for (s = 0; s <= max; ++s) {
        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
        unsigned const norm256 = normAcc << shift;
        assert(norm256 > 0);
        assert(norm256 < 256);
        cost += count[s] * kInverseProbabilityLog256[norm256];
    }
    return cost >> 8;
}
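The scaling trick above reuses the /256 lookup table for a table with accuracy $a \le 8$: a normalized count $\tilde n_s$ out of $2^a$ is shifted up by $8-a$ so that

$$-\log_2\frac{\tilde n_s}{2^{a}} = -\log_2\frac{\tilde n_s\cdot 2^{\,8-a}}{256},$$

with $\tilde n_s = \max(\mathrm{norm}[s], 1)$, since a low-probability symbol stored as $-1$ is costed the same as probability 1.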
symbolEncodingType_e
ZSTD_selectEncodingType(
        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
        FSE_CTable const* prevCTable,
        short const* defaultNorm, U32 defaultNormLog,
        ZSTD_defaultPolicy_e const isDefaultAllowed,
        ZSTD_strategy const strategy)
{
    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
    if (mostFrequent == nbSeq) {
        *repeatMode = FSE_repeat_none;
        if (isDefaultAllowed && nbSeq <= 2) {
            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
             * If basic encoding isn't possible, always choose RLE.
             */
            DEBUGLOG(5, "Selected set_basic");
            return set_basic;
        }
        DEBUGLOG(5, "Selected set_rle");
        return set_rle;
    }
    if (strategy < ZSTD_lazy) {
        if (isDefaultAllowed) {
            size_t const staticFse_nbSeq_max = 1000;
            size_t const mult = 10 - strategy;
            size_t const baseLog = 3;
            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
            assert(mult <= 9 && mult >= 7);
            if ( (*repeatMode == FSE_repeat_valid)
              && (nbSeq < staticFse_nbSeq_max) ) {
                DEBUGLOG(5, "Selected set_repeat");
                return set_repeat;
            }
            if ( (nbSeq < dynamicFse_nbSeq_min)
              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
                DEBUGLOG(5, "Selected set_basic");
                /* The format allows default tables to be repeated, but it isn't useful.
                 * When using simple heuristics to select encoding type, we don't want
                 * to confuse these tables with dictionaries. When running more careful
                 * analysis, we don't need to waste time checking both repeating tables
                 * and default tables.
                 */
                *repeatMode = FSE_repeat_none;
                return set_basic;
            }
        }
    } else {
        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);

        if (isDefaultAllowed) {
            assert(!ZSTD_isError(basicCost));
            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
        }
        assert(!ZSTD_isError(NCountCost));
        assert(compressedCost < ERROR(maxCode));
        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
        if (basicCost <= repeatCost && basicCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_basic");
            assert(isDefaultAllowed);
            *repeatMode = FSE_repeat_none;
            return set_basic;
        }
        if (repeatCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_repeat");
            assert(!ZSTD_isError(repeatCost));
            return set_repeat;
        }
        assert(compressedCost < basicCost && compressedCost < repeatCost);
    }
    DEBUGLOG(5, "Selected set_compressed");
    *repeatMode = FSE_repeat_check;
    return set_compressed;
}
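Condensing the control flow above into one view (a summary, not normative):

    /* mostFrequent == nbSeq (degenerate distribution):
     *     nbSeq <= 2 && default allowed                 -> set_basic
     *     otherwise                                     -> set_rle
     * fast strategies (< ZSTD_lazy), default allowed:
     *     valid repeat table && nbSeq < 1000            -> set_repeat
     *     few sequences, or very flat distribution      -> set_basic
     * slow strategies (>= ZSTD_lazy):
     *     compare estimated bit costs of set_basic / set_repeat /
     *     set_compressed (NCount header + entropy bound), pick the min.
     * fall-through in all remaining cases               -> set_compressed */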
typedef struct {
    S16 norm[MaxSeq + 1];
    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
} ZSTD_BuildCTableWksp;

size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                unsigned* count, U32 max,
                const BYTE* codeTable, size_t nbSeq,
                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                const FSE_CTable* prevCTable, size_t prevCTableSize,
                void* entropyWorkspace, size_t entropyWorkspaceSize)
{
    BYTE* op = (BYTE*)dst;
    const BYTE* const oend = op + dstCapacity;
    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);

    switch (type) {
    case set_rle:
        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
        *op = codeTable[0];
        return 1;
    case set_repeat:
        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
        return 0;
    case set_basic:
        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
        return 0;
    case set_compressed: {
        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
        size_t nbSeq_1 = nbSeq;
        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
        if (count[codeTable[nbSeq-1]] > 1) {
            count[codeTable[nbSeq-1]]--;
            nbSeq_1--;
        }
        assert(nbSeq_1 > 1);
        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
        (void)entropyWorkspaceSize;
        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
        assert(oend >= op);
        {   size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog);   /* overflow protected */
            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
            return NCountSize;
        }
    }
    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
    }
}

FORCE_INLINE_TEMPLATE size_t
ZSTD_encodeSequences_body(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    BIT_CStream_t blockStream;
    FSE_CState_t stateMatchLength;
    FSE_CState_t stateOffsetBits;
    FSE_CState_t stateLitLength;

    RETURN_ERROR_IF(
        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
        dstSize_tooSmall, "not enough space remaining");
    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
                (int)(blockStream.endPtr - blockStream.startPtr),
                (unsigned)dstCapacity);

    /* first symbols */
    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    if (longOffsets) {
        U32 const ofBits = ofCodeTable[nbSeq-1];
        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
        if (extraBits) {
            BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
            BIT_flushBits(&blockStream);
        }
        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
                    ofBits - extraBits);
    } else {
        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
    }
    BIT_flushBits(&blockStream);

    {   size_t n;
        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
            BYTE const llCode = llCodeTable[n];
            BYTE const ofCode = ofCodeTable[n];
            BYTE const mlCode = mlCodeTable[n];
            U32  const llBits = LL_bits[llCode];
            U32  const ofBits = ofCode;
            U32  const mlBits = ML_bits[mlCode];
            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                        (unsigned)sequences[n].litLength,
                        (unsigned)sequences[n].mlBase + MINMATCH,
                        (unsigned)sequences[n].offBase);
                                                                            /* 32b*/  /* 64b*/
                                                                            /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
                BIT_flushBits(&blockStream);                                /* (7)*/
            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
            BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
            if (longOffsets) {
                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                if (extraBits) {
                    BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
                    BIT_flushBits(&blockStream);                            /* (7)*/
                }
                BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
                            ofBits - extraBits);                            /* 31 */
            } else {
                BIT_addBits(&blockStream, sequences[n].offBase, ofBits);    /* 31 */
            }
            BIT_flushBits(&blockStream);                                    /* (7)*/
            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
    }   }

    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
    FSE_flushCState(&blockStream, &stateMatchLength);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
    FSE_flushCState(&blockStream, &stateOffsetBits);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
    FSE_flushCState(&blockStream, &stateLitLength);

    {   size_t const streamSize = BIT_closeCStream(&blockStream);
        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
        return streamSize;
    }
}

static size_t
ZSTD_encodeSequences_default(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    return ZSTD_encodeSequences_body(dst, dstCapacity,
                                    CTable_MatchLength, mlCodeTable,
                                    CTable_OffsetBits, ofCodeTable,
                                    CTable_LitLength, llCodeTable,
                                    sequences, nbSeq, longOffsets);
}


#if DYNAMIC_BMI2

static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_encodeSequences_bmi2(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    return ZSTD_encodeSequences_body(dst, dstCapacity,
                                    CTable_MatchLength, mlCodeTable,
                                    CTable_OffsetBits, ofCodeTable,
                                    CTable_LitLength, llCodeTable,
                                    sequences, nbSeq, longOffsets);
}

#endif

size_t ZSTD_encodeSequences(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
    if (bmi2) {
        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
                                         CTable_MatchLength, mlCodeTable,
                                         CTable_OffsetBits, ofCodeTable,
                                         CTable_LitLength, llCodeTable,
                                         sequences, nbSeq, longOffsets);
    }
#endif
    (void)bmi2;
    return ZSTD_encodeSequences_default(dst, dstCapacity,
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq, longOffsets);
}
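The default/bmi2 pair above is a function-multiversioning pattern: one force-inlined body instantiated twice, so one copy can be compiled with BMI2 enabled and selected at runtime. A minimal standalone sketch of the same pattern (hypothetical names, GCC/Clang target attribute assumed):

    #include <stddef.h>

    /* Shared body; force-inlined in zstd so each wrapper gets its own copy. */
    static inline size_t work_body(const unsigned char* p, size_t n)
    {
        size_t sum = 0, i;
        for (i = 0; i < n; i++) sum += p[i];
        return sum;
    }

    static size_t work_default(const unsigned char* p, size_t n)
    { return work_body(p, n); }

    #if defined(__GNUC__)
    __attribute__((target("bmi2")))
    static size_t work_bmi2(const unsigned char* p, size_t n)
    { return work_body(p, n); }
    #endif

    /* Dispatch on a bmi2 capability flag detected once at startup. */
    static size_t work(const unsigned char* p, size_t n, int bmi2)
    {
    #if defined(__GNUC__)
        if (bmi2) return work_bmi2(p, n);
    #endif
        (void)bmi2;
        return work_default(p, n);
    }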
@ -1,54 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H

#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */

typedef enum {
    ZSTD_defaultDisallowed = 0,
    ZSTD_defaultAllowed = 1
} ZSTD_defaultPolicy_e;

symbolEncodingType_e
ZSTD_selectEncodingType(
        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
        FSE_CTable const* prevCTable,
        short const* defaultNorm, U32 defaultNormLog,
        ZSTD_defaultPolicy_e const isDefaultAllowed,
        ZSTD_strategy const strategy);

size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                unsigned* count, U32 max,
                const BYTE* codeTable, size_t nbSeq,
                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                const FSE_CTable* prevCTable, size_t prevCTableSize,
                void* entropyWorkspace, size_t entropyWorkspaceSize);

size_t ZSTD_encodeSequences(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);

size_t ZSTD_fseBitCost(
    FSE_CTable const* ctable,
    unsigned const* count,
    unsigned const max);

size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
                             unsigned const* count, unsigned const max);
#endif /* ZSTD_COMPRESS_SEQUENCES_H */
@ -1,577 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
 * Dependencies
 ***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/** ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
 *      If it is set_compressed, the first sub-block's literals section will be
 *      Compressed_Literals_Block, and the following sub-blocks' literals sections
 *      will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if unable to compress.
 *            Or error code */
static size_t
ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                              const ZSTD_hufCTablesMetadata_t* hufMetadata,
                              const BYTE* literals, size_t litSize,
                              void* dst, size_t dstSize,
                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    {   int const flags = bmi2 ? HUF_flags_bmi2 : 0;
        const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags)
                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return op-ostart;
}
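The 200-byte `header` margin above budgets for the worst-case Huffman table description, so the header size chosen before compressing remains valid once the (not yet known) table size is added. A hedged check of the boundary arithmetic:

    /* With writeEntropy, a litSize just below 1 KB may still need the
     * wider 14-bit format once up to ~200 bytes of table description
     * are counted in, hence the shifted thresholds: */
    size_t const header  = 200;
    size_t const litSize = 900;
    size_t const lhSize  = 3 + (litSize >= (1024 - header))    /* 900 >= 824  -> +1 */
                             + (litSize >= (16384 - header));  /* 900 < 16184 -> +0 */
    /* lhSize == 4, even though 900 < 1 KB */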
static size_t
|
||||
ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
|
||||
const seqDef* sequences, size_t nbSeq,
|
||||
size_t litSize, int lastSequence)
|
||||
{
|
||||
const seqDef* const sstart = sequences;
|
||||
const seqDef* const send = sequences + nbSeq;
|
||||
const seqDef* sp = sstart;
|
||||
size_t matchLengthSum = 0;
|
||||
size_t litLengthSum = 0;
|
||||
(void)(litLengthSum); /* suppress unused variable warning on some environments */
|
||||
while (send-sp > 0) {
|
||||
ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
|
||||
litLengthSum += seqLen.litLength;
|
||||
matchLengthSum += seqLen.matchLength;
|
||||
sp++;
|
||||
}
|
||||
assert(litLengthSum <= litSize);
|
||||
if (!lastSequence) {
|
||||
assert(litLengthSum == litSize);
|
||||
}
|
||||
return matchLengthSum + litSize;
|
||||
}
|
||||
|
||||
/** ZSTD_compressSubBlock_sequences() :
|
||||
* Compresses sequences section for a sub-block.
|
||||
* fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
|
||||
* symbol compression modes for the super-block.
|
||||
* The first successfully compressed block will have these in its header.
|
||||
* We set entropyWritten=1 when we succeed in compressing the sequences.
|
||||
* The following sub-blocks will always have repeat mode.
|
||||
* @return : compressed size of sequences section of a sub-block
|
||||
* Or 0 if it is unable to compress
|
||||
* Or error code. */
|
||||
static size_t
|
||||
ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
|
||||
const ZSTD_fseCTablesMetadata_t* fseMetadata,
|
||||
const seqDef* sequences, size_t nbSeq,
|
||||
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
|
||||
const ZSTD_CCtx_params* cctxParams,
|
||||
void* dst, size_t dstCapacity,
|
||||
const int bmi2, int writeEntropy, int* entropyWritten)
|
||||
{
|
||||
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
|
||||
BYTE* const ostart = (BYTE*)dst;
|
||||
BYTE* const oend = ostart + dstCapacity;
|
||||
BYTE* op = ostart;
|
||||
BYTE* seqHead;
|
||||
|
||||
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
|
||||
|
||||
*entropyWritten = 0;
|
||||
/* Sequences Header */
|
||||
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
|
||||
dstSize_tooSmall, "");
|
||||
if (nbSeq < 0x7F)
|
||||
*op++ = (BYTE)nbSeq;
|
||||
else if (nbSeq < LONGNBSEQ)
|
||||
op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
|
||||
else
|
||||
op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
|
||||
if (nbSeq==0) {
|
||||
return op - ostart;
|
||||
}
|
||||
|
||||
/* seqHead : flags for FSE encoding type */
|
||||
seqHead = op++;
|
||||
|
||||
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
|
||||
|
||||
if (writeEntropy) {
|
||||
const U32 LLtype = fseMetadata->llType;
|
||||
const U32 Offtype = fseMetadata->ofType;
|
||||
const U32 MLtype = fseMetadata->mlType;
|
||||
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
|
||||
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
|
||||
ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
|
||||
op += fseMetadata->fseTablesSize;
|
||||
} else {
|
||||
const U32 repeat = set_repeat;
|
||||
*seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
|
||||
}
|
||||
|
||||
{ size_t const bitstreamSize = ZSTD_encodeSequences(
|
||||
op, oend - op,
|
||||
fseTables->matchlengthCTable, mlCode,
|
||||
fseTables->offcodeCTable, ofCode,
|
||||
fseTables->litlengthCTable, llCode,
|
||||
sequences, nbSeq,
|
||||
longOffsets, bmi2);
|
||||
FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
|
||||
op += bitstreamSize;
|
||||
/* zstd versions <= 1.3.4 mistakenly report corruption when
|
||||
* FSE_readNCount() receives a buffer < 4 bytes.
|
||||
* Fixed by https://github.com/facebook/zstd/pull/1146.
|
||||
* This can happen when the last set_compressed table present is 2
|
||||
* bytes and the bitstream is only one byte.
|
||||
* In this exceedingly rare case, we will simply emit an uncompressed
|
||||
* block, since it isn't worth optimizing.
|
||||
*/
|
||||
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
|
||||
if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
|
||||
/* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
|
||||
assert(fseMetadata->lastCountSize + bitstreamSize == 3);
|
||||
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
|
||||
"emitting an uncompressed block.");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
|
||||
}
|
||||
|
||||
/* zstd versions <= 1.4.0 mistakenly report error when
|
||||
* sequences section body size is less than 3 bytes.
|
||||
* Fixed by https://github.com/facebook/zstd/pull/1664.
|
||||
* This can happen when the previous sequences section block is compressed
|
||||
* with rle mode and the current block's sequences section is compressed
|
||||
* with repeat mode where sequences section body size can be 1 byte.
|
||||
*/
|
||||
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
|
||||
if (op-seqHead < 4) {
|
||||
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
|
||||
"an uncompressed block when sequences are < 4 bytes");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
*entropyWritten = 1;
|
||||
return op - ostart;
|
||||
}
|
||||
|
||||
/** ZSTD_compressSubBlock() :
|
||||
* Compresses a single sub-block.
|
||||
* @return : compressed size of the sub-block
|
||||
* Or 0 if it failed to compress. */
|
||||
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
|
||||
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
|
||||
const seqDef* sequences, size_t nbSeq,
|
||||
const BYTE* literals, size_t litSize,
|
||||
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
|
||||
const ZSTD_CCtx_params* cctxParams,
|
||||
void* dst, size_t dstCapacity,
|
||||
const int bmi2,
|
||||
int writeLitEntropy, int writeSeqEntropy,
|
||||
int* litEntropyWritten, int* seqEntropyWritten,
|
||||
U32 lastBlock)
|
||||
{
|
||||
BYTE* const ostart = (BYTE*)dst;
|
||||
BYTE* const oend = ostart + dstCapacity;
|
||||
BYTE* op = ostart + ZSTD_blockHeaderSize;
|
||||
DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
|
||||
litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
|
||||
{ size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
|
||||
&entropyMetadata->hufMetadata, literals, litSize,
|
||||
op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
|
||||
FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
|
||||
if (cLitSize == 0) return 0;
|
||||
op += cLitSize;
|
||||
}
|
||||
{ size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
|
||||
&entropyMetadata->fseMetadata,
|
||||
sequences, nbSeq,
|
||||
llCode, mlCode, ofCode,
|
||||
cctxParams,
|
||||
op, oend-op,
|
||||
bmi2, writeSeqEntropy, seqEntropyWritten);
|
||||
FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
|
||||
if (cSeqSize == 0) return 0;
|
||||
op += cSeqSize;
|
||||
}
|
||||
/* Write block header */
|
||||
{ size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
|
||||
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
|
||||
MEM_writeLE24(ostart, cBlockHeader24);
|
||||
}
|
||||
return op-ostart;
|
||||
}
|
||||
|
||||
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
|
||||
const ZSTD_hufCTables_t* huf,
|
||||
const ZSTD_hufCTablesMetadata_t* hufMetadata,
|
||||
void* workspace, size_t wkspSize,
|
||||
int writeEntropy)
|
||||
{
|
||||
unsigned* const countWksp = (unsigned*)workspace;
|
||||
unsigned maxSymbolValue = 255;
|
||||
size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
|
||||
|
||||
if (hufMetadata->hType == set_basic) return litSize;
|
||||
else if (hufMetadata->hType == set_rle) return 1;
|
||||
else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
|
||||
size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
|
||||
if (ZSTD_isError(largest)) return litSize;
|
||||
{ size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
|
||||
if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
|
||||
return cLitSizeEstimate + literalSectionHeaderSize;
|
||||
} }
|
||||
assert(0); /* impossible */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
|
||||
const BYTE* codeTable, unsigned maxCode,
|
||||
size_t nbSeq, const FSE_CTable* fseCTable,
|
||||
const U8* additionalBits,
|
||||
short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
|
||||
void* workspace, size_t wkspSize)
|
||||
{
|
||||
unsigned* const countWksp = (unsigned*)workspace;
|
||||
const BYTE* ctp = codeTable;
|
||||
const BYTE* const ctStart = ctp;
|
||||
const BYTE* const ctEnd = ctStart + nbSeq;
|
||||
size_t cSymbolTypeSizeEstimateInBits = 0;
|
||||
unsigned max = maxCode;
|
||||
|
||||
HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
|
||||
if (type == set_basic) {
|
||||
/* We selected this encoding type, so it must be valid. */
|
||||
assert(max <= defaultMax);
|
||||
cSymbolTypeSizeEstimateInBits = max <= defaultMax
|
||||
? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
|
||||
: ERROR(GENERIC);
|
||||
} else if (type == set_rle) {
|
||||
cSymbolTypeSizeEstimateInBits = 0;
|
||||
} else if (type == set_compressed || type == set_repeat) {
|
||||
cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
|
||||
}
|
||||
if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
|
||||
while (ctp < ctEnd) {
|
||||
if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
|
||||
else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
|
||||
ctp++;
|
||||
}
|
||||
return cSymbolTypeSizeEstimateInBits / 8;
|
||||
}
|
||||
|
||||
static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
|
||||
const BYTE* llCodeTable,
|
||||
const BYTE* mlCodeTable,
|
||||
size_t nbSeq,
|
||||
const ZSTD_fseCTables_t* fseTables,
|
||||
const ZSTD_fseCTablesMetadata_t* fseMetadata,
|
||||
void* workspace, size_t wkspSize,
|
||||
int writeEntropy)
|
||||
{
|
||||
size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
|
||||
size_t cSeqSizeEstimate = 0;
|
||||
if (nbSeq == 0) return sequencesSectionHeaderSize;
|
||||
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
|
||||
nbSeq, fseTables->offcodeCTable, NULL,
|
||||
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
|
||||
workspace, wkspSize);
|
||||
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
|
||||
nbSeq, fseTables->litlengthCTable, LL_bits,
|
||||
LL_defaultNorm, LL_defaultNormLog, MaxLL,
|
||||
workspace, wkspSize);
|
||||
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
|
||||
nbSeq, fseTables->matchlengthCTable, ML_bits,
|
||||
ML_defaultNorm, ML_defaultNormLog, MaxML,
|
||||
workspace, wkspSize);
|
||||
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
|
||||
return cSeqSizeEstimate + sequencesSectionHeaderSize;
|
||||
}
|
||||
|
||||
static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
|
||||
const BYTE* ofCodeTable,
|
||||
const BYTE* llCodeTable,
|
||||
const BYTE* mlCodeTable,
|
||||
size_t nbSeq,
|
||||
const ZSTD_entropyCTables_t* entropy,
|
||||
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
|
||||
void* workspace, size_t wkspSize,
|
||||
int writeLitEntropy, int writeSeqEntropy) {
|
||||
size_t cSizeEstimate = 0;
|
||||
cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
|
||||
&entropy->huf, &entropyMetadata->hufMetadata,
|
||||
workspace, wkspSize, writeLitEntropy);
|
||||
cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
|
||||
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
|
||||
workspace, wkspSize, writeSeqEntropy);
|
||||
return cSizeEstimate + ZSTD_blockHeaderSize;
|
||||
}

static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
        return 1;
    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
        return 1;
    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
        return 1;
    return 0;
}

/** ZSTD_compressSubBlock_multi() :
 *  Breaks super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written to the first block.
 *  The following blocks will use repeat mode to compress.
 *  All sub-blocks are compressed blocks (no raw or rle blocks).
 *  @return : compressed size of the super block (which is multiple ZSTD blocks)
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                            const ZSTD_compressedBlockState_t* prevCBlock,
                            ZSTD_compressedBlockState_t* nextCBlock,
                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            const ZSTD_CCtx_params* cctxParams,
                            void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const int bmi2, U32 lastBlock,
                            void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart;
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t targetCBlockSize = cctxParams->targetCBlockSize;
    size_t litSize, seqCount;
    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
    int writeSeqEntropy = 1;
    int lastSequence = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
                (unsigned)(lend-lp), (unsigned)(send-sstart));

    litSize = 0;
    seqCount = 0;
    do {
        size_t cBlockSizeEstimate = 0;
        if (sstart == send) {
            lastSequence = 1;
        } else {
            const seqDef* const sequence = sp + seqCount;
            lastSequence = sequence == send - 1;
            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
            seqCount++;
        }
        if (lastSequence) {
            assert(lp <= lend);
            assert(litSize <= (size_t)(lend - lp));
            litSize = (size_t)(lend - lp);
        }
        /* I think there is an optimization opportunity here.
         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
         * since it recalculates estimate from scratch.
         * For example, it would recount literal distribution and symbol codes every time.
         */
        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                       &nextCBlock->entropy, entropyMetadata,
                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
            int litEntropyWritten = 0;
            int seqEntropyWritten = 0;
            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                       sp, seqCount,
                                                       lp, litSize,
                                                       llCodePtr, mlCodePtr, ofCodePtr,
                                                       cctxParams,
                                                       op, oend-op,
                                                       bmi2, writeLitEntropy, writeSeqEntropy,
                                                       &litEntropyWritten, &seqEntropyWritten,
                                                       lastBlock && lastSequence);
            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
            if (cSize > 0 && cSize < decompressedSize) {
                DEBUGLOG(5, "Committed the sub-block");
                assert(ip + decompressedSize <= iend);
                ip += decompressedSize;
                sp += seqCount;
                lp += litSize;
                op += cSize;
                llCodePtr += seqCount;
                mlCodePtr += seqCount;
                ofCodePtr += seqCount;
                litSize = 0;
                seqCount = 0;
                /* Entropy only needs to be written once */
                if (litEntropyWritten) {
                    writeLitEntropy = 0;
                }
                if (seqEntropyWritten) {
                    writeSeqEntropy = 0;
                }
            }
        }
    } while (!lastSequence);
    if (writeLitEntropy) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
        return 0;
    }
    if (ip < iend) {
        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            seqDef const* seq;
            repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            for (seq = sstart; seq < sp; ++seq) {
                ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }
    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
    return op-ostart;
}

size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock) {
    ZSTD_entropyCTablesMetadata_t entropyMetadata;

    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
          &zc->blockState.prevCBlock->entropy,
          &zc->blockState.nextCBlock->entropy,
          &zc->appliedParams,
          &entropyMetadata,
          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");

    return ZSTD_compressSubBlock_multi(&zc->seqStore,
            zc->blockState.prevCBlock,
            zc->blockState.nextCBlock,
            &entropyMetadata,
            &zc->appliedParams,
            dst, dstCapacity,
            src, srcSize,
            zc->bmi2, lastBlock,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
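/* Illustrative sketch (not part of the original source; the call site shown
 * is hypothetical): when cctxParams->targetCBlockSize is set, the block
 * compressor can route a block through ZSTD_compressSuperBlock() so it is
 * emitted as several sub-blocks of roughly targetCBlockSize bytes each:
 *
 *     size_t const cSize = ZSTD_compressSuperBlock(zc, op, (size_t)(oend-op),
 *                                                  ip, blockSize, lastBlock);
 *     FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
 *     if (cSize == 0) {
 *         // entropy contract not met: fall back to an uncompressed block
 *     }
 */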
@ -1,32 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPRESS_ADVANCED_H
#define ZSTD_COMPRESS_ADVANCED_H

/*-*************************************
*  Dependencies
***************************************/

#include "../zstd.h" /* ZSTD_CCtx */

/*-*************************************
*  Target Compressed Block Size
***************************************/

/* ZSTD_compressSuperBlock() :
 * Used to compress a super block when targetCBlockSize is being used.
 * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock);

#endif /* ZSTD_COMPRESS_ADVANCED_H */
@ -1,679 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [ ... workspace ... ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used. These
 *   buffers are each aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned/Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
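/* Illustrative sketch (not part of the original source): the phase ordering
 * described above means a well-formed consumer reserves in the order
 * objects -> buffers -> aligned/tables, e.g.:
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *     void* obj   = ZSTD_cwksp_reserve_object(&ws, sizeof(SomeObject));
 *     BYTE* buf   = ZSTD_cwksp_reserve_buffer(&ws, bufSize);
 *     U32*  table = (U32*)ZSTD_cwksp_reserve_table(&ws, tableBytes);
 *     if (ZSTD_cwksp_reserve_failed(&ws)) { ... handle failure ... }
 *
 * `mem`, `SomeObject`, `bufSize`, and `tableBytes` are hypothetical names.
 * Reversing the order (e.g. reserving a buffer after a table) fails, because
 * ZSTD_cwksp_internal_advance_phase() only moves forward. */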

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
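/* Worked example (illustrative): ZSTD_cwksp_align(13, 8) == 16 and
 * ZSTD_cwksp_align(16, 8) == 16: (size + (align-1)) is masked down to a
 * multiple of align, so already-aligned sizes pass through unchanged. */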

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
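/* Worked example (illustrative): without ASAN redzones,
 * ZSTD_cwksp_aligned_alloc_size(100) == 128, since 100 is first rounded up to
 * the next multiple of ZSTD_CWKSP_ALIGNMENT_BYTES (64); when redzones are
 * enabled, ZSTD_cwksp_alloc_size() adds 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE on top. */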

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
     * to align the beginning of the tables section, as well as another n_2=[0, 63] bytes
     * to align the beginning of the aligned section.
     *
     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
     * aligneds being sized in multiples of 64 bytes.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
    return bytes;
}
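/* Worked example (illustrative): with alignBytes == 64, a pointer whose
 * address is 48 mod 64 needs (64 - 48) & 63 == 16 additional bytes, while an
 * already 64-byte-aligned pointer needs (64 - 0) & 63 == 0 bytes. */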

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating buffers */
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }

        /* Going from allocating buffers to allocating aligneds/tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
                size_t const bytesToAlign =
                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
                                memory_allocation, "aligned phase - alignment initial allocation failed!");
            }
            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
        }   }   }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc;
    void* end;
    void* top;

    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
        return NULL;
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
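/* Illustrative sketch (not part of the original source): a dynamically
 * allocated workspace pairs ZSTD_cwksp_create() with ZSTD_cwksp_free():
 *
 *     ZSTD_cwksp ws;
 *     if (ZSTD_isError(ZSTD_cwksp_create(&ws, neededBytes, customMem)))
 *         return ERROR(memory_allocation);
 *     ... reserve objects/buffers/tables from &ws ...
 *     ZSTD_cwksp_free(&ws, customMem);
 *
 * `neededBytes` and `customMem` are hypothetical; customMem would typically
 * be the ZSTD_customMem the caller already holds. */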

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
                                                        size_t const estimatedSpace, int resizedWorkspace) {
    if (resizedWorkspace) {
        /* Resized/newly allocated wksp should have exact bounds */
        return ZSTD_cwksp_used(ws) == estimatedSpace;
    } else {
        /* Due to alignment, when reusing a workspace, we can consume up to 63 bytes
         * more or fewer than estimatedSpace. See the comments in zstd_cwksp.h for details.
         */
        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
    }
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */
@ -1,758 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"

static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
                                             void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    U32 const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0) {
                ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
            }
            if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
                ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
            }
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}

static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
                                            void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32 const hBitsL = cParams->hashLog;
    U32 const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32 const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0)
                hashSmall[smHash] = curr + i;
            if (i == 0 || hashLarge[lgHash] == 0)
                hashLarge[lgHash] = curr + i;
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}

void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              const void* const end,
                              ZSTD_dictTableLoadMethod_e dtlm,
                              ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
    }
}
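/* Illustrative note (an assumption inferred from how the values are consumed
 * below, not a quote from zstd's headers): in the CDict path, hashes are
 * computed with hashLog/chainLog + ZSTD_SHORT_CACHE_TAG_BITS bits, and
 * ZSTD_writeTaggedIndex() appears to pack the position index in the high bits
 * of each slot with a short tag from the hash in the low
 * ZSTD_SHORT_CACHE_TAG_BITS bits. Readers such as
 * ZSTD_compressBlock_doubleFast_dictMatchState_generic() then shift the
 * stored value right by ZSTD_SHORT_CACHE_TAG_BITS to recover the index, and
 * use ZSTD_comparePackedTags() to cheaply reject most false candidates before
 * touching dictionary bytes. */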


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t mLength;
    U32 offset;
    U32 curr;

    /* how many positions to search before increasing step size */
    const size_t kStepIncr = 1 << kSearchStrength;
    /* the position at which to increment the step size if no match is found */
    const BYTE* nextStep;
    size_t step; /* the current step size */

    size_t hl0; /* the long hash at ip */
    size_t hl1; /* the long hash at ip1 */

    U32 idxl0; /* the long match index for ip */
    U32 idxl1; /* the long match index for ip1 */

    const BYTE* matchl0; /* the long match for ip */
    const BYTE* matchs0; /* the short match for ip */
    const BYTE* matchl1; /* the long match for ip1 */

    const BYTE* ip = istart; /* the current position */
    const BYTE* ip1; /* the next position */

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");

    /* init */
    ip += ((ip - prefixLowest) == 0);
    {
        U32 const current = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
        U32 const maxRep = current - windowLow;
        if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* Outer Loop: one iteration per match found and stored */
    while (1) {
        step = 1;
        nextStep = ip + kStepIncr;
        ip1 = ip + step;

        if (ip1 > ilimit) {
            goto _cleanup;
        }

        hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
        idxl0 = hashLong[hl0];
        matchl0 = base + idxl0;

        /* Inner Loop: one iteration per search / position */
        do {
            const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
            const U32 idxs0 = hashSmall[hs0];
            curr = (U32)(ip-base);
            matchs0 = base + idxs0;

            hashLong[hl0] = hashSmall[hs0] = curr;   /* update hash tables */

            /* check noDict repcode */
            if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
                mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
                ip++;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                goto _match_stored;
            }

            hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);

            if (idxl0 > prefixLowestIndex) {
                /* check prefix long match */
                if (MEM_read64(matchl0) == MEM_read64(ip)) {
                    mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
                    offset = (U32)(ip-matchl0);
                    while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
                    goto _match_found;
                }
            }

            idxl1 = hashLong[hl1];
            matchl1 = base + idxl1;

            if (idxs0 > prefixLowestIndex) {
                /* check prefix short match */
                if (MEM_read32(matchs0) == MEM_read32(ip)) {
                    goto _search_next_long;
                }
            }

            if (ip1 >= nextStep) {
                PREFETCH_L1(ip1 + 64);
                PREFETCH_L1(ip1 + 128);
                step++;
                nextStep += kStepIncr;
            }
            ip = ip1;
            ip1 += step;

            hl0 = hl1;
            idxl0 = idxl1;
            matchl0 = matchl1;
#if defined(__aarch64__)
            PREFETCH_L1(ip+256);
#endif
        } while (ip1 <= ilimit);

_cleanup:
        /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
         * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
        offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

        /* save reps for next block */
        rep[0] = offset_1 ? offset_1 : offsetSaved1;
        rep[1] = offset_2 ? offset_2 : offsetSaved2;

        /* Return the last literals size */
        return (size_t)(iend - anchor);

_search_next_long:

        /* check prefix long +1 match */
        if (idxl1 > prefixLowestIndex) {
            if (MEM_read64(matchl1) == MEM_read64(ip1)) {
                ip = ip1;
                mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
                offset = (U32)(ip-matchl1);
                while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
                goto _match_found;
            }
        }

        /* if no long +1 match, explore the short match we found */
        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
        offset = (U32)(ip - matchs0);
        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */

        /* fall-through */

_match_found: /* requires ip, offset, mLength */
        offset_2 = offset_1;
        offset_1 = offset;

        if (step < 4) {
            /* It is unsafe to write this value back to the hashtable when ip1 is
             * greater than or equal to the new ip we will have after we're done
             * processing this match. Rather than perform that test directly
             * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
             * more predictable test. The minimum match length, even for a short
             * match, is 4 bytes, so as long as step (the initial distance between
             * ip and ip1) is less than 4, we know ip1 < new ip. */
            hashLong[hl1] = (U32)(ip1 - base);
        }

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
        }
    }
}
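/* Sizing note (illustrative; assumes kSearchStrength == 8 as defined in
 * zstd_compress_internal.h): kStepIncr == 1 << kSearchStrength == 256, so the
 * inner loop above widens its stride by one byte for every additional 256
 * positions searched without a match, trading match-finding effort for speed
 * on barely-compressible regions. */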


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashLong  = dms->hashTable;
    const U32* const dictHashSmall = dms->chainTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixLowestIndex - (U32)(dictEnd - dictBase);
    const U32 dictHBitsL           = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const U32 dictHBitsS           = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");

    /* if a dictionary is attached, it must be within window range */
    assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
        PREFETCH_AREA(dictHashLong, hashTableBytes)
        PREFETCH_AREA(dictHashSmall, chainTableBytes)
    }

    /* init */
    ip += (dictAndPrefixLength == 0);

    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 offset;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
        size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
        size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
        U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
        U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
        int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
        int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        const U32 repIndex = curr + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */

        /* check repcode */
        if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
            goto _match_stored;
        }

        if (matchIndexL > prefixLowestIndex) {
            /* check prefix long match */
            if (MEM_read64(matchLong) == MEM_read64(ip)) {
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                offset = (U32)(ip-matchLong);
                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
                goto _match_found;
            }
        } else if (dictTagsMatchL) {
            /* check dictMatchState long match */
            U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
            assert(dictMatchL < dictEnd);

            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
                goto _match_found;
        }   }

        if (matchIndexS > prefixLowestIndex) {
            /* check prefix short match */
            if (MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
            }
        } else if (dictTagsMatchS) {
            /* check dictMatchState short match */
            U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS;
            match = dictBase + dictMatchIndexS;
            matchIndexS = dictMatchIndexS + dictIndexDelta;

            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
        }   }

        ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
        PREFETCH_L1(ip+256);
#endif
        continue;

_search_next_long:
        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
            size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
            U32 const matchIndexL3 = hashLong[hl3];
            U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS];
            int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3);
            const BYTE* matchL3 = base + matchIndexL3;
            hashLong[hl3] = curr + 1;

            /* check prefix long +1 match */
            if (matchIndexL3 > prefixLowestIndex) {
                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                    ip++;
                    offset = (U32)(ip-matchL3);
                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                    goto _match_found;
                }
            } else if (dictTagsMatchL3) {
                /* check dict long +1 match */
                U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
                assert(dictMatchL3 < dictEnd);
                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
                    ip++;
                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
                    goto _match_found;
        }   }   }

        /* if no long +1 match, explore the short match we found */
        if (matchIndexS < prefixLowestIndex) {
            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
            offset = (U32)(curr - matchIndexS);
            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        } else {
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            offset = (U32)(ip - match);
            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        }

_match_found:
        offset_2 = offset_1;
        offset_1 = offset;

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
                        dictBase + repIndex2 - dictIndexDelta :
                        base + repIndex2;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }   /* while (ip < ilimit) */

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
|
||||
|
||||
#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
|
||||
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
|
||||
void const* src, size_t srcSize) \
|
||||
{ \
|
||||
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
|
||||
}
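
/* For reference, ZSTD_GEN_DFAST_FN(noDict, 4) expands to:
 *
 *   static size_t ZSTD_compressBlock_doubleFast_noDict_4(
 *           ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
 *           void const* src, size_t srcSize)
 *   {
 *       return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4);
 *   }
 *
 * i.e. one statically-dispatched specialization of the generic template per
 * (dictMode, mls) pair, so the compiler can constant-fold mls. */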

ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
ZSTD_GEN_DFAST_FN(noDict, 7)

ZSTD_GEN_DFAST_FN(dictMatchState, 4)
ZSTD_GEN_DFAST_FN(dictMatchState, 5)
ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)


size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
    }
}


size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
    }
}


static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);

    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 curr = (U32)(ip-base);
        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
            & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
        } else {
            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                offset = curr - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = curr + 1;
                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                    ip++;
                    offset = curr+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                    offset = curr - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
        }   }

        /* move to next sequence start */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
                    & (offset_2 <= current2 - dictStartIndex))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)

size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
    }
}
@ -1,39 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "../common/mem.h"      /* U32 */
#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */

void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm,
                              ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_DOUBLE_FAST_H */
@ -1,960 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"

static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_full);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        {   size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
            ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr);   }

        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
                    ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
    }   }   }   }
}
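
/* Layout note (as used by ZSTD_writeTaggedIndex / ZSTD_comparePackedTags here):
 * the hash is computed with ZSTD_SHORT_CACHE_TAG_BITS extra low bits, which act
 * as a short tag. A table entry packs (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag,
 * so a lookup can reject most non-matching candidates by comparing tags alone,
 * without touching the input at the candidate position. */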

static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_fast);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = curr;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = curr + p;
    }   }   }   }
}

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillHashTableForCCtx(ms, end, dtlm);
    }
}


/**
 * If you squint hard enough (and ignore repcodes), the search operation at any
 * given position is broken into 4 stages:
 *
 * 1. Hash   (map position to hash value via input read)
 * 2. Lookup (map hash val to index via hashtable read)
 * 3. Load   (map index to value at that position via input read)
 * 4. Compare
 *
 * Each of these steps involves a memory read at an address which is computed
 * from the previous step. This means these steps must be sequenced and their
 * latencies are cumulative.
 *
 * Rather than do 1->2->3->4 sequentially for a single position before moving
 * onto the next, this implementation interleaves these operations across the
 * next few positions:
 *
 * R = Repcode Read & Compare
 * H = Hash
 * T = Table Lookup
 * M = Match Read & Compare
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | ... M
 * N+1 | ...   TM
 * N+2 |    R H   T M
 * N+3 |         H    TM
 * N+4 |    R H      T M
 * N+5 |         H   ...
 * N+6 |    R ...
 *
 * This is very much analogous to the pipelining of execution in a CPU. And just
 * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
 * branch).
 *
 * When this happens, we throw away our current state, and do the following prep
 * to re-enter the loop:
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | H T
 * N+1 |   H
 *
 * This is also the work we do at the beginning to enter the loop initially.
 */
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
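    /* e.g. targetLength==0 gives stepSize 2 (0+1+1) and targetLength==3 gives 4;
     * the "+ !(targetLength)" simply bumps a zero targetLength to 1, so the
     * accelerated (hasStep) variants never use a step smaller than 2. */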
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;

    const BYTE* anchor = istart;
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    U32 rep_offset1 = rep[0];
    U32 rep_offset2 = rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    U32 mval; /* src value at match idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;

    /* ip0 and ip1 are always adjacent. The targetLength skipping and
     * uncompressibility acceleration is applied to every other position,
     * matching the behavior of #1562. step therefore represents the gap
     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);
    {   U32 const curr = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
        if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];

    do {
        /* load repcode match for ip[2] */
        const U32 rval = MEM_read32(ip2 - rep_offset1);

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* check repcode at ip[2] */
        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
            ip0 = ip2;
            match0 = ip0 - rep_offset1;
            mLength = ip0[-1] == match0[-1];
            ip0 -= mLength;
            match0 -= mLength;
            offcode = REPCODE1_TO_OFFBASE;
            mLength += 4;

            /* First write next hash table entry; we've already calculated it.
             * This write is known to be safe because the ip1 is before the
             * repcode (ip2). */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _match;
        }

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */

            /* First write next hash table entry; we've already calculated it.
             * This write is known to be safe because the ip1 == ip0 + 1, so
             * we know we will resume searching after ip1 */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */

            /* first write next hash table entry; we've already calculated it */
            if (step <= 4) {
                /* We need to avoid writing an index into the hash table >= the
                 * position at which we will pick up our searching after we've
                 * taken this match.
                 *
                 * The minimum possible match has length 4, so the earliest ip0
                 * can be after we take this match will be the current ip0 + 4.
                 * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
                 * write this position.
                 */
                hashTable[hash1] = (U32)(ip1 - base);
            }

            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* When the repcodes are outside of the prefix, we set them to zero before the loop.
     * When the offsets are still zero, we need to restore them after the block to have a correct
     * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
     * offsets were invalid. We need to figure out which offset to refill with.
     *     - If both offsets are zero they are in the same order.
     *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
     *     - If only one is zero, we need to decide which offset to restore.
     *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
     *         - It is impossible for rep_offset2 to be non-zero.
     *
     * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
     * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
     */
    offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
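    /* Example: both reps started out of window (offsetSaved1=r1, offsetSaved2=r2,
     * rep_offset1=rep_offset2=0). If one match with offset o is then found,
     * _offset sets rep_offset2 to the old rep_offset1 (still 0) and rep_offset1=o,
     * so the lines below store rep[0]=o and rep[1]=offsetSaved1=r1. */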

    /* save reps for next block */
    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx */

    /* Compute the offset code. */
    match0 = base + idx;
    rep_offset2 = rep_offset1;
    rep_offset1 = (U32)(ip0-match0);
    offcode = OFFSET_TO_OFFBASE(rep_offset1);
    mLength = 4;

    /* Count the backwards match length. */
    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
        ip0--;
        match0--;
        mLength++;
    }

_match: /* Requires: ip0, match0, offcode */

    /* Count the forward length. */
    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += rLength;
                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                anchor = ip0;
                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
    }   }   }

    goto _start;
}

#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                            \
    static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                   \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
    }

ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)

ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)

size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    if (ms->cParams.targetLength > 1) {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
        }
    } else {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
        }
    }
}

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip0 = istart;
    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
    const BYTE* anchor = istart;
    const U32   prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength  = (U32)(istart - prefixStart + dictEnd - dictStart);
    const U32 dictHBits            = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        PREFETCH_AREA(dictHashTable, hashTableBytes)
    }

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip0 += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Outer search loop */
    assert(stepSize >= 1);
    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
        size_t mLength;
        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);

        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);

        U32 matchIndex = hashTable[hash0];
        U32 curr = (U32)(ip0 - base);
        size_t step = stepSize;
        const size_t kStepIncr = 1 << kSearchStrength;
        const BYTE* nextStep = ip0 + kStepIncr;

        /* Inner search loop */
        while (1) {
            const BYTE* match = base + matchIndex;
            const U32 repIndex = curr + 1 - offset_1;
            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
            hashTable[hash0] = curr;   /* update hash table */

            if (((U32) ((prefixStartIndex - 1) - repIndex) >=
                 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
                && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
                ip0++;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                break;
            }

            if (dictTagsMatch) {
                /* Found a possible dict match */
                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatch = dictBase + dictMatchIndex;
                if (dictMatchIndex > dictStartIndex &&
                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
                    if (matchIndex <= prefixStartIndex) {
                        U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
                        while (((ip0 > anchor) & (dictMatch > dictStart))
                             && (ip0[-1] == dictMatch[-1])) {
                            ip0--;
                            dictMatch--;
                            mLength++;
                        } /* catch up */
                        offset_2 = offset_1;
                        offset_1 = offset;
                        ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                        break;
                    }
                }
            }

            if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
                /* found a regular match */
                U32 const offset = (U32) (ip0 - match);
                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
                while (((ip0 > anchor) & (match > prefixStart))
                     && (ip0[-1] == match[-1])) {
                    ip0--;
                    match--;
                    mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                break;
            }

            /* Prepare for next iteration */
            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
            matchIndex = hashTable[hash1];

            if (ip1 >= nextStep) {
                step++;
                nextStep += kStepIncr;
            }
            ip0 = ip1;
            ip1 = ip1 + step;
            if (ip1 > ilimit) goto _cleanup;

            curr = (U32)(ip0 - base);
            hash0 = hash1;
        }   /* end inner search loop */

        /* match found */
        assert(mLength);
        ip0 += mLength;
        anchor = ip0;

        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            /* check immediate repcode */
            while (ip0 <= ilimit) {
                U32 const current2 = (U32)(ip0-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
                    ip0 += repLength2;
                    anchor = ip0;
                    continue;
                }
                break;
            }
        }

        /* Prepare for next iteration */
        assert(ip0 == anchor);
        ip1 = ip0 + stepSize;
    }

_cleanup:
    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
}


static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    const BYTE* idxBase; /* base pointer for idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;
    const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */

    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

    {   U32 const curr = (U32)(ip0 - base);
        U32 const maxRep = curr - dictStartIndex;
        if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];
    idxBase = idx < prefixStartIndex ? dictBase : base;

    do {
        {   /* load repcode match for ip[2] */
            U32 const current2 = (U32)(ip2 - base);
            U32 const repIndex = current2 - offset_1;
            const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
            U32 rval;
            if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
               & (offset_1 > 0) ) {
                rval = MEM_read32(repBase + repIndex);
            } else {
                rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
            }

            /* write back hash table entry */
            current0 = (U32)(ip0 - base);
            hashTable[hash0] = current0;

            /* check repcode at ip[2] */
            if (MEM_read32(ip2) == rval) {
                ip0 = ip2;
                match0 = repBase + repIndex;
                matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                assert((match0 != prefixStart) & (match0 != dictStart));
                mLength = ip0[-1] == match0[-1];
                ip0 -= mLength;
                match0 -= mLength;
                offcode = REPCODE1_TO_OFFBASE;
                mLength += 4;
                goto _match;
        }   }

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved1;
    rep[1] = offset_2 ? offset_2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx, idxBase */

    /* Compute the offset code. */
    {   U32 const offset = current0 - idx;
        const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
        matchEnd = idx < prefixStartIndex ? dictEnd : iend;
        match0 = idxBase + idx;
        offset_2 = offset_1;
        offset_1 = offset;
        offcode = OFFSET_TO_OFFBASE(offset);
        mLength = 4;

        /* Count the backwards match length. */
        while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
            ip0--;
            match0--;
            mLength++;
    }   }

_match: /* Requires: ip0, match0, offcode, matchEnd */

    /* Count the forward length. */
    assert(matchEnd != 0);
    mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* write next hash table entry */
    if (ip1 < ip0) {
        hashTable[hash1] = (U32)(ip1 - base);
    }

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        while (ip0 <= ilimit) {
            U32 const repIndex2 = (U32)(ip0-base) - offset_2;
            const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0))  /* intentional underflow */
               && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
                const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += repLength2;
                anchor = ip0;
                continue;
            }
            break;
    }   }

    goto _start;
}

ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
}
@ -1,38 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "../common/mem.h"      /* U32 */
#include "zstd_compress_internal.h"

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        void const* end, ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_FAST_H */
File diff suppressed because it is too large
@ -1,127 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"

/**
 * Dedicated Dictionary Search Structure bucket log. In the
 * ZSTD_dedicatedDictSearch mode, the hashTable has
 * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
 * one.
 */
#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
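/* i.e. with a bucket log of 2, each hash bucket holds 2**2 == 4 candidate positions */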

#define ZSTD_ROW_HASH_TAG_BITS 8        /* nb bits to use for the tag */

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);

void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);

void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */

size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LAZY_H */
@ -1,724 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_ldm.h"

#include "../common/debug.h"
#include "../common/xxhash.h"
#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7

typedef struct {
    U64 rolling;
    U64 stopMask;
} ldmRollingHashState_t;

/** ZSTD_ldm_gear_init():
 *
 * Initializes the rolling hash state such that it will honor the
 * settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
    unsigned hashRateLog = params->hashRateLog;

    state->rolling = ~(U32)0;

    /* The choice of the splitting criterion is subject to two conditions:
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
     *
     * In the gear hash algorithm, bit n depends on the last n bytes;
     * so in order to obtain a good quality splitting criterion it is
     * preferable to use bits with high weight.
     *
     * To match condition 1 we use a mask with hashRateLog bits set
     * and, because of the previous remark, we make sure these bits
     * have the highest possible weight while still respecting
     * condition 2.
     */
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        /* In this degenerate case we simply honor the hash rate. */
        state->stopMask = ((U64)1 << hashRateLog) - 1;
    }
}
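
/* Illustrative sketch (not part of the original file): a worked example of the
 * stopMask computation above. With minMatchLength = 64 (so maxBitsInMask = 64)
 * and hashRateLog = 7, the mask keeps the 7 highest-weight bits, so a split
 * triggers on average once every 2^7 = 128 bytes. U64 is assumed to map to
 * uint64_t, as in zstd's mem.h. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned const hashRateLog = 7;
    unsigned const maxBitsInMask = 64;  /* MIN(minMatchLength, 64) with minMatchLength = 64 */
    uint64_t const stopMask =
        ((UINT64_C(1) << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    /* Prints fe00000000000000: the top 7 bits of the 64-bit rolling hash. */
    printf("stopMask = %016llx\n", (unsigned long long)stopMask);
    return 0;
}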

/** ZSTD_ldm_gear_reset()
 * Feeds [data, data + minMatchLength) into the hash without registering any
 * splits. This effectively resets the hash state. This is used when skipping
 * over data, either at the beginning of a block, or skipping sections.
 */
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
                                BYTE const* data, size_t minMatchLength)
{
    U64 hash = state->rolling;
    size_t n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
    } while (0)
    while (n + 3 < minMatchLength) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < minMatchLength) {
        GEAR_ITER_ONCE();
    }
#undef GEAR_ITER_ONCE
}

/** ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
        if (UNLIKELY((hash & mask) == 0)) {                    \
            splits[*numSplits] = n;                            \
            *numSplits += 1;                                   \
            if (*numSplits == LDM_BATCH_SIZE)                  \
                goto done;                                     \
        }                                                      \
    } while (0)

    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < size) {
        GEAR_ITER_ONCE();
    }

#undef GEAR_ITER_ONCE

done:
    state->rolling = hash;
    return n;
}
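
/* Illustrative sketch (not part of the original file): a self-contained model
 * of the split criterion in ZSTD_ldm_gear_feed() above. The real
 * ZSTD_ldm_gearTab is replaced by a table of pseudo-random constants (an
 * assumption for illustration only); with 7 mask bits, splits should land
 * roughly every 2^7 = 128 bytes on random input. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    enum { HASH_RATE_LOG = 7, DATA_SIZE = 1 << 20 };
    uint64_t gearTab[256];
    uint64_t hash = ~UINT64_C(0);
    uint64_t const stopMask =
        ((UINT64_C(1) << HASH_RATE_LOG) - 1) << (64 - HASH_RATE_LOG);
    unsigned long splits = 0;
    size_t i;

    srand(42);
    for (i = 0; i < 256; i++)   /* stand-in for ZSTD_ldm_gearTab */
        gearTab[i] = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

    for (i = 0; i < DATA_SIZE; i++) {   /* rand() stands in for input bytes */
        hash = (hash << 1) + gearTab[rand() & 0xff];
        if ((hash & stopMask) == 0)
            splits++;
    }
    /* Expect about DATA_SIZE >> HASH_RATE_LOG = 8192 splits. */
    printf("splits = %lu (expected ~%u)\n", splits, (unsigned)(DATA_SIZE >> HASH_RATE_LOG));
    return 0;
}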

void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        params->hashRateLog = params->windowLog < params->hashLog
                                      ? 0
                                      : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}

size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
    size_t const ldmHSize = ((size_t)1) << params.hashLog;
    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
    return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
}

size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
    return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
}
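
/* Illustrative sketch (not part of the original file): the default parameter
 * derivation and table sizing worked through for windowLog = 27. hashLog
 * defaults to windowLog - LDM_HASH_RLOG = 20, hashRateLog to windowLog -
 * hashLog = 7, and the hash table holds 2^20 entries. The 8-byte ldmEntry_t
 * (a U32 offset plus a U32 checksum) is an assumption stated here, not taken
 * from this file. */
#include <stdio.h>

int main(void)
{
    unsigned const windowLog = 27;
    unsigned const hashLog = windowLog - 7;        /* LDM_HASH_RLOG = 7 */
    unsigned const hashRateLog = windowLog - hashLog;
    unsigned const bucketSizeLog = 3;              /* LDM_BUCKET_SIZE_LOG */
    unsigned long const hashTableBytes = (1UL << hashLog) * 8;  /* sizeof(ldmEntry_t) assumed 8 */
    unsigned long const bucketOffsetsBytes = 1UL << (hashLog - bucketSizeLog);
    printf("hashLog=%u hashRateLog=%u hashTable=%lu bytes bucketOffsets=%lu bytes\n",
           hashLog, hashRateLog, hashTableBytes, bucketOffsetsBytes);
    return 0;
}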

/** ZSTD_ldm_getBucket() :
 *  Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}

/** ZSTD_ldm_insertEntry() :
 *  Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                 size_t const hash, const ldmEntry_t entry,
                                 ldmParams_t const ldmParams)
{
    BYTE* const pOffset = ldmState->bucketOffsets + hash;
    unsigned const offset = *pOffset;

    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
}

/** ZSTD_ldm_countBackwardsMatch() :
 *  Returns the number of bytes that match backwards before pIn and pMatch.
 *
 *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}
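
/* Illustrative sketch (not part of the original file): the backwards-count
 * loop above applied to two small buffers. The candidate match starts at
 * in + 6 and match + 6, and the three preceding bytes ("abc") also agree, so
 * the backwards count is 3. */
#include <stdio.h>

static size_t countBackwards(const unsigned char* pIn, const unsigned char* pAnchor,
                             const unsigned char* pMatch, const unsigned char* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}

int main(void)
{
    const unsigned char in[]    = "xyzabcMATCH";
    const unsigned char match[] = "uvwabcMATCH";
    printf("%zu\n", countBackwards(in + 6, in, match + 6, match));  /* prints 3 */
    return 0;
}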

/** ZSTD_ldm_countBackwardsMatch_2segments() :
 *  Returns the number of bytes that match backwards from pMatch,
 *  even with the backwards match spanning 2 different segments.
 *
 *  On reaching `pMatchBase`, start counting from mEnd */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
                    const BYTE* pIn, const BYTE* pAnchor,
                    const BYTE* pMatch, const BYTE* pMatchBase,
                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If backwards match is entirely in the extDict or prefix, immediately return */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}

/** ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}

void ZSTD_ldm_fillHashTable(
        ldmState_t* ldmState, const BYTE* ip,
        const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = XXH64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}


/** ZSTD_ldm_limitTableUpdate() :
 *
 *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
 *  if it is far away
 *  (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
    U32 const curr = (U32)(anchor - ms->window.base);
    if (curr > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
    }
}

static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged-processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
    ip += minMatchLength;

    while (ip < ilimit) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
                                    splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = XXH64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        }

        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            U32 offset;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;

            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                    curForwardMatchLength =
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
                }
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }

            /* No match found -- insert an entry into the hash table
             * and process the next candidate match */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            /* Match found */
            offset = (U32)(split - base) - bestEntry->offset;
            mLength = forwardMatchLength + backwardMatchLength;
            {
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

            anchor = split + forwardMatchLength;

            /* If we find a match that ends after the data that we've hashed
             * then we have a repeating, overlapping, pattern. E.g. all zeros.
             * If one repetition of the pattern matches our `stopMask` then all
             * repetitions will. We don't need to insert them all into our table,
             * only the first one. So skip over overlapping matches.
             * This is a major speed boost (20x) for compressing a single byte
             * repeated, when that byte ends up in the table.
             */
            if (anchor > ip + hashed) {
                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
                /* Continue the outer loop at anchor (ip + hashed == anchor). */
                ip = anchor - hashed;
                break;
            }
        }

        ip += hashed;
    }

    return iend - anchor;
}

/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}

size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         * newly generated sequence, or add the `newLeftoverSize` if none are
         * generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}
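
/* Illustrative sketch (not part of the original file): the chunking arithmetic
 * above. nbChunks is a ceiling division, and the final chunk is clipped to the
 * end of the input; for a 2.5 MiB source and 1 MiB chunks this yields chunks
 * of 1 MiB, 1 MiB and 0.5 MiB. */
#include <stdio.h>

int main(void)
{
    size_t const kMaxChunkSize = 1 << 20;
    size_t const srcSize = 5 * (size_t)(1 << 19);   /* 2.5 MiB */
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    for (chunk = 0; chunk < nbChunks; chunk++) {
        size_t const start = chunk * kMaxChunkSize;
        size_t const remaining = srcSize - start;
        size_t const chunkSize = remaining < kMaxChunkSize ? remaining : kMaxChunkSize;
        printf("chunk %zu: %zu bytes\n", chunk, chunkSize);
    }
    return 0;
}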

void
ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}

/**
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
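
/* Illustrative sketch (not part of the original file): the splitting rule
 * above on a toy sequence. With litLength = 10, matchLength = 20 and only
 * remaining = 15 bytes left in the block, the match is cut to 15 - 10 = 5
 * bytes; 5 is below a minMatch of 8, so the sequence degrades to literals
 * (offset == 0). The toySeq stand-in mirrors rawSeq's
 * { offset, litLength, matchLength } fields. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t offset, litLength, matchLength; } toySeq;

static toySeq splitForBlock(toySeq s, uint32_t remaining, uint32_t minMatch)
{
    if (remaining >= s.litLength + s.matchLength)
        return s;                      /* sequence fits in this block */
    if (remaining <= s.litLength) {
        s.offset = 0;                  /* rest of the block is literals */
    } else {
        s.matchLength = remaining - s.litLength;
        if (s.matchLength < minMatch)
            s.offset = 0;              /* truncated match too short */
    }
    return s;
}

int main(void)
{
    toySeq const in = { 100, 10, 20 };
    toySeq const out = splitForBlock(in, 15, 8);
    /* Prints offset=0 litLength=10 matchLength=5 */
    printf("offset=%u litLength=%u matchLength=%u\n",
           out.offset, out.litLength, out.matchLength);
    return 0;
}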

void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}

size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_paramSwitch_e useRowMatchFinder,
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          OFFSET_TO_OFFBASE(sequence.offset),
                          sequence.matchLength);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}
@ -1,117 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
#include "../zstd.h"   /* ZSTD_CCtx, size_t */

/*-*************************************
*  Long distance matching
***************************************/

#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT

void ZSTD_ldm_fillHashTable(
            ldmState_t* state, const BYTE* ip,
            const BYTE* iend, ldmParams_t const* params);

/**
 * ZSTD_ldm_generateSequences():
 *
 * Generates the sequences using the long distance match finder.
 * Generates long range matching sequences in `sequences`, which parse a prefix
 * of the source. `sequences` must be large enough to store every sequence,
 * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
 * @returns 0 or an error code.
 *
 * NOTE: The user must have called ZSTD_window_update() for all of the input
 * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
 * NOTE: This function returns an error if it runs out of space to store
 *       sequences.
 */
size_t ZSTD_ldm_generateSequences(
            ldmState_t* ldms, rawSeqStore_t* sequences,
            ldmParams_t const* params, void const* src, size_t srcSize);

/**
 * ZSTD_ldm_blockCompress():
 *
 * Compresses a block using the predefined sequences, along with a secondary
 * block compressor. The literals section of every sequence is passed to the
 * secondary block compressor, and those sequences are interspersed with the
 * predefined sequences. Returns the length of the last literals.
 * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
 * `rawSeqStore.seq` may also be updated to split the last sequence between two
 * blocks.
 * @return The length of the last literals.
 *
 * NOTE: The source must be at most the maximum block size, but the predefined
 * sequences can be any size, and may be longer than the block. In the case that
 * they are longer than the block, the last sequences may need to be split into
 * two. We handle that case correctly, and update `rawSeqStore` appropriately.
 * NOTE: This function does not return any errors.
 */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_paramSwitch_e useRowMatchFinder,
    void const* src, size_t srcSize);
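
/* Illustrative sketch (not part of the original file): the declarations above
 * are internal, but the same LDM machinery is reachable through zstd's public
 * API by enabling long-distance matching on a compression context. A minimal
 * sketch, assuming libzstd v1.4+ is linked: */
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    static char src[1 << 16];
    static char dst[1 << 17];
    size_t csize;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    memset(src, 'a', sizeof(src));

    /* Turn on the long-distance matcher; windowLog 27 gives a 128 MiB window. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);

    csize = ZSTD_compress2(cctx, dst, sizeof(dst), src, sizeof(src));
    if (!ZSTD_isError(csize))
        printf("compressed %zu -> %zu bytes\n", sizeof(src), csize);
    ZSTD_freeCCtx(cctx);
    return 0;
}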

/**
 * ZSTD_ldm_skipSequences():
 *
 * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
 * Avoids emitting matches less than `minMatch` bytes.
 * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
 */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
    U32 const minMatch);

/* ZSTD_ldm_skipRawSeqStoreBytes():
 * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
 * Not to be used in conjunction with ZSTD_ldm_skipSequences().
 * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
 */
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);

/** ZSTD_ldm_getTableSize() :
 *  Estimate the space needed for long distance matching tables or 0 if LDM is
 *  disabled.
 */
size_t ZSTD_ldm_getTableSize(ldmParams_t params);

/** ZSTD_ldm_getMaxNbSeq() :
 *  Return an upper bound on the number of sequences that can be produced by
 *  the long distance matcher, or 0 if LDM is disabled.
 */
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);

/** ZSTD_ldm_adjustParameters() :
 *  If the params->hashRateLog is not set, set it to its default value based on
 *  windowLog and params->hashLog.
 *
 *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
 *  params->hashLog if it is not).
 *
 *  Ensures that the minMatchLength >= targetLength during optimal parsing.
 */
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LDM_H */
@ -1,106 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LDM_GEARTAB_H
#define ZSTD_LDM_GEARTAB_H

#include "../common/compiler.h" /* UNUSED_ATTR */
#include "../common/mem.h"      /* U64 */

static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = {
    0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
    0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
    0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
    0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
    0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
    0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140,
    0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e,
    0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
    0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
    0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
    0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
    0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
    0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
    0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
    0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
    0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
    0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
    0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
    0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
    0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
    0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
    0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
    0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
    0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
    0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
    0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
    0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
    0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b,
    0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
    0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
    0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
    0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
    0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
    0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
    0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
    0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
    0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec,
    0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
    0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab,
    0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
    0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
    0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
    0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
    0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
    0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
    0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
    0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
    0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
    0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
    0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
    0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c,
    0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
    0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
    0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
    0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a,
    0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
    0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
    0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
    0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
    0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
    0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
    0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
    0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
    0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
    0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756,
    0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
    0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
    0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
    0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
    0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
    0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
    0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
    0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
    0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
    0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
    0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
    0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
    0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
    0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
    0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
    0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
    0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
    0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
    0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
    0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
    0x2b4da14f2613d8f4
};

#endif /* ZSTD_LDM_GEARTAB_H */
File diff suppressed because it is too large
@ -1,56 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"

/* used in ZSTD_loadDictionaryContent() */
void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);

size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);


size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries
 * and is only specific for the first block (no prefix) */

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_OPT_H */
File diff suppressed because it is too large
@ -1,113 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDMT_COMPRESS_H
#define ZSTDMT_COMPRESS_H

#if defined (__cplusplus)
extern "C" {
#endif


/* Note : This is an internal API.
 *        These APIs used to be exposed with ZSTDLIB_API,
 *        because it used to be the only way to invoke MT compression.
 *        Now, you must use ZSTD_compress2 and ZSTD_compressStream2() instead.
 *
 *        This API requires ZSTD_MULTITHREAD to be defined during compilation,
 *        otherwise ZSTDMT_createCCtx*() will fail.
 */
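
/* Illustrative sketch (not part of the original file): the public route to
 * multithreaded compression mentioned in the note above. A minimal sketch,
 * assuming libzstd was built with ZSTD_MULTITHREAD: */
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    static char src[1 << 20];
    static char dst[ZSTD_COMPRESSBOUND(1 << 20)];
    size_t csize;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    memset(src, 'z', sizeof(src));

    /* Request 4 worker threads; on a single-threaded build this call fails
     * and compression simply proceeds on the calling thread. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
    csize = ZSTD_compress2(cctx, dst, sizeof(dst), src, sizeof(src));
    if (!ZSTD_isError(csize))
        printf("compressed %zu -> %zu bytes\n", sizeof(src), csize);
    ZSTD_freeCCtx(cctx);
    return 0;
}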

/* ===   Dependencies   === */
#include "../common/zstd_deps.h"   /* size_t */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
#include "../zstd.h"               /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */


/* ===   Constants   === */
#ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */
#  define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256)
#endif
#ifndef ZSTDMT_JOBSIZE_MIN   /* a different value can be selected at compile time */
#  define ZSTDMT_JOBSIZE_MIN (512 KB)
#endif
#define ZSTDMT_JOBLOG_MAX   (MEM_32bits() ? 29 : 30)
#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (1024 MB))


/* ========================================================
 * ===  Private interface, for use by ZSTD_compress.c   ===
 * ===  Not exposed in libzstd. Never invoke directly   ===
 * ======================================================== */

/* ===   Memory management   === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
                                        ZSTD_customMem cMem,
                                        ZSTD_threadPool *pool);
size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);

size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);

/* ===   Streaming functions   === */

size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);

/*! ZSTDMT_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 *  mtctx can be freshly constructed or reused from a prior compression.
 *  If mtctx is reused, memory allocations from the prior compression may not be freed,
 *  even if they are not needed for the current compression.
 * @return : 0, or an error code */
size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx,
                    const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
                    const ZSTD_CDict* cdict,
                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);

/*! ZSTDMT_compressStream_generic() :
 *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
 *  depending on flush directive.
 * @return : minimum amount of data still to be flushed
 *           0 if fully flushed
 *           or an error code
 *  note : needs to be init using any ZSTD_initCStream*() variant */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp);

/*! ZSTDMT_toFlushNow()
 *  Tell how many bytes are ready to be flushed immediately.
 *  Probe the oldest active job (not yet entirely flushed) and check its output buffer.
 *  If return 0, it means there is no active job,
 *  or, it means oldest job is still active, but everything produced has been flushed so far,
 *  therefore flushing is limited by speed of oldest job. */
size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);

/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates only a selected set of compression parameters, to remain compatible with current frame.
 *  New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);

/*! ZSTDMT_getFrameProgression():
 *  tells how much data has been consumed (input) and produced (output) for current frame.
 *  able to count progression inside worker threads.
 */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);


#if defined (__cplusplus)
}
#endif

#endif   /* ZSTDMT_COMPRESS_H */
File diff suppressed because it is too large
@ -1,576 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#include "../common/portability_macros.h"
|
||||
|
||||
/* Stack marking
|
||||
* ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
|
||||
*/
|
||||
#if defined(__ELF__) && defined(__GNUC__)
|
||||
.section .note.GNU-stack,"",%progbits
|
||||
#endif
|
||||
|
||||
#if ZSTD_ENABLE_ASM_X86_64_BMI2
|
||||
|
||||
/* Calling convention:
|
||||
*
|
||||
* %rdi contains the first argument: HUF_DecompressAsmArgs*.
|
||||
* %rbp isn't maintained (no frame pointer).
|
||||
* %rsp contains the stack pointer that grows down.
|
||||
* No red-zone is assumed, only addresses >= %rsp are used.
|
||||
* All register contents are preserved.
|
||||
*
|
||||
* TODO: Support Windows calling convention.
|
||||
*/
|
||||
|
||||
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
|
||||
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
|
||||
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
|
||||
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
|
||||
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
|
||||
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
|
||||
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
|
||||
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
|
||||
.text
|
||||
|
||||
/* Sets up register mappings for clarity.
|
||||
* op[], bits[], dtable & ip[0] each get their own register.
|
||||
* ip[1,2,3] & olimit alias var[].
|
||||
* %rax is a scratch register.
|
||||
*/
|
||||
|
||||
#define op0 rsi
|
||||
#define op1 rbx
|
||||
#define op2 rcx
|
||||
#define op3 rdi
|
||||
|
||||
#define ip0 r8
|
||||
#define ip1 r9
|
||||
#define ip2 r10
|
||||
#define ip3 r11
|
||||
|
||||
#define bits0 rbp
|
||||
#define bits1 rdx
|
||||
#define bits2 r12
|
||||
#define bits3 r13
|
||||
#define dtable r14
|
||||
#define olimit r15
|
||||
|
||||
/* var[] aliases ip[1,2,3] & olimit
|
||||
* ip[1,2,3] are saved every iteration.
|
||||
* olimit is only used in compute_olimit.
|
||||
*/
|
||||
#define var0 r15
|
||||
#define var1 r9
|
||||
#define var2 r10
|
||||
#define var3 r11
|
||||
|
||||
/* 32-bit var registers */
|
||||
#define vard0 r15d
|
||||
#define vard1 r9d
|
||||
#define vard2 r10d
|
||||
#define vard3 r11d
|
||||
|
||||
/* Calls X(N) for each stream 0, 1, 2, 3. */
|
||||
#define FOR_EACH_STREAM(X) \
|
||||
X(0); \
|
||||
X(1); \
|
||||
X(2); \
|
||||
X(3)
|
||||
|
||||
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
|
||||
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
|
||||
X(0, idx); \
|
||||
X(1, idx); \
|
||||
X(2, idx); \
|
||||
X(3, idx)
|
||||
|
||||
/* Define both _HUF_* & HUF_* symbols because MacOS
|
||||
* C symbols are prefixed with '_' & Linux symbols aren't.
|
||||
*/
|
||||
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
|
||||
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
|
||||
ZSTD_CET_ENDBRANCH
|
||||
/* Save all registers - even if they are callee saved for simplicity. */
|
||||
push %rax
|
||||
push %rbx
|
||||
push %rcx
|
||||
push %rdx
|
||||
push %rbp
|
||||
push %rsi
|
||||
push %rdi
|
||||
push %r8
|
||||
push %r9
|
||||
push %r10
|
||||
push %r11
|
||||
push %r12
|
||||
push %r13
|
||||
push %r14
|
||||
push %r15
|
||||
|
||||
/* Read HUF_DecompressAsmArgs* args from %rax */
|
||||
movq %rdi, %rax
|
||||
movq 0(%rax), %ip0
|
||||
movq 8(%rax), %ip1
|
||||
movq 16(%rax), %ip2
|
||||
movq 24(%rax), %ip3
|
||||
movq 32(%rax), %op0
|
||||
movq 40(%rax), %op1
|
||||
movq 48(%rax), %op2
|
||||
movq 56(%rax), %op3
|
||||
movq 64(%rax), %bits0
|
||||
movq 72(%rax), %bits1
|
||||
movq 80(%rax), %bits2
|
||||
movq 88(%rax), %bits3
|
||||
movq 96(%rax), %dtable
|
||||
push %rax /* argument */
|
||||
push 104(%rax) /* ilimit */
|
||||
push 112(%rax) /* oend */
|
||||
push %olimit /* olimit space */
|
||||
|
||||
subq $24, %rsp
|
||||
|
||||
.L_4X1_compute_olimit:
|
||||
/* Computes how many iterations we can do safely
|
||||
* %r15, %rax may be clobbered
|
||||
* rbx, rdx must be saved
|
||||
* op3 & ip0 mustn't be clobbered
|
||||
*/
|
||||
movq %rbx, 0(%rsp)
|
||||
movq %rdx, 8(%rsp)
|
||||
|
||||
movq 32(%rsp), %rax /* rax = oend */
|
||||
subq %op3, %rax /* rax = oend - op3 */
|
||||
|
||||
/* r15 = (oend - op3) / 5 */
|
||||
movabsq $-3689348814741910323, %rdx
|
||||
mulq %rdx
|
||||
movq %rdx, %r15
|
||||
shrq $2, %r15
|
||||
|
||||
movq %ip0, %rax /* rax = ip0 */
|
||||
movq 40(%rsp), %rdx /* rdx = ilimit */
|
||||
subq %rdx, %rax /* rax = ip0 - ilimit */
|
||||
movq %rax, %rbx /* rbx = ip0 - ilimit */
|
||||
|
||||
/* rdx = (ip0 - ilimit) / 7 */
|
||||
movabsq $2635249153387078803, %rdx
|
||||
mulq %rdx
|
||||
subq %rdx, %rbx
|
||||
shrq %rbx
|
||||
addq %rbx, %rdx
|
||||
shrq $2, %rdx
|
||||
|
||||
/* r15 = min(%rdx, %r15) */
|
||||
cmpq %rdx, %r15
|
||||
cmova %rdx, %r15
|
||||
|
||||
/* r15 = r15 * 5 */
|
||||
leaq (%r15, %r15, 4), %r15
|
||||
|
||||
/* olimit = op3 + r15 */
|
||||
addq %op3, %olimit
|
||||
|
||||
movq 8(%rsp), %rdx
|
||||
movq 0(%rsp), %rbx
|
||||
|
||||
/* If (op3 + 20 > olimit) */
|
||||
movq %op3, %rax /* rax = op3 */
|
||||
addq $20, %rax /* rax = op3 + 20 */
|
||||
cmpq %rax, %olimit /* op3 + 20 > olimit */
|
||||
jb .L_4X1_exit
|
||||
|
||||
/* If (ip1 < ip0) go to exit */
|
||||
cmpq %ip0, %ip1
|
||||
jb .L_4X1_exit
|
||||
|
||||
/* If (ip2 < ip1) go to exit */
|
||||
cmpq %ip1, %ip2
|
||||
jb .L_4X1_exit
|
||||
|
||||
/* If (ip3 < ip2) go to exit */
|
||||
cmpq %ip2, %ip3
|
||||
jb .L_4X1_exit
|
||||
|
||||
/* Reads top 11 bits from bits[n]
|
||||
* Loads dt[bits[n]] into var[n]
|
||||
*/
|
||||
#define GET_NEXT_DELT(n) \
|
||||
movq $53, %var##n; \
|
||||
shrxq %var##n, %bits##n, %var##n; \
|
||||
movzwl (%dtable,%var##n,2),%vard##n
|
||||
|
||||
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
|
||||
* Moves var[n] to %rax
|
||||
* bits[n] <<= var[n] & 63
|
||||
* op[n][idx] = %rax >> 8
|
||||
* %ah is a way to access bits [8, 16) of %rax
|
||||
*/
|
||||
#define DECODE_FROM_DELT(n, idx) \
|
||||
movq %var##n, %rax; \
|
||||
shlxq %var##n, %bits##n, %bits##n; \
|
||||
movb %ah, idx(%op##n)
|
||||
|
||||
/* Assumes GET_NEXT_DELT has been called.
|
||||
* Calls DECODE_FROM_DELT then GET_NEXT_DELT
|
||||
*/
|
||||
#define DECODE_AND_GET_NEXT(n, idx) \
|
||||
DECODE_FROM_DELT(n, idx); \
|
||||
GET_NEXT_DELT(n) \
|
||||
|
||||
/* // ctz & nbBytes is stored in bits[n]
|
||||
* // nbBits is stored in %rax
|
||||
* ctz = CTZ[bits[n]]
|
||||
* nbBits = ctz & 7
|
||||
* nbBytes = ctz >> 3
|
||||
* op[n] += 5
|
||||
* ip[n] -= nbBytes
|
||||
* // Note: x86-64 is little-endian ==> no bswap
|
||||
* bits[n] = MEM_readST(ip[n]) | 1
|
||||
* bits[n] <<= nbBits
|
||||
*/
|
||||
#define RELOAD_BITS(n) \
|
||||
bsfq %bits##n, %bits##n; \
|
||||
movq %bits##n, %rax; \
|
||||
andq $7, %rax; \
|
||||
shrq $3, %bits##n; \
|
||||
leaq 5(%op##n), %op##n; \
|
||||
subq %bits##n, %ip##n; \
|
||||
movq (%ip##n), %bits##n; \
|
||||
orq $1, %bits##n; \
|
||||
shlx %rax, %bits##n, %bits##n

    /* Store clobbered variables on the stack */
    movq %olimit, 24(%rsp)
    movq %ip1, 0(%rsp)
    movq %ip2, 8(%rsp)
    movq %ip3, 16(%rsp)

    /* Call GET_NEXT_DELT for each stream */
    FOR_EACH_STREAM(GET_NEXT_DELT)

    .p2align 6

.L_4X1_loop_body:
    /* Decode 5 symbols in each of the 4 streams (20 total)
     * Must have called GET_NEXT_DELT for each stream
     */
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)

    /* Load ip[1,2,3] from stack (var[] aliases them)
     * ip[] is needed for RELOAD_BITS
     * Each will be stored back to the stack after RELOAD
     */
    movq 0(%rsp), %ip1
    movq 8(%rsp), %ip2
    movq 16(%rsp), %ip3

    /* Reload each stream & fetch the next table entry
     * to prepare for the next iteration
     */
    RELOAD_BITS(0)
    GET_NEXT_DELT(0)

    RELOAD_BITS(1)
    movq %ip1, 0(%rsp)
    GET_NEXT_DELT(1)

    RELOAD_BITS(2)
    movq %ip2, 8(%rsp)
    GET_NEXT_DELT(2)

    RELOAD_BITS(3)
    movq %ip3, 16(%rsp)
    GET_NEXT_DELT(3)

    /* If op3 < olimit: continue the loop */
    cmp %op3, 24(%rsp)
    ja .L_4X1_loop_body

    /* Reload ip[1,2,3] from stack */
    movq 0(%rsp), %ip1
    movq 8(%rsp), %ip2
    movq 16(%rsp), %ip3

    /* Re-compute olimit */
    jmp .L_4X1_compute_olimit

#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE_AND_GET_NEXT
#undef RELOAD_BITS
.L_4X1_exit:
    addq $24, %rsp

    /* Restore stack (oend & olimit) */
    pop %rax /* olimit */
    pop %rax /* oend */
    pop %rax /* ilimit */
    pop %rax /* arg */

    /* Save ip / op / bits */
    movq %ip0, 0(%rax)
    movq %ip1, 8(%rax)
    movq %ip2, 16(%rax)
    movq %ip3, 24(%rax)
    movq %op0, 32(%rax)
    movq %op1, 40(%rax)
    movq %op2, 48(%rax)
    movq %op3, 56(%rax)
    movq %bits0, 64(%rax)
    movq %bits1, 72(%rax)
    movq %bits2, 80(%rax)
    movq %bits3, 88(%rax)

    /* Restore registers */
    pop %r15
    pop %r14
    pop %r13
    pop %r12
    pop %r11
    pop %r10
    pop %r9
    pop %r8
    pop %rdi
    pop %rsi
    pop %rbp
    pop %rdx
    pop %rcx
    pop %rbx
    pop %rax
    ret

_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
    ZSTD_CET_ENDBRANCH
    /* Save all registers - even if they are callee saved for simplicity. */
    push %rax
    push %rbx
    push %rcx
    push %rdx
    push %rbp
    push %rsi
    push %rdi
    push %r8
    push %r9
    push %r10
    push %r11
    push %r12
    push %r13
    push %r14
    push %r15

    movq %rdi, %rax
    movq 0(%rax), %ip0
    movq 8(%rax), %ip1
    movq 16(%rax), %ip2
    movq 24(%rax), %ip3
    movq 32(%rax), %op0
    movq 40(%rax), %op1
    movq 48(%rax), %op2
    movq 56(%rax), %op3
    movq 64(%rax), %bits0
    movq 72(%rax), %bits1
    movq 80(%rax), %bits2
    movq 88(%rax), %bits3
    movq 96(%rax), %dtable
    push %rax /* argument */
    push %rax /* olimit */
    push 104(%rax) /* ilimit */

    movq 112(%rax), %rax
    push %rax /* oend3 */

    movq %op3, %rax
    push %rax /* oend2 */

    movq %op2, %rax
    push %rax /* oend1 */

    movq %op1, %rax
    push %rax /* oend0 */

    /* Scratch space */
    subq $8, %rsp

.L_4X2_compute_olimit:
    /* Computes how many iterations we can do safely
     * %r15, %rax may be clobbered
     * rdx must be saved
     * op[0,1,2,3] & ip0 mustn't be clobbered
     */
    movq %rdx, 0(%rsp)

    /* We can consume up to 7 input bytes each iteration. */
    movq %ip0, %rax /* rax = ip0 */
    movq 40(%rsp), %rdx /* rdx = ilimit */
    subq %rdx, %rax /* rax = ip0 - ilimit */
    movq %rax, %r15 /* r15 = ip0 - ilimit */

    /* rdx = rax / 7 */
    movabsq $2635249153387078803, %rdx
    mulq %rdx
    subq %rdx, %r15
    shrq %r15
    addq %r15, %rdx
    shrq $2, %rdx
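
    /* Editor's note (not in the original source): the constant above is the
     * standard rounded reciprocal for unsigned division by 7,
     * 0x2492492492492493 ~ 2^64/7. mulq leaves the high 64 bits of
     * rax * magic in rdx; the sub/shr/add/shr fixup that follows then yields
     * rdx = floor(rax / 7) without a div instruction. */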

    /* r15 = (ip0 - ilimit) / 7 */
    movq %rdx, %r15

    /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
    movq 8(%rsp), %rax /* rax = oend0 */
    subq %op0, %rax /* rax = oend0 - op0 */
    movq 16(%rsp), %rdx /* rdx = oend1 */
    subq %op1, %rdx /* rdx = oend1 - op1 */

    cmpq %rax, %rdx
    cmova %rax, %rdx /* rdx = min(%rdx, %rax) */

    movq 24(%rsp), %rax /* rax = oend2 */
    subq %op2, %rax /* rax = oend2 - op2 */

    cmpq %rax, %rdx
    cmova %rax, %rdx /* rdx = min(%rdx, %rax) */

    movq 32(%rsp), %rax /* rax = oend3 */
    subq %op3, %rax /* rax = oend3 - op3 */

    cmpq %rax, %rdx
    cmova %rax, %rdx /* rdx = min(%rdx, %rax) */

    movabsq $-3689348814741910323, %rax
    mulq %rdx
    shrq $3, %rdx /* rdx = rdx / 10 */
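
    /* Editor's note (not in the original source): same reciprocal trick for
     * division by 10: -3689348814741910323 is 0xCCCCCCCCCCCCCCCD ~ 2^64*(4/5),
     * so the high half of the product, shifted right by 3, is floor(rdx/10). */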

    /* r15 = min(%rdx, %r15) */
    cmpq %rdx, %r15
    cmova %rdx, %r15

    /* olimit = op3 + 5 * r15 */
    movq %r15, %rax
    leaq (%op3, %rax, 4), %olimit
    addq %rax, %olimit

    movq 0(%rsp), %rdx

    /* If (op3 + 10 > olimit) */
    movq %op3, %rax /* rax = op3 */
    addq $10, %rax /* rax = op3 + 10 */
    cmpq %rax, %olimit /* op3 + 10 > olimit */
    jb .L_4X2_exit

    /* If (ip1 < ip0) go to exit */
    cmpq %ip0, %ip1
    jb .L_4X2_exit

    /* If (ip2 < ip1) go to exit */
    cmpq %ip1, %ip2
    jb .L_4X2_exit

    /* If (ip3 < ip2) go to exit */
    cmpq %ip2, %ip3
    jb .L_4X2_exit

#define DECODE(n, idx) \
    movq %bits##n, %rax; \
    shrq $53, %rax; \
    movzwl 0(%dtable,%rax,4),%r8d; \
    movzbl 2(%dtable,%rax,4),%r15d; \
    movzbl 3(%dtable,%rax,4),%eax; \
    movw %r8w, (%op##n); \
    shlxq %r15, %bits##n, %bits##n; \
    addq %rax, %op##n
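
    /* Editor's note (not in the original source): reading DECODE against the
     * 4-byte table stride, each X2 table entry appears to pack a 16-bit symbol
     * pair in bytes [0,2), the number of bits consumed in byte 2, and the
     * number of output bytes produced (1 or 2) in byte 3: two symbol bytes are
     * always stored via movw, but op[n] only advances by that last field. */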

#define RELOAD_BITS(n) \
    bsfq %bits##n, %bits##n; \
    movq %bits##n, %rax; \
    shrq $3, %bits##n; \
    andq $7, %rax; \
    subq %bits##n, %ip##n; \
    movq (%ip##n), %bits##n; \
    orq $1, %bits##n; \
    shlxq %rax, %bits##n, %bits##n


    movq %olimit, 48(%rsp)

    .p2align 6

.L_4X2_loop_body:
    /* We clobber r8, so store it on the stack */
    movq %r8, 0(%rsp)

    /* Decode 5 symbols from each of the 4 streams (20 symbols total). */
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)

    /* Reload r8 */
    movq 0(%rsp), %r8

    FOR_EACH_STREAM(RELOAD_BITS)

    cmp %op3, 48(%rsp)
    ja .L_4X2_loop_body
    jmp .L_4X2_compute_olimit

#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
    addq $8, %rsp
    /* Restore stack (oend & olimit) */
    pop %rax /* oend0 */
    pop %rax /* oend1 */
    pop %rax /* oend2 */
    pop %rax /* oend3 */
    pop %rax /* ilimit */
    pop %rax /* olimit */
    pop %rax /* arg */

    /* Save ip / op / bits */
    movq %ip0, 0(%rax)
    movq %ip1, 8(%rax)
    movq %ip2, 16(%rax)
    movq %ip3, 24(%rax)
    movq %op0, 32(%rax)
    movq %op1, 40(%rax)
    movq %op2, 48(%rax)
    movq %op3, 56(%rax)
    movq %bits0, 64(%rax)
    movq %bits1, 72(%rax)
    movq %bits2, 80(%rax)
    movq %bits3, 88(%rax)

    /* Restore registers */
    pop %r15
    pop %r14
    pop %r13
    pop %r12
    pop %r11
    pop %r10
    pop %r9
    pop %r8
    pop %rdi
    pop %rsi
    pop %rbp
    pop %rdx
    pop %rcx
    pop %rbx
    pop %rax
    ret

#endif
@ -1,244 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* zstd_ddict.c :
 * concentrates all logic that needs to know the internals of ZSTD_DDict object */

/*-*******************************************************
 *  Dependencies
 *********************************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_deps.h"    /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h"          /* bmi2 */
#include "../common/mem.h"          /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/huf.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
#  include "../legacy/zstd_legacy.h"
#endif



/*-*******************************************************
 *  Types
 *********************************************************/
struct ZSTD_DDict_s {
    void* dictBuffer;
    const void* dictContent;
    size_t dictSize;
    ZSTD_entropyDTables_t entropy;
    U32 dictID;
    U32 entropyPresent;
    ZSTD_customMem cMem;
};  /* typedef'd to ZSTD_DDict within "zstd.h" */

const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
{
    assert(ddict != NULL);
    return ddict->dictContent;
}

size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
{
    assert(ddict != NULL);
    return ddict->dictSize;
}

void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
    DEBUGLOG(4, "ZSTD_copyDDictParameters");
    assert(dctx != NULL);
    assert(ddict != NULL);
    dctx->dictID = ddict->dictID;
    dctx->prefixStart = ddict->dictContent;
    dctx->virtualStart = ddict->dictContent;
    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
    dctx->previousDstEnd = dctx->dictEnd;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
    if (ddict->entropyPresent) {
        dctx->litEntropy = 1;
        dctx->fseEntropy = 1;
        dctx->LLTptr = ddict->entropy.LLTable;
        dctx->MLTptr = ddict->entropy.MLTable;
        dctx->OFTptr = ddict->entropy.OFTable;
        dctx->HUFptr = ddict->entropy.hufTable;
        dctx->entropy.rep[0] = ddict->entropy.rep[0];
        dctx->entropy.rep[1] = ddict->entropy.rep[1];
        dctx->entropy.rep[2] = ddict->entropy.rep[2];
    } else {
        dctx->litEntropy = 0;
        dctx->fseEntropy = 0;
    }
}


static size_t
ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
                           ZSTD_dictContentType_e dictContentType)
{
    ddict->dictID = 0;
    ddict->entropyPresent = 0;
    if (dictContentType == ZSTD_dct_rawContent) return 0;

    if (ddict->dictSize < 8) {
        if (dictContentType == ZSTD_dct_fullDict)
            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
        return 0;   /* pure content mode */
    }
    {   U32 const magic = MEM_readLE32(ddict->dictContent);
        if (magic != ZSTD_MAGIC_DICTIONARY) {
            if (dictContentType == ZSTD_dct_fullDict)
                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
            return 0;   /* pure content mode */
        }
    }
    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);

    /* load entropy tables */
    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
        &ddict->entropy, ddict->dictContent, ddict->dictSize)),
        dictionary_corrupted, "");
    ddict->entropyPresent = 1;
    return 0;
}


static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType)
{
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
        ddict->dictBuffer = NULL;
        ddict->dictContent = dict;
        if (!dict) dictSize = 0;
    } else {
        void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
        ddict->dictBuffer = internalBuffer;
        ddict->dictContent = internalBuffer;
        if (!internalBuffer) return ERROR(memory_allocation);
        ZSTD_memcpy(internalBuffer, dict, dictSize);
    }
    ddict->dictSize = dictSize;
    ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001);  /* cover both little and big endian */

    /* parse dictionary content */
    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");

    return 0;
}

ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_customMem customMem)
{
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;

    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
        if (ddict == NULL) return NULL;
        ddict->cMem = customMem;
        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
                                            dict, dictSize,
                                            dictLoadMethod, dictContentType);
            if (ZSTD_isError(initResult)) {
                ZSTD_freeDDict(ddict);
                return NULL;
        }   }
        return ddict;
    }
}

/*! ZSTD_createDDict() :
 *  Create a digested dictionary, to start decompression without startup delay.
 *  `dict` content is copied inside DDict.
 *  Consequently, `dict` can be released after `ZSTD_DDict` creation */
ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
{
    ZSTD_customMem const allocator = { NULL, NULL, NULL };
    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
}
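
/* Editor's note -- illustrative only, not part of the original file: typical
 * DDict usage through the public API declared in zstd.h, which this file backs:
 *
 *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictBufSize);
 *     ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
 *     size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
 *                                                     src, srcSize, ddict);
 *     // check ZSTD_isError(dSize), then reuse dctx/ddict for further frames
 *     ZSTD_freeDCtx(dctx);
 *     ZSTD_freeDDict(ddict);
 */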

/*! ZSTD_createDDict_byReference() :
 *  Create a digested dictionary, to start decompression without startup delay.
 *  Dictionary content is simply referenced, and will be accessed during decompression.
 *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
{
    ZSTD_customMem const allocator = { NULL, NULL, NULL };
    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
}


const ZSTD_DDict* ZSTD_initStaticDDict(
                                void* sBuffer, size_t sBufferSize,
                                const void* dict, size_t dictSize,
                                ZSTD_dictLoadMethod_e dictLoadMethod,
                                ZSTD_dictContentType_e dictContentType)
{
    size_t const neededSpace = sizeof(ZSTD_DDict)
                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
    assert(sBuffer != NULL);
    assert(dict != NULL);
    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
    if (sBufferSize < neededSpace) return NULL;
    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        ZSTD_memcpy(ddict+1, dict, dictSize);   /* local copy */
        dict = ddict+1;
    }
    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType) ))
        return NULL;
    return ddict;
}
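
/* Editor's note -- illustrative only, not part of the original file: the
 * static-allocation path above avoids internal allocation. Sketch, assuming
 * the standard zstd.h API:
 *
 *     size_t const need = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
 *     void* const workspace = malloc(need);          // or a static arena
 *     const ZSTD_DDict* const ddict = ZSTD_initStaticDDict(
 *         workspace, need, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
 *     // ddict == NULL on misalignment or an undersized workspace
 */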


size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = ddict->cMem;
        ZSTD_customFree(ddict->dictBuffer, cMem);
        ZSTD_customFree(ddict, cMem);
        return 0;
    }
}

/*! ZSTD_estimateDDictSize() :
 *  Estimate amount of memory that will be needed to create a dictionary for decompression.
 *  Note : dictionaries created by reference, using ZSTD_dlm_byRef, are smaller */
size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
{
    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}

size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;   /* support sizeof on NULL */
    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0);
}

/*! ZSTD_getDictID_fromDDict() :
 *  Provides the dictID of the dictionary loaded into `ddict`.
 *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;
    return ddict->dictID;
}
@ -1,44 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


#ifndef ZSTD_DDICT_H
#define ZSTD_DDICT_H

/*-*******************************************************
 *  Dependencies
 *********************************************************/
#include "../common/zstd_deps.h"   /* size_t */
#include "../zstd.h"               /* ZSTD_DDict, and several public functions */


/*-*******************************************************
 *  Interface
 *********************************************************/

/* note: several prototypes are already published in `zstd.h` :
 * ZSTD_createDDict()
 * ZSTD_createDDict_byReference()
 * ZSTD_createDDict_advanced()
 * ZSTD_freeDDict()
 * ZSTD_initStaticDDict()
 * ZSTD_sizeof_DDict()
 * ZSTD_estimateDDictSize()
 * ZSTD_getDictID_fromDict()
 */

const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);

void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);




#endif /* ZSTD_DDICT_H */
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,68 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


#ifndef ZSTD_DEC_BLOCK_H
#define ZSTD_DEC_BLOCK_H

/*-*******************************************************
 *  Dependencies
 *********************************************************/
#include "../common/zstd_deps.h"       /* size_t */
#include "../zstd.h"                   /* DCtx, and some public functions */
#include "../common/zstd_internal.h"   /* blockProperties_t, and some public functions */
#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */


/* ===   Prototypes   === */

/* note: prototypes already published within `zstd.h` :
 * ZSTD_decompressBlock()
 */

/* note: prototypes already published within `zstd_internal.h` :
 * ZSTD_getcBlockSize()
 * ZSTD_decodeSeqHeaders()
 */


/* Streaming state is used to inform allocation of the literal buffer */
typedef enum {
    not_streaming = 0,
    is_streaming = 1
} streaming_operation;

/* ZSTD_decompressBlock_internal() :
 * decompress block, starting at `src`,
 * into destination buffer `dst`.
 * @return : decompressed block size,
 *           or an error code (which can be tested using ZSTD_isError())
 */
size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize, const int frame, const streaming_operation streaming);

/* ZSTD_buildFSETable() :
 * generate FSE decoding table for one symbol (ll, ml or off)
 * this function must be called with valid parameters only
 * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
 * in which case it cannot fail.
 * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
 * defined in zstd_decompress_internal.h.
 * Internal use only.
 */
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
                        const short* normalizedCounter, unsigned maxSymbolValue,
                        const U32* baseValue, const U8* nbAdditionalBits,
                        unsigned tableLog, void* wksp, size_t wkspSize,
                        int bmi2);


#endif /* ZSTD_DEC_BLOCK_H */
@ -1,238 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* zstd_decompress_internal:
 * objects and definitions shared within lib/decompress modules */

#ifndef ZSTD_DECOMPRESS_INTERNAL_H
#define ZSTD_DECOMPRESS_INTERNAL_H


/*-*******************************************************
 *  Dependencies
 *********************************************************/
#include "../common/mem.h"             /* BYTE, U16, U32 */
#include "../common/zstd_internal.h"   /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */



/*-*******************************************************
 *  Constants
 *********************************************************/
static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 18, 20, 22, 24, 28, 32, 40,
    48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
    0x2000, 0x4000, 0x8000, 0x10000 };

static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
    0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
    0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
    0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
    0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };

static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23,
    24, 25, 26, 27, 28, 29, 30, 31 };
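
/* Editor's note (not in the original file): these tables drive sequence
 * decoding; e.g. an offset code `oc` resolves to
 *     offset = OF_base[oc] + <OF_bits[oc] extra bits from the bitstream>
 * so oc = 10 gives base 0x3FD with 10 extra bits, i.e. offsets 0x3FD..0x7FC,
 * which meets OF_base[11] = 0x7FD exactly. LL_base/ML_base work the same way
 * with their matching bits tables (elided from this diff). */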

static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
    3, 4, 5, 6, 7, 8, 9, 10,
    11, 12, 13, 14, 15, 16, 17, 18,
    19, 20, 21, 22, 23, 24, 25, 26,
    27, 28, 29, 30, 31, 32, 33, 34,
    35, 37, 39, 41, 43, 47, 51, 59,
    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };


/*-*******************************************************
 *  Decompression types
 *********************************************************/
typedef struct {
    U32 fastMode;
    U32 tableLog;
} ZSTD_seqSymbol_header;

typedef struct {
    U16 nextState;
    BYTE nbAdditionalBits;
    BYTE nbBits;
    U32 baseValue;
} ZSTD_seqSymbol;

#define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))

#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12

typedef struct {
    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];   /* Note : Space reserved for FSE Tables */
    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];  /* is also used as temporary workspace while building hufTable during DDict creation */
    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];   /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
    HUF_DTable hufTable[HUF_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)];  /* can accommodate HUF_decompress4X */
    U32 rep[ZSTD_REP_NUM];
    U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
} ZSTD_entropyDTables_t;

typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;

typedef enum { zdss_init=0, zdss_loadHeader,
               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;

typedef enum {
    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists, free it) */
    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
} ZSTD_dictUses_e;

/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
typedef struct {
    const ZSTD_DDict** ddictPtrTable;
    size_t ddictPtrTableSize;
    size_t ddictPtrCount;
} ZSTD_DDictHashSet;

#ifndef ZSTD_DECODER_INTERNAL_BUFFER
#  define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16)
#endif

#define ZSTD_LBMIN 64
#define ZSTD_LBMAX (128 << 10)

/* extra buffer, compensates when dst is not large enough to store litBuffer */
#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)

typedef enum {
    ZSTD_not_in_dst = 0,  /* Stored entirely within litExtraBuffer */
    ZSTD_in_dst = 1,      /* Stored entirely within dst (in memory after current output write) */
    ZSTD_split = 2        /* Split between litExtraBuffer and dst */
} ZSTD_litLocation_e;

struct ZSTD_DCtx_s
{
    const ZSTD_seqSymbol* LLTptr;
    const ZSTD_seqSymbol* MLTptr;
    const ZSTD_seqSymbol* OFTptr;
    const HUF_DTable* HUFptr;
    ZSTD_entropyDTables_t entropy;
    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
    const void* previousDstEnd;   /* detect continuity */
    const void* prefixStart;      /* start of current segment */
    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
    const void* dictEnd;          /* end of previous segment */
    size_t expected;
    ZSTD_frameHeader fParams;
    U64 processedCSize;
    U64 decodedSize;
    blockType_e bType;   /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
    ZSTD_dStage stage;
    U32 litEntropy;
    U32 fseEntropy;
    XXH64_state_t xxhState;
    size_t headerSize;
    ZSTD_format_e format;
    ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum;   /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
    U32 validateChecksum;   /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
    const BYTE* litPtr;
    ZSTD_customMem customMem;
    size_t litSize;
    size_t rleSize;
    size_t staticSize;
#if DYNAMIC_BMI2 != 0
    int bmi2;   /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
#endif

    /* dictionary */
    ZSTD_DDict* ddictLocal;
    const ZSTD_DDict* ddict;   /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
    U32 dictID;
    int ddictIsCold;           /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
    ZSTD_dictUses_e dictUses;
    ZSTD_DDictHashSet* ddictSet;   /* Hash set for multiple ddicts */
    ZSTD_refMultipleDDicts_e refMultipleDDicts;   /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
    int disableHufAsm;

    /* streaming */
    ZSTD_dStreamStage streamStage;
    char* inBuff;
    size_t inBuffSize;
    size_t inPos;
    size_t maxWindowSize;
    char* outBuff;
    size_t outBuffSize;
    size_t outStart;
    size_t outEnd;
    size_t lhSize;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
    void* legacyContext;
    U32 previousLegacyVersion;
    U32 legacyVersion;
#endif
    U32 hostageByte;
    int noForwardProgress;
    ZSTD_bufferMode_e outBufferMode;
    ZSTD_outBuffer expectedOutBuffer;

    /* workspace */
    BYTE* litBuffer;
    const BYTE* litBufferEnd;
    ZSTD_litLocation_e litBufferLocation;
    BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH];   /* literal buffer can be split between storage within dst and within this scratch buffer */
    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];

    size_t oversizedDuration;

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    void const* dictContentBeginForFuzzing;
    void const* dictContentEndForFuzzing;
#endif

    /* Tracing */
#if ZSTD_TRACE
    ZSTD_TraceCtx traceCtx;
#endif
};  /* typedef'd to ZSTD_DCtx within "zstd.h" */

MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
#if DYNAMIC_BMI2 != 0
    return dctx->bmi2;
#else
    (void)dctx;
    return 0;
#endif
}

/*-*******************************************************
 *  Shared internal functions
 *********************************************************/

/*! ZSTD_loadDEntropy() :
 *  dict : must point at beginning of a valid zstd dictionary.
 * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
                         const void* const dict, size_t const dictSize);

/*! ZSTD_checkContinuity() :
 *  check if next `dst` follows previous position, where decompression ended.
 *  If yes, do nothing (continue on current segment).
 *  If not, classify previous segment as "external dictionary", and start a new segment.
 *  This function cannot fail. */
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);


#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
File diff suppressed because it is too large
@ -1,158 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif

#include <stdio.h>   /* fprintf */
#include <stdlib.h>  /* malloc, free, qsort */
#include <string.h>  /* memset */
#include <time.h>    /* clock */
#include "../common/mem.h"  /* read */
#include "../common/pool.h"
#include "../common/threading.h"
#include "../common/zstd_internal.h"  /* includes zstd.h */
#include "../zdict.h"

/**
 * COVER_best_t is used for two purposes:
 * 1. Synchronizing threads.
 * 2. Saving the best parameters and dictionary.
 *
 * All of the methods except COVER_best_init() are thread safe if zstd is
 * compiled with multithreaded support.
 */
typedef struct COVER_best_s {
    ZSTD_pthread_mutex_t mutex;
    ZSTD_pthread_cond_t cond;
    size_t liveJobs;
    void *dict;
    size_t dictSize;
    ZDICT_cover_params_t parameters;
    size_t compressedSize;
} COVER_best_t;

/**
 * A segment is a range in the source as well as the score of the segment.
 */
typedef struct {
    U32 begin;
    U32 end;
    U32 score;
} COVER_segment_t;

/**
 * Number of epochs and size of each epoch.
 */
typedef struct {
    U32 num;
    U32 size;
} COVER_epoch_info_t;

/**
 * Struct used for the dictionary selection function.
 */
typedef struct COVER_dictSelection {
    BYTE* dictContent;
    size_t dictSize;
    size_t totalCompressedSize;
} COVER_dictSelection_t;

/**
 * Computes the number of epochs and the size of each epoch.
 * We will make sure that each epoch gets at least 10 * k bytes.
 *
 * The COVER algorithms divide the data up into epochs of equal size and
 * select one segment from each epoch.
 *
 * @param maxDictSize The maximum allowed dictionary size.
 * @param nbDmers     The number of dmers we are training on.
 * @param k           The parameter k (segment size).
 * @param passes      The target number of passes over the dmer corpus.
 *                    More passes means a better dictionary.
 */
COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers,
                                       U32 k, U32 passes);
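
/* Editor's note -- illustrative sketch only, not the verbatim implementation
 * (which lives in cover.c, elided from this diff): based on the contract above,
 * the epoch count follows the dictionary budget and is then clamped so each
 * epoch keeps at least 10 * k dmers, roughly:
 *
 *     epochs.num  = MAX(1, maxDictSize / k / passes);
 *     epochs.size = nbDmers / epochs.num;
 *     if (epochs.size < MIN(10 * k, nbDmers)) {
 *         epochs.size = MIN(10 * k, nbDmers);
 *         epochs.num  = nbDmers / epochs.size;
 *     }
 */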

/**
 * Warns the user when their corpus is too small.
 */
void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel);

/**
 * Checks total compressed size of a dictionary
 */
size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
                                      const size_t *samplesSizes, const BYTE *samples,
                                      size_t *offsets,
                                      size_t nbTrainSamples, size_t nbSamples,
                                      BYTE *const dict, size_t dictBufferCapacity);

/**
 * Returns the sum of the sample sizes.
 */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples);

/**
 * Initialize the `COVER_best_t`.
 */
void COVER_best_init(COVER_best_t *best);

/**
 * Wait until liveJobs == 0.
 */
void COVER_best_wait(COVER_best_t *best);

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
void COVER_best_destroy(COVER_best_t *best);

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
void COVER_best_start(COVER_best_t *best);

/**
 * Called when a thread finishes executing, both on error or success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
                       COVER_dictSelection_t selection);
/**
 * Error function for COVER_selectDict function. Checks if the return
 * value is an error.
 */
unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection);

/**
 * Error function for COVER_selectDict function. Returns a struct where
 * return.totalCompressedSize is a ZSTD error.
 */
COVER_dictSelection_t COVER_dictSelectionError(size_t error);

/**
 * Always call this after COVER_selectDict() to free the memory used by the
 * newly created dictionary.
 */
void COVER_dictSelectionFree(COVER_dictSelection_t selection);

/**
 * Called to finalize the dictionary and select one based on whether or not
 * the shrink-dict flag was enabled. If enabled, the dictionary used is the
 * smallest dictionary within a specified regression of the compressed size
 * from the largest dictionary.
 */
COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
                       size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
                       size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize);
File diff suppressed because it is too large
@ -1,67 +0,0 @@
/*
 * divsufsort.h for libdivsufsort-lite
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DIVSUFSORT_H
#define _DIVSUFSORT_H 1

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */


/*- Prototypes -*/

/**
 * Constructs the suffix array of a given string.
 * @param T [0..n-1] The input string.
 * @param SA [0..n-1] The output array of suffixes.
 * @param n The length of the given string.
 * @param openMP enables OpenMP optimization.
 * @return 0 if no error occurred, -1 or -2 otherwise.
 */
int
divsufsort(const unsigned char *T, int *SA, int n, int openMP);

/**
 * Constructs the burrows-wheeler transformed string of a given string.
 * @param T [0..n-1] The input string.
 * @param U [0..n-1] The output string. (can be T)
 * @param A [0..n-1] The temporary array. (can be NULL)
 * @param n The length of the given string.
 * @param num_indexes The length of secondary indexes array. (can be NULL)
 * @param indexes The secondary indexes array. (can be NULL)
 * @param openMP enables OpenMP optimization.
 * @return The primary index if no error occurred, -1 or -2 otherwise.
 */
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);


#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */

#endif /* _DIVSUFSORT_H */
@ -1,766 +0,0 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include <stdio.h>   /* fprintf */
#include <stdlib.h>  /* malloc, free, qsort */
#include <string.h>  /* memset */
#include <time.h>    /* clock */

#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif

#include "../common/mem.h"  /* read */
#include "../common/pool.h"
#include "../common/threading.h"
#include "../common/zstd_internal.h"  /* includes zstd.h */
#include "../compress/zstd_compress_internal.h"  /* ZSTD_hash*() */
#include "../zdict.h"
#include "cover.h"


/*-*************************************
*  Constants
***************************************/
/**
 * 32-bit indexes are used to reference samples, so the samples size is limited
 * to 4 GB on 64-bit builds.
 * For 32-bit builds we choose 1 GB.
 * Most 32-bit platforms have 2 GB of user-mode addressable space, and we
 * allocate a large contiguous buffer, so 1 GB is already a high limit.
 */
#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define FASTCOVER_MAX_F 31
#define FASTCOVER_MAX_ACCEL 10
#define FASTCOVER_DEFAULT_SPLITPOINT 0.75
#define DEFAULT_F 20
#define DEFAULT_ACCEL 1


/*-*************************************
*  Console display
***************************************/
#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 0;
#endif
#undef DISPLAY
#define DISPLAY(...)                  \
    {                                 \
        fprintf(stderr, __VA_ARGS__); \
        fflush(stderr);               \
    }
#undef LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
    if (displayLevel >= l) {                    \
        DISPLAY(__VA_ARGS__);                   \
    } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#undef DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#ifndef LOCALDISPLAYUPDATE
static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
#endif
#undef LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                         \
    if (displayLevel >= l) {                                             \
        if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \
            g_time = clock();                                            \
            DISPLAY(__VA_ARGS__);                                        \
        }                                                                \
    }
#undef DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)


/*-*************************************
* Hash Functions
***************************************/
/**
 * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector
 */
static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 f, unsigned d) {
    if (d == 6) {
        return ZSTD_hash6Ptr(p, f);
    }
    return ZSTD_hash8Ptr(p, f);
}
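
/* Editor's note (not in the original file): ZSTD_hash6Ptr/ZSTD_hash8Ptr return
 * an f-bit value, so with f = 20 (DEFAULT_F) each dmer maps into a frequency
 * vector of 2^20 = 1,048,576 counters; d merely selects how many bytes feed
 * the hash (6 or 8). */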


/*-*************************************
* Acceleration
***************************************/
typedef struct {
    unsigned finalize;  /* Percentage of training samples used for ZDICT_finalizeDictionary */
    unsigned skip;      /* Number of dmers skipped between each dmer counted in computeFrequency */
} FASTCOVER_accel_t;


static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {
    { 100, 0 },  /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */
    { 100, 0 },  /* accel = 1 */
    { 50, 1 },   /* accel = 2 */
    { 34, 2 },   /* accel = 3 */
    { 25, 3 },   /* accel = 4 */
    { 20, 4 },   /* accel = 5 */
    { 17, 5 },   /* accel = 6 */
    { 14, 6 },   /* accel = 7 */
    { 13, 7 },   /* accel = 8 */
    { 11, 8 },   /* accel = 9 */
    { 10, 9 },   /* accel = 10 */
};


/*-*************************************
* Context
***************************************/
typedef struct {
    const BYTE *samples;
    size_t *offsets;
    const size_t *samplesSizes;
    size_t nbSamples;
    size_t nbTrainSamples;
    size_t nbTestSamples;
    size_t nbDmers;
    U32 *freqs;
    unsigned d;
    unsigned f;
    FASTCOVER_accel_t accelParams;
} FASTCOVER_ctx_t;


/*-*************************************
*  Helper functions
***************************************/
/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of all dmers with hash value d.
 * Let S_i be the hash value of the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
 */
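/* Editor's note, a worked instance of the formula above (not in the original
 * file): with k = 4 and d = 2, a segment S contributes k - d + 1 = 3 dmers;
 * if their hashed frequencies are F(S_1) = 5, F(S_2) = 0, F(S_3) = 7, then
 * Score(S) = 12. Repeated hash values inside one segment are only counted
 * once, which is what the segmentFreqs bookkeeping below enforces. */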
static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
                                               U32 *freqs, U32 begin, U32 end,
                                               ZDICT_cover_params_t parameters,
                                               U16* segmentFreqs) {
    /* Constants */
    const U32 k = parameters.k;
    const U32 d = parameters.d;
    const U32 f = ctx->f;
    const U32 dmersInK = k - d + 1;

    /* Try each segment (activeSegment) and save the best (bestSegment) */
    COVER_segment_t bestSegment = {0, 0, 0};
    COVER_segment_t activeSegment;

    /* Reset the activeDmers in the segment */
    /* The activeSegment starts at the beginning of the epoch. */
    activeSegment.begin = begin;
    activeSegment.end = begin;
    activeSegment.score = 0;

    /* Slide the activeSegment through the whole epoch.
     * Save the best segment in bestSegment.
     */
    while (activeSegment.end < end) {
        /* Get hash value of current dmer */
        const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);

        /* Add frequency of this index to score if this is the first occurrence of index in active segment */
        if (segmentFreqs[idx] == 0) {
            activeSegment.score += freqs[idx];
        }
        /* Increment end of segment and segmentFreqs */
        activeSegment.end += 1;
        segmentFreqs[idx] += 1;
        /* If the window is now too large, drop the first position */
        if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
            /* Get hash value of the dmer to be eliminated from active segment */
            const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
            segmentFreqs[delIndex] -= 1;
            /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */
            if (segmentFreqs[delIndex] == 0) {
                activeSegment.score -= freqs[delIndex];
            }
            /* Increment start of segment */
            activeSegment.begin += 1;
        }

        /* If this segment is the best so far save it */
        if (activeSegment.score > bestSegment.score) {
            bestSegment = activeSegment;
        }
    }

    /* Zero out rest of segmentFreqs array */
    while (activeSegment.begin < end) {
        const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
        segmentFreqs[delIndex] -= 1;
        activeSegment.begin += 1;
    }

    {
        /* Zero the frequency of hash value of each dmer covered by the chosen segment. */
        U32 pos;
        for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
            const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
            freqs[i] = 0;
        }
    }

    return bestSegment;
}


static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,
                                     size_t maxDictSize, unsigned f,
                                     unsigned accel) {
    /* k, d, and f are required parameters */
    if (parameters.d == 0 || parameters.k == 0) {
        return 0;
    }
    /* d has to be 6 or 8 */
    if (parameters.d != 6 && parameters.d != 8) {
        return 0;
    }
    /* k <= maxDictSize */
    if (parameters.k > maxDictSize) {
        return 0;
    }
    /* d <= k */
    if (parameters.d > parameters.k) {
        return 0;
    }
    /* 0 < f <= FASTCOVER_MAX_F */
    if (f > FASTCOVER_MAX_F || f == 0) {
        return 0;
    }
    /* 0 < splitPoint <= 1 */
    if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
        return 0;
    }
    /* 0 < accel <= 10 */
    if (accel > 10 || accel == 0) {
        return 0;
    }
    return 1;
}


/**
 * Clean up a context initialized with `FASTCOVER_ctx_init()`.
 */
static void
FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
{
    if (!ctx) return;

    free(ctx->freqs);
    ctx->freqs = NULL;

    free(ctx->offsets);
    ctx->offsets = NULL;
}


/**
 * Calculate the frequency of the hash value of each dmer in ctx->samples
 */
static void
FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
{
    const unsigned f = ctx->f;
    const unsigned d = ctx->d;
    const unsigned skip = ctx->accelParams.skip;
    const unsigned readLength = MAX(d, 8);
    size_t i;
    assert(ctx->nbTrainSamples >= 5);
    assert(ctx->nbTrainSamples <= ctx->nbSamples);
    for (i = 0; i < ctx->nbTrainSamples; i++) {
        size_t start = ctx->offsets[i];  /* start of current dmer */
        size_t const currSampleEnd = ctx->offsets[i+1];
        while (start + readLength <= currSampleEnd) {
            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
            freqs[dmerIndex]++;
            start = start + skip + 1;
        }
    }
}


/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success or error code on error.
 * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
 */
static size_t
FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
                   const void* samplesBuffer,
                   const size_t* samplesSizes, unsigned nbSamples,
                   unsigned d, double splitPoint, unsigned f,
                   FASTCOVER_accel_t accelParams)
{
    const BYTE* const samples = (const BYTE*)samplesBuffer;
    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
    /* Split samples into testing and training sets */
    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
    const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;

    /* Checks */
    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                     (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
        return ERROR(srcSize_wrong);
    }

    /* Check if there are at least 5 training samples */
    if (nbTrainSamples < 5) {
        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
        return ERROR(srcSize_wrong);
    }

    /* Check if there's a testing sample */
    if (nbTestSamples < 1) {
        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
        return ERROR(srcSize_wrong);
    }

    /* Zero the context */
    memset(ctx, 0, sizeof(*ctx));
    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
                 (unsigned)trainingSamplesSize);
    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
                 (unsigned)testSamplesSize);

    ctx->samples = samples;
    ctx->samplesSizes = samplesSizes;
    ctx->nbSamples = nbSamples;
    ctx->nbTrainSamples = nbTrainSamples;
    ctx->nbTestSamples = nbTestSamples;
    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
    ctx->d = d;
    ctx->f = f;
    ctx->accelParams = accelParams;

    /* The offsets of each file */
    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
    if (ctx->offsets == NULL) {
        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
        FASTCOVER_ctx_destroy(ctx);
        return ERROR(memory_allocation);
    }

    /* Fill offsets from the samplesSizes */
    {   U32 i;
        ctx->offsets[0] = 0;
        assert(nbSamples >= 5);
        for (i = 1; i <= nbSamples; ++i) {
            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
        }
    }

    /* Initialize frequency array of size 2^f */
    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
    if (ctx->freqs == NULL) {
        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
        FASTCOVER_ctx_destroy(ctx);
        return ERROR(memory_allocation);
    }

    DISPLAYLEVEL(2, "Computing frequencies\n");
    FASTCOVER_computeFrequency(ctx->freqs, ctx);

    return 0;
}
|
||||
|
||||
|
||||
/**
|
||||
* Given the prepared context build the dictionary.
|
||||
*/
|
||||
static size_t
|
||||
FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
|
||||
U32* freqs,
|
||||
void* dictBuffer, size_t dictBufferCapacity,
|
||||
ZDICT_cover_params_t parameters,
|
||||
U16* segmentFreqs)
|
||||
{
|
||||
BYTE *const dict = (BYTE *)dictBuffer;
|
||||
size_t tail = dictBufferCapacity;
|
||||
/* Divide the data into epochs. We will select one segment from each epoch. */
|
||||
const COVER_epoch_info_t epochs = COVER_computeEpochs(
|
||||
(U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);
|
||||
const size_t maxZeroScoreRun = 10;
|
||||
size_t zeroScoreRun = 0;
|
||||
size_t epoch;
|
||||
DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
|
||||
(U32)epochs.num, (U32)epochs.size);
|
||||
/* Loop through the epochs until there are no more segments or the dictionary
|
||||
* is full.
|
||||
*/
|
||||
for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
|
||||
const U32 epochBegin = (U32)(epoch * epochs.size);
|
||||
const U32 epochEnd = epochBegin + epochs.size;
|
||||
size_t segmentSize;
|
||||
/* Select a segment */
|
||||
COVER_segment_t segment = FASTCOVER_selectSegment(
|
||||
ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
|
||||
|
||||
/* If the segment covers no dmers, then we are out of content.
|
||||
* There may be new content in other epochs, for continue for some time.
|
||||
*/
|
||||
if (segment.score == 0) {
|
||||
if (++zeroScoreRun >= maxZeroScoreRun) {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
zeroScoreRun = 0;
|
||||
|
||||
/* Trim the segment if necessary and if it is too small then we are done */
|
||||
segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
|
||||
if (segmentSize < parameters.d) {
|
||||
break;
|
||||
}
|
||||
|
||||
/* We fill the dictionary from the back to allow the best segments to be
|
||||
* referenced with the smallest offsets.
|
||||
*/
|
||||
tail -= segmentSize;
|
||||
memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
|
||||
DISPLAYUPDATE(
|
||||
2, "\r%u%% ",
|
||||
(unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
|
||||
}
|
||||
DISPLAYLEVEL(2, "\r%79s\r", "");
|
||||
return tail;
|
||||
}
|
||||
|
||||
/**
 * Parameters for FASTCOVER_tryParameters().
 */
typedef struct FASTCOVER_tryParameters_data_s {
    const FASTCOVER_ctx_t* ctx;
    COVER_best_t* best;
    size_t dictBufferCapacity;
    ZDICT_cover_params_t parameters;
} FASTCOVER_tryParameters_data_t;


/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void FASTCOVER_tryParameters(void* opaque)
{
    /* Save parameters as local variables */
    FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t*)opaque;
    const FASTCOVER_ctx_t *const ctx = data->ctx;
    const ZDICT_cover_params_t parameters = data->parameters;
    size_t dictBufferCapacity = data->dictBufferCapacity;
    size_t totalCompressedSize = ERROR(GENERIC);
    /* Initialize array to keep track of frequency of dmer within activeSegment */
    U16* segmentFreqs = (U16*)calloc(((U64)1 << ctx->f), sizeof(U16));
    /* Allocate space for hash table, dict, and freqs */
    BYTE *const dict = (BYTE*)malloc(dictBufferCapacity);
    COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
    U32* freqs = (U32*)malloc(((U64)1 << ctx->f) * sizeof(U32));
    if (!segmentFreqs || !dict || !freqs) {
        DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
        goto _cleanup;
    }
    /* Copy the frequencies because we need to modify them */
    memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
    /* Build the dictionary */
    {   const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
                                                      parameters, segmentFreqs);

        const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
        selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
             ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
             totalCompressedSize);

        if (COVER_dictSelectionIsError(selection)) {
            DISPLAYLEVEL(1, "Failed to select dictionary\n");
            goto _cleanup;
        }
    }
_cleanup:
    free(dict);
    COVER_best_finish(data->best, parameters, selection);
    free(data);
    free(segmentFreqs);
    COVER_dictSelectionFree(selection);
    free(freqs);
}


static void
FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
                               ZDICT_cover_params_t* coverParams)
{
    coverParams->k = fastCoverParams.k;
    coverParams->d = fastCoverParams.d;
    coverParams->steps = fastCoverParams.steps;
    coverParams->nbThreads = fastCoverParams.nbThreads;
    coverParams->splitPoint = fastCoverParams.splitPoint;
    coverParams->zParams = fastCoverParams.zParams;
    coverParams->shrinkDict = fastCoverParams.shrinkDict;
}


static void
FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
                                   ZDICT_fastCover_params_t* fastCoverParams,
                                   unsigned f, unsigned accel)
{
    fastCoverParams->k = coverParams.k;
    fastCoverParams->d = coverParams.d;
    fastCoverParams->steps = coverParams.steps;
    fastCoverParams->nbThreads = coverParams.nbThreads;
    fastCoverParams->splitPoint = coverParams.splitPoint;
    fastCoverParams->f = f;
    fastCoverParams->accel = accel;
    fastCoverParams->zParams = coverParams.zParams;
    fastCoverParams->shrinkDict = coverParams.shrinkDict;
}


ZDICTLIB_API size_t
ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
                                const void* samplesBuffer,
                                const size_t* samplesSizes, unsigned nbSamples,
                                ZDICT_fastCover_params_t parameters)
{
    BYTE* const dict = (BYTE*)dictBuffer;
    FASTCOVER_ctx_t ctx;
    ZDICT_cover_params_t coverParams;
    FASTCOVER_accel_t accelParams;
    /* Initialize global data */
    g_displayLevel = (int)parameters.zParams.notificationLevel;
    /* Force splitPoint to 1.0; assign f and accel only if not provided */
    parameters.splitPoint = 1.0;
    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
    /* Convert to cover parameter */
    memset(&coverParams, 0, sizeof(coverParams));
    FASTCOVER_convertToCoverParams(parameters, &coverParams);
    /* Checks */
    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
                                   parameters.accel)) {
        DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
        return ERROR(parameter_outOfBound);
    }
    if (nbSamples == 0) {
        DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
        return ERROR(srcSize_wrong);
    }
    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
        DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                     ZDICT_DICTSIZE_MIN);
        return ERROR(dstSize_tooSmall);
    }
    /* Assign corresponding FASTCOVER_accel_t to accelParams */
    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
    /* Initialize context */
    {
        size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                                                  coverParams.d, parameters.splitPoint, parameters.f,
                                                  accelParams);
        if (ZSTD_isError(initVal)) {
            DISPLAYLEVEL(1, "Failed to initialize context\n");
            return initVal;
        }
    }
    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
    /* Build the dictionary */
    DISPLAYLEVEL(2, "Building dictionary\n");
    {
        /* Initialize array to keep track of frequency of dmer within activeSegment */
        U16* segmentFreqs = (U16*)calloc(((U64)1 << parameters.f), sizeof(U16));
        const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
                                                      dictBufferCapacity, coverParams, segmentFreqs);
        const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
        const size_t dictionarySize = ZDICT_finalizeDictionary(
            dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
            samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
        if (!ZSTD_isError(dictionarySize)) {
            DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                         (unsigned)dictionarySize);
        }
        FASTCOVER_ctx_destroy(&ctx);
        free(segmentFreqs);
        return dictionarySize;
    }
}


ZDICTLIB_API size_t
ZDICT_optimizeTrainFromBuffer_fastCover(
                    void* dictBuffer, size_t dictBufferCapacity,
                    const void* samplesBuffer,
                    const size_t* samplesSizes, unsigned nbSamples,
                    ZDICT_fastCover_params_t* parameters)
{
    ZDICT_cover_params_t coverParams;
    FASTCOVER_accel_t accelParams;
    /* constants */
    const unsigned nbThreads = parameters->nbThreads;
    const double splitPoint =
        parameters->splitPoint <= 0.0 ? FASTCOVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
    const unsigned kIterations =
        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
    const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
    const unsigned shrinkDict = 0;
    /* Local variables */
    const int displayLevel = (int)parameters->zParams.notificationLevel;
    unsigned iteration = 1;
    unsigned d;
    unsigned k;
    COVER_best_t best;
    POOL_ctx *pool = NULL;
    int warned = 0;
    /* Checks */
    if (splitPoint <= 0 || splitPoint > 1) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
        return ERROR(parameter_outOfBound);
    }
    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
        return ERROR(parameter_outOfBound);
    }
    if (kMinK < kMaxD || kMaxK < kMinK) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
        return ERROR(parameter_outOfBound);
    }
    if (nbSamples == 0) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
        return ERROR(srcSize_wrong);
    }
    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
                          ZDICT_DICTSIZE_MIN);
        return ERROR(dstSize_tooSmall);
    }
    if (nbThreads > 1) {
        pool = POOL_create(nbThreads, 1);
        if (!pool) {
            return ERROR(memory_allocation);
        }
    }
    /* Initialization */
    COVER_best_init(&best);
    memset(&coverParams, 0, sizeof(coverParams));
    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
    accelParams = FASTCOVER_defaultAccelParameters[accel];
    /* Turn down global display level to clean up display at level 2 and below */
    g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
    /* Loop through d first because each new value needs a new context */
    LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                      kIterations);
    for (d = kMinD; d <= kMaxD; d += 2) {
        /* Initialize the context for this value of d */
        FASTCOVER_ctx_t ctx;
        LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
        {
            size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);
            if (ZSTD_isError(initVal)) {
                LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
                COVER_best_destroy(&best);
                POOL_free(pool);
                return initVal;
            }
        }
        if (!warned) {
            COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
            warned = 1;
        }
        /* Loop through k reusing the same context */
        for (k = kMinK; k <= kMaxK; k += kStepSize) {
            /* Prepare the arguments */
            FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t*)malloc(
                sizeof(FASTCOVER_tryParameters_data_t));
            LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
            if (!data) {
                LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
                COVER_best_destroy(&best);
                FASTCOVER_ctx_destroy(&ctx);
                POOL_free(pool);
                return ERROR(memory_allocation);
            }
            data->ctx = &ctx;
            data->best = &best;
            data->dictBufferCapacity = dictBufferCapacity;
            data->parameters = coverParams;
            data->parameters.k = k;
            data->parameters.d = d;
            data->parameters.splitPoint = splitPoint;
            data->parameters.steps = kSteps;
            data->parameters.shrinkDict = shrinkDict;
            data->parameters.zParams.notificationLevel = (unsigned)g_displayLevel;
            /* Check the parameters */
            if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
                                           data->ctx->f, accel)) {
                DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
                free(data);
                continue;
            }
            /* Call the function and pass ownership of data to it */
            COVER_best_start(&best);
            if (pool) {
                POOL_add(pool, &FASTCOVER_tryParameters, data);
            } else {
                FASTCOVER_tryParameters(data);
            }
            /* Print status */
            LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
                               (unsigned)((iteration * 100) / kIterations));
            ++iteration;
        }
        COVER_best_wait(&best);
        FASTCOVER_ctx_destroy(&ctx);
    }
    LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
    /* Fill the output buffer and parameters with output of the best parameters */
    {
        const size_t dictSize = best.dictSize;
        if (ZSTD_isError(best.compressedSize)) {
            const size_t compressedSize = best.compressedSize;
            COVER_best_destroy(&best);
            POOL_free(pool);
            return compressedSize;
        }
        FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
        memcpy(dictBuffer, best.dict, dictSize);
        COVER_best_destroy(&best);
        POOL_free(pool);
        return dictSize;
    }
}
File diff suppressed because it is too large
@ -1,474 +0,0 @@

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef ZSTD_ZDICT_H
#define ZSTD_ZDICT_H

/*====== Dependencies ======*/
#include <stddef.h>  /* size_t */


/* =====   ZDICTLIB_API : control library symbols visibility   ===== */
#ifndef ZDICTLIB_VISIBLE
   /* Backwards compatibility with old macro name */
#  ifdef ZDICTLIB_VISIBILITY
#    define ZDICTLIB_VISIBLE ZDICTLIB_VISIBILITY
#  elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
#    define ZDICTLIB_VISIBLE __attribute__ ((visibility ("default")))
#  else
#    define ZDICTLIB_VISIBLE
#  endif
#endif

#ifndef ZDICTLIB_HIDDEN
#  if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
#    define ZDICTLIB_HIDDEN __attribute__ ((visibility ("hidden")))
#  else
#    define ZDICTLIB_HIDDEN
#  endif
#endif

#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBLE
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBLE /* It isn't required, but it allows generation of better code, saving a function pointer load from the IAT and an indirect jump. */
#else
#  define ZDICTLIB_API ZDICTLIB_VISIBLE
#endif

/*******************************************************************************
 * Zstd dictionary builder
 *
 * FAQ
 * ===
 * Why should I use a dictionary?
 * ------------------------------
 *
 * Zstd can use dictionaries to improve compression ratio of small data.
 * Traditionally small files don't compress well because there is very little
 * repetition in a single sample, since it is small. But, if you are compressing
 * many similar files, like a bunch of JSON records that share the same
 * structure, you can train a dictionary ahead of time on some samples of
 * these files. Then, zstd can use the dictionary to find repetitions that are
 * present across samples. This can vastly improve compression ratio.
 *
 * When is a dictionary useful?
 * ----------------------------
 *
 * Dictionaries are useful when compressing many small files that are similar.
 * The larger a file is, the less benefit a dictionary will have. Generally,
 * we don't expect dictionary compression to be effective past 100KB. And the
 * smaller a file is, the more we would expect the dictionary to help.
 *
 * How do I use a dictionary?
 * --------------------------
 *
 * Simply pass the dictionary to the zstd compressor with
 * `ZSTD_CCtx_loadDictionary()`. The same dictionary must then be passed to
 * the decompressor, using `ZSTD_DCtx_loadDictionary()`. There are other
 * more advanced functions that allow selecting some options, see zstd.h for
 * complete documentation.
 *
 * What is a zstd dictionary?
 * --------------------------
 *
 * A zstd dictionary has two pieces: Its header, and its content. The header
 * contains a magic number, the dictionary ID, and entropy tables. These
 * entropy tables allow zstd to save on header costs in the compressed file,
 * which really matters for small data. The content is just bytes, which are
 * repeated content that is common across many samples.
 *
 * What is a raw content dictionary?
 * ---------------------------------
 *
 * A raw content dictionary is just bytes. It doesn't have a zstd dictionary
 * header, a dictionary ID, or entropy tables. Any buffer is a valid raw
 * content dictionary.
 *
 * How do I train a dictionary?
 * ----------------------------
 *
 * Gather samples from your use case. These samples should be similar to each
 * other. If you have several use cases, you could try to train one dictionary
 * per use case.
 *
 * Pass those samples to `ZDICT_trainFromBuffer()` and that will train your
 * dictionary. There are a few advanced versions of this function, but this
 * is a great starting point. If you want to further tune your dictionary
 * you could try `ZDICT_optimizeTrainFromBuffer_cover()`. If that is too slow
 * you can try `ZDICT_optimizeTrainFromBuffer_fastCover()`.
 *
 * If the dictionary training function fails, that is likely because you
 * either passed too few samples, or a dictionary would not be effective
 * for your data. Look at the messages that the dictionary trainer printed;
 * if it doesn't say too few samples, then a dictionary would not be effective.
 *
 * How large should my dictionary be?
 * ----------------------------------
 *
 * A reasonable dictionary size, the `dictBufferCapacity`, is about 100KB.
 * The zstd CLI defaults to a 110KB dictionary. You likely don't need a
 * dictionary larger than that. But, most use cases can get away with a
 * smaller dictionary. The advanced dictionary builders can automatically
 * shrink the dictionary for you, and select the smallest size that doesn't
 * hurt compression ratio too much. See the `shrinkDict` parameter.
 * A smaller dictionary can save memory, and potentially speed up
 * compression.
 *
 * How many samples should I provide to the dictionary builder?
 * ------------------------------------------------------------
 *
 * We generally recommend passing ~100x the size of the dictionary
 * in samples. A few thousand should suffice. Having too few samples
 * can hurt the dictionary's effectiveness. Having more samples will
 * only improve the dictionary's effectiveness. But having too many
 * samples can slow down the dictionary builder.
 *
 * How do I determine if a dictionary will be effective?
 * -----------------------------------------------------
 *
 * Simply train a dictionary and try it out. You can use zstd's built-in
 * benchmarking tool to test the dictionary effectiveness.
 *
 *   # Benchmark levels 1-3 without a dictionary
 *   zstd -b1e3 -r /path/to/my/files
 *   # Benchmark levels 1-3 with a dictionary
 *   zstd -b1e3 -r /path/to/my/files -D /path/to/my/dictionary
 *
 * When should I retrain a dictionary?
 * -----------------------------------
 *
 * You should retrain a dictionary when its effectiveness drops. Dictionary
 * effectiveness drops as the data you are compressing changes. Generally, we do
 * expect dictionaries to "decay" over time, as your data changes, but the rate
 * at which they decay depends on your use case. Internally, we regularly
 * retrain dictionaries, and if the new dictionary performs significantly
 * better than the old dictionary, we will ship the new dictionary.
 *
 * I have a raw content dictionary, how do I turn it into a zstd dictionary?
 * -------------------------------------------------------------------------
 *
 * If you have a raw content dictionary, e.g. by manually constructing it, or
 * using a third-party dictionary builder, you can turn it into a zstd
 * dictionary by using `ZDICT_finalizeDictionary()`. You'll also have to
 * provide some samples of the data. It will add the zstd header to the
 * raw content, which contains a dictionary ID and entropy tables, which
 * will improve compression ratio, and allow zstd to write the dictionary ID
 * into the frame, if you so choose.
 *
 * Do I have to use zstd's dictionary builder?
 * -------------------------------------------
 *
 * No! You can construct dictionary content however you please, it is just
 * bytes. It will always be valid as a raw content dictionary. If you want
 * a zstd dictionary, which can improve compression ratio, use
 * `ZDICT_finalizeDictionary()`.
 *
 * What is the attack surface of a zstd dictionary?
 * ------------------------------------------------
 *
 * Zstd is heavily fuzz tested, including loading fuzzed dictionaries, so
 * zstd should never crash, or access out-of-bounds memory no matter what
 * the dictionary is. However, if an attacker can control the dictionary
 * during decompression, they can cause zstd to generate arbitrary bytes,
 * just like if they controlled the compressed data.
 *
 ******************************************************************************/


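/* A minimal usage sketch of the loading flow described above (illustrative
 * only, not part of this header): the same `dict` buffer must be loaded on
 * both sides. `dict`, `dictSize`, and the I/O buffers are assumptions.
 */
#if 0
    /* compression side */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
    size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);

    /* decompression side: load the exact same dictionary */
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    ZSTD_DCtx_loadDictionary(dctx, dict, dictSize);
    size_t const rSize = ZSTD_decompressDCtx(dctx, out, outCapacity, dst, cSize);

    ZSTD_freeCCtx(cctx);
    ZSTD_freeDCtx(dctx);
#endif
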
/*! ZDICT_trainFromBuffer():
 *  Train a dictionary from an array of samples.
 *  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,
 *  f=20, and accel=1.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Note:  Dictionary training will fail if there are not enough samples to construct a
 *         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).
 *         If dictionary training fails, you should use zstd without a dictionary, as the dictionary
 *         would've been ineffective anyways. If you believe your samples would benefit from a dictionary
 *         please open an issue with details, and we can look into it.
 *  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                                          const void* samplesBuffer,
                                          const size_t* samplesSizes, unsigned nbSamples);

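/* A minimal training sketch under the guidance above (illustrative only;
 * `samplesBuffer`, `samplesSizes`, and `nbSamples` are assumptions):
 */
#if 0
    char dict[102400];   /* ~100 KB target, per the tips above */
    size_t const dictSize = ZDICT_trainFromBuffer(dict, sizeof(dict),
                                                  samplesBuffer, samplesSizes, nbSamples);
    if (ZDICT_isError(dictSize)) {
        /* too few samples, or a dictionary would not help this data:
         * fall back to compressing without a dictionary */
        fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
    }
#endif
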
typedef struct {
    int      compressionLevel;   /**< optimize for a specific zstd compression level; 0 means default */
    unsigned notificationLevel;  /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
    unsigned dictID;             /**< force dictID value; 0 means auto mode (32-bits random value)
                                  *   NOTE: The zstd format reserves some dictionary IDs for future use.
                                  *         You may use them in private settings, but be warned that they
                                  *         may be used by zstd in a public dictionary registry in the future.
                                  *         These dictionary IDs are:
                                  *           - low range  : <= 32767
                                  *           - high range : >= (2^31)
                                  */
} ZDICT_params_t;

/*! ZDICT_finalizeDictionary():
 * Given a custom content as a basis for dictionary, and a set of samples,
 * finalize dictionary by adding headers and statistics according to the zstd
 * dictionary format.
 *
 * Samples must be stored concatenated in a flat buffer `samplesBuffer`,
 * supplied with an array of sizes `samplesSizes`, providing the size of each
 * sample in order. The samples are used to construct the statistics, so they
 * should be representative of what you will compress with this dictionary.
 *
 * The compression level can be set in `parameters`. You should pass the
 * compression level you expect to use in production. The statistics for each
 * compression level differ, so tuning the dictionary for the compression level
 * can help quite a bit.
 *
 * You can set an explicit dictionary ID in `parameters`, or allow us to pick
 * a random dictionary ID for you, but we can't guarantee no collisions.
 *
 * The dstDictBuffer and the dictContent may overlap, and the content will be
 * appended to the end of the header. If the header + the content doesn't fit in
 * maxDictSize the beginning of the content is truncated to make room, since it
 * is presumed that the most profitable content is at the end of the dictionary,
 * since that is the cheapest to reference.
 *
 * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).
 *
 * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),
 *          or an error code, which can be tested by ZDICT_isError().
 * Note: ZDICT_finalizeDictionary() will push notifications into stderr if
 *       instructed to, using notificationLevel>0.
 * NOTE: This function currently may fail in several edge cases including:
 *         * Not enough samples
 *         * Samples are uncompressible
 *         * Samples are all exactly the same
 */
ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize,
                                             const void* dictContent, size_t dictContentSize,
                                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                             ZDICT_params_t parameters);


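/* A sketch of finalizing a raw content dictionary, as described above
 * (illustrative only; `rawDict`, `rawSize`, and the sample buffers are
 * assumptions):
 */
#if 0
    ZDICT_params_t params;
    char finalDict[102400];
    memset(&params, 0, sizeof(params));   /* 0 = defaults (level, dictID, logging) */
    params.compressionLevel = 3;          /* tune entropy tables for the production level */
    {   size_t const finalSize = ZDICT_finalizeDictionary(finalDict, sizeof(finalDict),
                                                          rawDict, rawSize,
                                                          samplesBuffer, samplesSizes, nbSamples,
                                                          params);
        if (ZDICT_isError(finalSize)) { /* e.g. too few samples */ }
    }
#endif
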
/*====== Helper functions ======*/
ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */
ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize);  /* returns dict header size; returns a ZSTD error code on failure */
ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);

#endif   /* ZSTD_ZDICT_H */

#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC)
#define ZSTD_ZDICT_H_STATIC

/* This can be overridden externally to hide static symbols. */
#ifndef ZDICTLIB_STATIC_API
#  if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#    define ZDICTLIB_STATIC_API __declspec(dllexport) ZDICTLIB_VISIBLE
#  elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#    define ZDICTLIB_STATIC_API __declspec(dllimport) ZDICTLIB_VISIBLE
#  else
#    define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE
#  endif
#endif

/* ====================================================================================
 * The definitions in this section are considered experimental.
 * They should never be used with a dynamic library, as they may change in the future.
 * They are provided for advanced usages.
 * Use them only in association with static linking.
 * ==================================================================================== */

#define ZDICT_DICTSIZE_MIN    256
/* Deprecated: Remove in v1.6.0 */
#define ZDICT_CONTENTSIZE_MIN 128

/*! ZDICT_cover_params_t:
 *  k and d are the only required parameters.
 *  For others, value 0 means default.
 */
typedef struct {
    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worst shrinkDictMaxRegression% worse than the max dict size dictionary. */
    ZDICT_params_t zParams;
} ZDICT_cover_params_t;

typedef struct {
    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
    unsigned f;                  /* log of size of frequency array : constraint: 0 < f <= 31 : 0 means default (20) */
    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */
    unsigned accel;              /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default (1) */
    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worst shrinkDictMaxRegression% worse than the max dict size dictionary. */

    ZDICT_params_t zParams;
} ZDICT_fastCover_params_t;

/*! ZDICT_trainFromBuffer_cover():
 *  Train a dictionary from an array of samples using the COVER algorithm.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          See ZDICT_trainFromBuffer() for details on failure modes.
 *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 */
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover(
          void *dictBuffer, size_t dictBufferCapacity,
          const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
          ZDICT_cover_params_t parameters);

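/* A sketch of explicit COVER training, where k and d must be set (illustrative
 * only; the chosen values and sample buffers are assumptions):
 */
#if 0
    ZDICT_cover_params_t params;
    char dict[102400];
    memset(&params, 0, sizeof(params));
    params.k = 1024;   /* segment size */
    params.d = 8;      /* dmer size; constraint: 0 < d <= k */
    {   size_t const dictSize = ZDICT_trainFromBuffer_cover(dict, sizeof(dict),
                                      samplesBuffer, samplesSizes, nbSamples, params);
        if (ZDICT_isError(dictSize)) { /* handle error */ }
    }
#endif
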
/*! ZDICT_optimizeTrainFromBuffer_cover():
 * The same requirements as above hold for all the parameters except `parameters`.
 * This function tries many parameter combinations and picks the best parameters.
 * `*parameters` is filled with the best parameters found,
 * dictionary constructed with those parameters is stored in `dictBuffer`.
 *
 * All of the parameters d, k, steps are optional.
 * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
 * If steps is zero it uses the default value (40).
 * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000].
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          On success `*parameters` contains the parameters selected.
 *          See ZDICT_trainFromBuffer() for details on failure modes.
 * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory per input byte for each thread.
 */
ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover(
          void* dictBuffer, size_t dictBufferCapacity,
          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
          ZDICT_cover_params_t* parameters);

/*! ZDICT_trainFromBuffer_fastCover():
 *  Train a dictionary from an array of samples using a modified version of the COVER algorithm.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  d and k are required.
 *  All other parameters are optional and will use default values if not provided.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          See ZDICT_trainFromBuffer() for details on failure modes.
 *  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 */
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
                    size_t dictBufferCapacity, const void *samplesBuffer,
                    const size_t *samplesSizes, unsigned nbSamples,
                    ZDICT_fastCover_params_t parameters);

/*! ZDICT_optimizeTrainFromBuffer_fastCover():
 * The same requirements as above hold for all the parameters except `parameters`.
 * This function tries many parameter combinations (specifically, k and d combinations)
 * and picks the best parameters. `*parameters` is filled with the best parameters found,
 * dictionary constructed with those parameters is stored in `dictBuffer`.
 * All of the parameters d, k, steps, f, and accel are optional.
 * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
 * If steps is zero it uses the default value (40).
 * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000].
 * If f is zero, default value of 20 is used.
 * If accel is zero, default value of 1 is used.
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          On success `*parameters` contains the parameters selected.
 *          See ZDICT_trainFromBuffer() for details on failure modes.
 * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
 */
ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
                    size_t dictBufferCapacity, const void* samplesBuffer,
                    const size_t* samplesSizes, unsigned nbSamples,
                    ZDICT_fastCover_params_t* parameters);

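/* A sketch of letting the fastCover optimizer pick k and d (illustrative only;
 * zeroed fields mean "search defaults", and the sample buffers are assumptions):
 */
#if 0
    ZDICT_fastCover_params_t params;
    char dict[102400];
    memset(&params, 0, sizeof(params));   /* k=d=0 => search k in [50, 2000], d in {6, 8} */
    params.nbThreads = 4;                 /* only honored if built with ZSTD_MULTITHREAD */
    {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dict, sizeof(dict),
                                      samplesBuffer, samplesSizes, nbSamples, &params);
        if (!ZDICT_isError(dictSize)) {
            /* params.k and params.d now hold the selected values */
        }
    }
#endif
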
typedef struct {
    unsigned selectivityLevel;   /* 0 means default; larger => select more => larger dictionary */
    ZDICT_params_t zParams;
} ZDICT_legacy_params_t;

/*! ZDICT_trainFromBuffer_legacy():
 *  Train a dictionary from an array of samples.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * `parameters` is optional and can be provided with values set to 0 to mean "default".
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          See ZDICT_trainFromBuffer() for details on failure modes.
 *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 *  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
 */
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy(
          void* dictBuffer, size_t dictBufferCapacity,
          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
          ZDICT_legacy_params_t parameters);


/* Deprecation warnings */
/* It is generally possible to disable deprecation warnings from the compiler,
   for example with -Wno-deprecated-declarations for gcc
   or _CRT_SECURE_NO_WARNINGS in Visual Studio.
   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
#  define ZDICT_DEPRECATED(message)   /* disable deprecation warnings */
#else
#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
#    define ZDICT_DEPRECATED(message) [[deprecated(message)]]
#  elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405)
#    define ZDICT_DEPRECATED(message) __attribute__((deprecated(message)))
#  elif (ZDICT_GCC_VERSION >= 301)
#    define ZDICT_DEPRECATED(message) __attribute__((deprecated))
#  elif defined(_MSC_VER)
#    define ZDICT_DEPRECATED(message) __declspec(deprecated(message))
#  else
#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
#    define ZDICT_DEPRECATED(message)
#  endif
#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */

ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
ZDICTLIB_STATIC_API
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);


#endif   /* ZSTD_ZDICT_H_STATIC */

#if defined (__cplusplus)
}
#endif
File diff suppressed because it is too large
@ -1,114 +0,0 @@

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_ERRORS_H_398273423
#define ZSTD_ERRORS_H_398273423

#if defined (__cplusplus)
extern "C" {
#endif

/*===== dependency =====*/
#include <stddef.h>   /* size_t */


/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
#ifndef ZSTDERRORLIB_VISIBLE
   /* Backwards compatibility with old macro name */
#  ifdef ZSTDERRORLIB_VISIBILITY
#    define ZSTDERRORLIB_VISIBLE ZSTDERRORLIB_VISIBILITY
#  elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
#    define ZSTDERRORLIB_VISIBLE __attribute__ ((visibility ("default")))
#  else
#    define ZSTDERRORLIB_VISIBLE
#  endif
#endif

#ifndef ZSTDERRORLIB_HIDDEN
#  if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
#    define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden")))
#  else
#    define ZSTDERRORLIB_HIDDEN
#  endif
#endif

#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBLE
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBLE /* It isn't required, but it allows generation of better code, saving a function pointer load from the IAT and an indirect jump. */
#else
#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE
#endif

/*-*********************************************
 *  Error codes list
 *-*********************************************
 *  Error codes _values_ are pinned down since v1.3.1 only.
 *  Therefore, don't rely on values if you may link to any version < v1.3.1.
 *
 *  Only values < 100 are considered stable.
 *
 *  note 1 : this API shall be used with static linking only.
 *           dynamic linking is not yet officially supported.
 *  note 2 : Prefer relying on the enum rather than on its value whenever possible.
 *           This is the only supported way to use the error list < v1.3.1
 *  note 3 : ZSTD_isError() is always correct, whatever the library version.
 **********************************************/
typedef enum {
  ZSTD_error_no_error = 0,
  ZSTD_error_GENERIC  = 1,
  ZSTD_error_prefix_unknown                = 10,
  ZSTD_error_version_unsupported           = 12,
  ZSTD_error_frameParameter_unsupported    = 14,
  ZSTD_error_frameParameter_windowTooLarge = 16,
  ZSTD_error_corruption_detected  = 20,
  ZSTD_error_checksum_wrong       = 22,
  ZSTD_error_literals_headerWrong = 24,
  ZSTD_error_dictionary_corrupted      = 30,
  ZSTD_error_dictionary_wrong          = 32,
  ZSTD_error_dictionaryCreation_failed = 34,
  ZSTD_error_parameter_unsupported             = 40,
  ZSTD_error_parameter_combination_unsupported = 41,
  ZSTD_error_parameter_outOfBound              = 42,
  ZSTD_error_tableLog_tooLarge       = 44,
  ZSTD_error_maxSymbolValue_tooLarge = 46,
  ZSTD_error_maxSymbolValue_tooSmall = 48,
  ZSTD_error_stabilityCondition_notRespected = 50,
  ZSTD_error_stage_wrong        = 60,
  ZSTD_error_init_missing       = 62,
  ZSTD_error_memory_allocation  = 64,
  ZSTD_error_workSpace_tooSmall = 66,
  ZSTD_error_dstSize_tooSmall = 70,
  ZSTD_error_srcSize_wrong    = 72,
  ZSTD_error_dstBuffer_null   = 74,
  ZSTD_error_noForwardProgress_destFull   = 80,
  ZSTD_error_noForwardProgress_inputEmpty = 82,
  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
  ZSTD_error_frameIndex_tooLarge = 100,
  ZSTD_error_seekableIO          = 102,
  ZSTD_error_dstBuffer_wrong     = 104,
  ZSTD_error_srcBuffer_wrong     = 105,
  ZSTD_error_sequenceProducer_failed   = 106,
  ZSTD_error_externalSequences_invalid = 107,
  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;

/*! ZSTD_getErrorCode() :
    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
    which can then be compared with the enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */


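/* A sketch of inspecting a zstd return value with this API (illustrative only;
 * `ret` stands for any size_t result returned by a zstd or zdict call):
 */
#if 0
    if (ZSTD_isError(ret)) {   /* ZSTD_isError() is declared in zstd.h */
        ZSTD_ErrorCode const code = ZSTD_getErrorCode(ret);
        if (code == ZSTD_error_dstSize_tooSmall) {
            /* grow the destination buffer and retry */
        }
        fprintf(stderr, "zstd: %s\n", ZSTD_getErrorString(code));
    }
#endif
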
#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_ERRORS_H_398273423 */