/**
 * meshoptimizer - version 0.22
 *
 * Copyright (C) 2016-2024, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
 *
 * Report bugs and download new versions at https://github.com/zeux/meshoptimizer
 *
 * This library is distributed under the MIT License. See notice at the end of this file.
 */
# pragma once
# include <assert.h>
# include <stddef.h>
/* Version macro; major * 1000 + minor * 10 + patch */
2024-10-26 01:41:51 +08:00
# define MESHOPTIMIZER_VERSION 220 /* 0.22 */
2020-12-12 20:06:59 +08:00
/* If no API is defined, assume default */
# ifndef MESHOPTIMIZER_API
# define MESHOPTIMIZER_API
# endif
2022-05-18 05:24:52 +08:00
/* Set the calling-convention for alloc/dealloc function pointers */
# ifndef MESHOPTIMIZER_ALLOC_CALLCONV
# ifdef _MSC_VER
# define MESHOPTIMIZER_ALLOC_CALLCONV __cdecl
# else
# define MESHOPTIMIZER_ALLOC_CALLCONV
# endif
# endif
2020-12-12 20:06:59 +08:00
/* Experimental APIs have unstable interface and might have implementation that's not fully tested or optimized */
2024-10-26 01:41:51 +08:00
# ifndef MESHOPTIMIZER_EXPERIMENTAL
2020-12-12 20:06:59 +08:00
# define MESHOPTIMIZER_EXPERIMENTAL MESHOPTIMIZER_API
2024-10-26 01:41:51 +08:00
# endif
2020-12-12 20:06:59 +08:00
/* C interface */
# ifdef __cplusplus
2024-10-26 01:41:51 +08:00
extern "C"
{
2020-12-12 20:06:59 +08:00
# endif
/**
 * Vertex attribute stream
 * Each element takes size bytes, beginning at data, with stride controlling the spacing between successive elements (stride >= size).
 */
struct meshopt_Stream
{
	const void* data; /* pointer to the first element of the stream */
	size_t size;      /* size of each element in bytes */
	size_t stride;    /* distance in bytes between successive elements; must be >= size */
};
/**
* Generates a vertex remap table from the vertex buffer and an optional index buffer and returns number of unique vertices
* As a result , all vertices that are binary equivalent map to the same ( new ) location , with no gaps in the resulting sequence .
* Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer / meshopt_remapIndexBuffer .
* Note that binary equivalence considers all vertex_size bytes , including padding which should be zero - initialized .
*
* destination must contain enough space for the resulting remap table ( vertex_count elements )
* indices can be NULL if the input is unindexed
*/
MESHOPTIMIZER_API size_t meshopt_generateVertexRemap ( unsigned int * destination , const unsigned int * indices , size_t index_count , const void * vertices , size_t vertex_count , size_t vertex_size ) ;
/**
* Generates a vertex remap table from multiple vertex streams and an optional index buffer and returns number of unique vertices
* As a result , all vertices that are binary equivalent map to the same ( new ) location , with no gaps in the resulting sequence .
* Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer / meshopt_remapIndexBuffer .
* To remap vertex buffers , you will need to call meshopt_remapVertexBuffer for each vertex stream .
* Note that binary equivalence considers all size bytes in each stream , including padding which should be zero - initialized .
*
* destination must contain enough space for the resulting remap table ( vertex_count elements )
* indices can be NULL if the input is unindexed
2023-11-03 05:03:02 +08:00
* stream_count must be <= 16
2020-12-12 20:06:59 +08:00
*/
MESHOPTIMIZER_API size_t meshopt_generateVertexRemapMulti ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count , const struct meshopt_Stream * streams , size_t stream_count ) ;
/**
* Generates vertex buffer from the source vertex buffer and remap table generated by meshopt_generateVertexRemap
*
* destination must contain enough space for the resulting vertex buffer ( unique_vertex_count elements , returned by meshopt_generateVertexRemap )
* vertex_count should be the initial vertex count and not the value returned by meshopt_generateVertexRemap
*/
MESHOPTIMIZER_API void meshopt_remapVertexBuffer ( void * destination , const void * vertices , size_t vertex_count , size_t vertex_size , const unsigned int * remap ) ;
/**
* Generate index buffer from the source index buffer and remap table generated by meshopt_generateVertexRemap
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
* indices can be NULL if the input is unindexed
*/
MESHOPTIMIZER_API void meshopt_remapIndexBuffer ( unsigned int * destination , const unsigned int * indices , size_t index_count , const unsigned int * remap ) ;
/**
* Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary
* All vertices that are binary equivalent ( wrt first vertex_size bytes ) map to the first vertex in the original vertex buffer .
* This makes it possible to use the index buffer for Z pre - pass or shadowmap rendering , while using the original index buffer for regular rendering .
* Note that binary equivalence considers all vertex_size bytes , including padding which should be zero - initialized .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
*/
MESHOPTIMIZER_API void meshopt_generateShadowIndexBuffer ( unsigned int * destination , const unsigned int * indices , size_t index_count , const void * vertices , size_t vertex_count , size_t vertex_size , size_t vertex_stride ) ;
/**
* Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary
* All vertices that are binary equivalent ( wrt specified streams ) map to the first vertex in the original vertex buffer .
* This makes it possible to use the index buffer for Z pre - pass or shadowmap rendering , while using the original index buffer for regular rendering .
* Note that binary equivalence considers all size bytes in each stream , including padding which should be zero - initialized .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
2023-11-03 05:03:02 +08:00
* stream_count must be <= 16
2020-12-12 20:06:59 +08:00
*/
MESHOPTIMIZER_API void meshopt_generateShadowIndexBufferMulti ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count , const struct meshopt_Stream * streams , size_t stream_count ) ;
2021-04-18 22:15:43 +08:00
/**
* Generate index buffer that can be used as a geometry shader input with triangle adjacency topology
* Each triangle is converted into a 6 - vertex patch with the following layout :
* - 0 , 2 , 4 : original triangle vertices
* - 1 , 3 , 5 : vertices adjacent to edges 02 , 24 and 40
* The resulting patch can be rendered with geometry shaders using e . g . VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY .
* This can be used to implement algorithms like silhouette detection / expansion and other forms of GS - driven rendering .
*
* destination must contain enough space for the resulting index buffer ( index_count * 2 elements )
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
2021-04-18 22:15:43 +08:00
*/
2022-05-18 05:24:52 +08:00
MESHOPTIMIZER_API void meshopt_generateAdjacencyIndexBuffer ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride ) ;
2021-04-18 22:15:43 +08:00
/**
* Generate index buffer that can be used for PN - AEN tessellation with crack - free displacement
* Each triangle is converted into a 12 - vertex patch with the following layout :
* - 0 , 1 , 2 : original triangle vertices
* - 3 , 4 : opposing edge for edge 0 , 1
* - 5 , 6 : opposing edge for edge 1 , 2
* - 7 , 8 : opposing edge for edge 2 , 0
* - 9 , 10 , 11 : dominant vertices for corners 0 , 1 , 2
* The resulting patch can be rendered with hardware tessellation using PN - AEN and displacement mapping .
* See " Tessellation on Any Budget " ( John McDonald , GDC 2011 ) for implementation details .
*
* destination must contain enough space for the resulting index buffer ( index_count * 4 elements )
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
2021-04-18 22:15:43 +08:00
*/
2022-05-18 05:24:52 +08:00
MESHOPTIMIZER_API void meshopt_generateTessellationIndexBuffer ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride ) ;
2021-04-18 22:15:43 +08:00
2024-10-26 01:41:51 +08:00
/**
* Experimental : Generate index buffer that can be used for visibility buffer rendering and returns the size of the reorder table
* Each triangle ' s provoking vertex index is equal to primitive id ; this allows passing it to the fragment shader using nointerpolate attribute .
* This is important for performance on hardware where primitive id can ' t be accessed efficiently in fragment shader .
* The reorder table stores the original vertex id for each vertex in the new index buffer , and should be used in the vertex shader to load vertex data .
* The provoking vertex is assumed to be the first vertex in the triangle ; if this is not the case ( OpenGL ) , rotate each triangle ( abc - > bca ) before rendering .
* For maximum efficiency the input index buffer should be optimized for vertex cache first .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
* reorder must contain enough space for the worst case reorder table ( vertex_count + index_count / 3 elements )
*/
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_generateProvokingIndexBuffer ( unsigned int * destination , unsigned int * reorder , const unsigned int * indices , size_t index_count , size_t vertex_count ) ;
2020-12-12 20:06:59 +08:00
/**
* Vertex transform cache optimizer
* Reorders indices to reduce the number of GPU vertex shader invocations
* If index buffer contains multiple ranges for multiple draw calls , this functions needs to be called on each range individually .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
*/
MESHOPTIMIZER_API void meshopt_optimizeVertexCache ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count ) ;
/**
* Vertex transform cache optimizer for strip - like caches
* Produces inferior results to meshopt_optimizeVertexCache from the GPU vertex cache perspective
* However , the resulting index order is more optimal if the goal is to reduce the triangle strip length or improve compression efficiency
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
*/
MESHOPTIMIZER_API void meshopt_optimizeVertexCacheStrip ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count ) ;
/**
* Vertex transform cache optimizer for FIFO caches
* Reorders indices to reduce the number of GPU vertex shader invocations
* Generally takes ~ 3 x less time to optimize meshes but produces inferior results compared to meshopt_optimizeVertexCache
* If index buffer contains multiple ranges for multiple draw calls , this functions needs to be called on each range individually .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
* cache_size should be less than the actual GPU cache size to avoid cache thrashing
*/
MESHOPTIMIZER_API void meshopt_optimizeVertexCacheFifo ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count , unsigned int cache_size ) ;
/**
* Overdraw optimizer
* Reorders indices to reduce the number of GPU vertex shader invocations and the pixel overdraw
* If index buffer contains multiple ranges for multiple draw calls , this functions needs to be called on each range individually .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
* indices must contain index data that is the result of meshopt_optimizeVertexCache ( * not * the original mesh indices ! )
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
2020-12-12 20:06:59 +08:00
* threshold indicates how much the overdraw optimizer can degrade vertex cache efficiency ( 1.05 = up to 5 % ) to reduce overdraw more efficiently
*/
MESHOPTIMIZER_API void meshopt_optimizeOverdraw ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , float threshold ) ;
/**
* Vertex fetch cache optimizer
* Reorders vertices and changes indices to reduce the amount of GPU memory fetches during vertex processing
* Returns the number of unique vertices , which is the same as input vertex count unless some vertices are unused
* This functions works for a single vertex stream ; for multiple vertex streams , use meshopt_optimizeVertexFetchRemap + meshopt_remapVertexBuffer for each stream .
*
* destination must contain enough space for the resulting vertex buffer ( vertex_count elements )
* indices is used both as an input and as an output index buffer
*/
MESHOPTIMIZER_API size_t meshopt_optimizeVertexFetch ( void * destination , unsigned int * indices , size_t index_count , const void * vertices , size_t vertex_count , size_t vertex_size ) ;
/**
* Vertex fetch cache optimizer
* Generates vertex remap to reduce the amount of GPU memory fetches during vertex processing
* Returns the number of unique vertices , which is the same as input vertex count unless some vertices are unused
* The resulting remap table should be used to reorder vertex / index buffers using meshopt_remapVertexBuffer / meshopt_remapIndexBuffer
*
* destination must contain enough space for the resulting remap table ( vertex_count elements )
*/
MESHOPTIMIZER_API size_t meshopt_optimizeVertexFetchRemap ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count ) ;
/**
* Index buffer encoder
* Encodes index data into an array of bytes that is generally much smaller ( < 1.5 bytes / triangle ) and compresses better ( < 1 bytes / triangle ) compared to original .
* Input index buffer must represent a triangle list .
* Returns encoded data size on success , 0 on error ; the only error condition is if buffer doesn ' t have enough space
* For maximum efficiency the index buffer being encoded has to be optimized for vertex cache and vertex fetch first .
*
* buffer must contain enough space for the encoded index buffer ( use meshopt_encodeIndexBufferBound to compute worst case size )
*/
MESHOPTIMIZER_API size_t meshopt_encodeIndexBuffer ( unsigned char * buffer , size_t buffer_size , const unsigned int * indices , size_t index_count ) ;
MESHOPTIMIZER_API size_t meshopt_encodeIndexBufferBound ( size_t index_count , size_t vertex_count ) ;
/**
2022-05-18 05:24:52 +08:00
* Set index encoder format version
2020-12-12 20:06:59 +08:00
* version must specify the data format version to encode ; valid values are 0 ( decodable by all library versions ) and 1 ( decodable by 0.14 + )
*/
2022-05-18 05:24:52 +08:00
MESHOPTIMIZER_API void meshopt_encodeIndexVersion ( int version ) ;
2020-12-12 20:06:59 +08:00
/**
* Index buffer decoder
* Decodes index data from an array of bytes generated by meshopt_encodeIndexBuffer
* Returns 0 if decoding was successful , and an error code otherwise
* The decoder is safe to use for untrusted input , but it may produce garbage data ( e . g . out of range indices ) .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
*/
MESHOPTIMIZER_API int meshopt_decodeIndexBuffer ( void * destination , size_t index_count , size_t index_size , const unsigned char * buffer , size_t buffer_size ) ;
/**
2022-05-18 05:24:52 +08:00
* Index sequence encoder
2020-12-12 20:06:59 +08:00
* Encodes index sequence into an array of bytes that is generally smaller and compresses better compared to original .
* Input index sequence can represent arbitrary topology ; for triangle lists meshopt_encodeIndexBuffer is likely to be better .
* Returns encoded data size on success , 0 on error ; the only error condition is if buffer doesn ' t have enough space
*
* buffer must contain enough space for the encoded index sequence ( use meshopt_encodeIndexSequenceBound to compute worst case size )
*/
2022-05-18 05:24:52 +08:00
MESHOPTIMIZER_API size_t meshopt_encodeIndexSequence ( unsigned char * buffer , size_t buffer_size , const unsigned int * indices , size_t index_count ) ;
MESHOPTIMIZER_API size_t meshopt_encodeIndexSequenceBound ( size_t index_count , size_t vertex_count ) ;
2020-12-12 20:06:59 +08:00
/**
* Index sequence decoder
* Decodes index data from an array of bytes generated by meshopt_encodeIndexSequence
* Returns 0 if decoding was successful , and an error code otherwise
* The decoder is safe to use for untrusted input , but it may produce garbage data ( e . g . out of range indices ) .
*
* destination must contain enough space for the resulting index sequence ( index_count elements )
*/
2022-05-18 05:24:52 +08:00
MESHOPTIMIZER_API int meshopt_decodeIndexSequence ( void * destination , size_t index_count , size_t index_size , const unsigned char * buffer , size_t buffer_size ) ;
2020-12-12 20:06:59 +08:00
/**
* Vertex buffer encoder
* Encodes vertex data into an array of bytes that is generally smaller and compresses better compared to original .
* Returns encoded data size on success , 0 on error ; the only error condition is if buffer doesn ' t have enough space
* This function works for a single vertex stream ; for multiple vertex streams , call meshopt_encodeVertexBuffer for each stream .
* Note that all vertex_size bytes of each vertex are encoded verbatim , including padding which should be zero - initialized .
2024-10-26 01:41:51 +08:00
* For maximum efficiency the vertex buffer being encoded has to be quantized and optimized for locality of reference ( cache / fetch ) first .
2020-12-12 20:06:59 +08:00
*
* buffer must contain enough space for the encoded vertex buffer ( use meshopt_encodeVertexBufferBound to compute worst case size )
*/
MESHOPTIMIZER_API size_t meshopt_encodeVertexBuffer ( unsigned char * buffer , size_t buffer_size , const void * vertices , size_t vertex_count , size_t vertex_size ) ;
MESHOPTIMIZER_API size_t meshopt_encodeVertexBufferBound ( size_t vertex_count , size_t vertex_size ) ;
/**
2022-05-18 05:24:52 +08:00
* Set vertex encoder format version
2020-12-12 20:06:59 +08:00
* version must specify the data format version to encode ; valid values are 0 ( decodable by all library versions )
*/
2022-05-18 05:24:52 +08:00
MESHOPTIMIZER_API void meshopt_encodeVertexVersion ( int version ) ;
2020-12-12 20:06:59 +08:00
/**
* Vertex buffer decoder
* Decodes vertex data from an array of bytes generated by meshopt_encodeVertexBuffer
* Returns 0 if decoding was successful , and an error code otherwise
* The decoder is safe to use for untrusted input , but it may produce garbage data .
*
* destination must contain enough space for the resulting vertex buffer ( vertex_count * vertex_size bytes )
*/
MESHOPTIMIZER_API int meshopt_decodeVertexBuffer ( void * destination , size_t vertex_count , size_t vertex_size , const unsigned char * buffer , size_t buffer_size ) ;
/**
* Vertex buffer filters
* These functions can be used to filter output of meshopt_decodeVertexBuffer in - place .
*
* meshopt_decodeFilterOct decodes octahedral encoding of a unit vector with K - bit ( K < = 16 ) signed X / Y as an input ; Z must store 1.0f .
* Each component is stored as an 8 - bit or 16 - bit normalized integer ; stride must be equal to 4 or 8. W is preserved as is .
*
* meshopt_decodeFilterQuat decodes 3 - component quaternion encoding with K - bit ( 4 < = K < = 16 ) component encoding and a 2 - bit component index indicating which component to reconstruct .
* Each component is stored as an 16 - bit integer ; stride must be equal to 8.
*
* meshopt_decodeFilterExp decodes exponential encoding of floating - point data with 8 - bit exponent and 24 - bit integer mantissa as 2 ^ E * M .
* Each 32 - bit component is decoded in isolation ; stride must be divisible by 4.
*/
2024-10-26 01:41:51 +08:00
MESHOPTIMIZER_API void meshopt_decodeFilterOct ( void * buffer , size_t count , size_t stride ) ;
MESHOPTIMIZER_API void meshopt_decodeFilterQuat ( void * buffer , size_t count , size_t stride ) ;
MESHOPTIMIZER_API void meshopt_decodeFilterExp ( void * buffer , size_t count , size_t stride ) ;
2021-11-19 21:02:23 +08:00
/**
* Vertex buffer filter encoders
* These functions can be used to encode data in a format that meshopt_decodeFilter can decode
2022-05-18 05:24:52 +08:00
*
2021-11-19 21:02:23 +08:00
* meshopt_encodeFilterOct encodes unit vectors with K - bit ( K < = 16 ) signed X / Y as an output .
* Each component is stored as an 8 - bit or 16 - bit normalized integer ; stride must be equal to 4 or 8. W is preserved as is .
* Input data must contain 4 floats for every vector ( count * 4 total ) .
2022-05-18 05:24:52 +08:00
*
2021-11-19 21:02:23 +08:00
* meshopt_encodeFilterQuat encodes unit quaternions with K - bit ( 4 < = K < = 16 ) component encoding .
* Each component is stored as an 16 - bit integer ; stride must be equal to 8.
* Input data must contain 4 floats for every quaternion ( count * 4 total ) .
2022-05-18 05:24:52 +08:00
*
2021-11-19 21:02:23 +08:00
* meshopt_encodeFilterExp encodes arbitrary ( finite ) floating - point data with 8 - bit exponent and K - bit integer mantissa ( 1 < = K < = 24 ) .
2023-11-03 05:03:02 +08:00
* Exponent can be shared between all components of a given vector as defined by stride or all values of a given component ; stride must be divisible by 4.
2021-11-19 21:02:23 +08:00
* Input data must contain stride / 4 floats for every vector ( count * stride / 4 total ) .
*/
2023-11-03 05:03:02 +08:00
/* Controls how meshopt_encodeFilterExp shares exponents between encoded components; see filter encoder documentation above */
enum meshopt_EncodeExpMode
{
	/* When encoding exponents, use separate values for each component (maximum quality) */
	meshopt_EncodeExpSeparate,
	/* When encoding exponents, use shared value for all components of each vector (better compression) */
	meshopt_EncodeExpSharedVector,
	/* When encoding exponents, use shared value for each component of all vectors (best compression) */
	meshopt_EncodeExpSharedComponent,
	/* Experimental: When encoding exponents, use separate values for each component, but clamp to 0 (good quality if very small values are not important) */
	meshopt_EncodeExpClamped,
};
2024-10-26 01:41:51 +08:00
MESHOPTIMIZER_API void meshopt_encodeFilterOct ( void * destination , size_t count , size_t stride , int bits , const float * data ) ;
MESHOPTIMIZER_API void meshopt_encodeFilterQuat ( void * destination , size_t count , size_t stride , int bits , const float * data ) ;
MESHOPTIMIZER_API void meshopt_encodeFilterExp ( void * destination , size_t count , size_t stride , int bits , const float * data , enum meshopt_EncodeExpMode mode ) ;
2020-12-12 20:06:59 +08:00
/**
 * Simplification options; pass a bitmask of these to the options parameter of meshopt_simplify et al.
 */
enum
{
	/* Do not move vertices that are located on the topological border (vertices on triangle edges that don't have a paired triangle). Useful for simplifying portions of the larger mesh. */
	meshopt_SimplifyLockBorder = 1 << 0,
	/* Improve simplification performance assuming input indices are a sparse subset of the mesh. Note that error becomes relative to subset extents. */
	meshopt_SimplifySparse = 1 << 1,
	/* Treat error limit and resulting error as absolute instead of relative to mesh extents. */
	meshopt_SimplifyErrorAbsolute = 1 << 2,
	/* Experimental: remove disconnected parts of the mesh during simplification incrementally, regardless of the topological restrictions inside components. */
	meshopt_SimplifyPrune = 1 << 3,
};
/**
* Mesh simplifier
2020-12-12 20:06:59 +08:00
* Reduces the number of triangles in the mesh , attempting to preserve mesh appearance as much as possible
* The algorithm tries to preserve mesh topology and can stop short of the target goal based on topology constraints or target error .
2024-10-26 01:41:51 +08:00
* If not all attributes from the input mesh are required , it ' s recommended to reindex the mesh without them prior to simplification .
2020-12-12 20:06:59 +08:00
* Returns the number of indices after simplification , with destination containing new index data
* The resulting index buffer references vertices from the original vertex buffer .
* If the original vertex data isn ' t required , creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended .
*
2021-01-10 02:04:09 +08:00
* destination must contain enough space for the target index buffer , worst case is index_count elements ( * not * target_index_count ) !
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
* target_error represents the error relative to mesh extents that can be tolerated , e . g . 0.01 = 1 % deformation ; value range [ 0. .1 ]
* options must be a bitmask composed of meshopt_SimplifyX options ; 0 is a safe default
2020-12-28 08:54:21 +08:00
* result_error can be NULL ; when it ' s not NULL , it will contain the resulting ( relative ) error after simplification
2020-12-12 20:06:59 +08:00
*/
2022-12-22 23:22:33 +08:00
MESHOPTIMIZER_API size_t meshopt_simplify ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , size_t target_index_count , float target_error , unsigned int options , float * result_error ) ;
2021-04-10 13:44:36 +08:00
2023-11-03 05:03:02 +08:00
/**
* Experimental : Mesh simplifier with attribute metric
2024-10-26 01:41:51 +08:00
* The algorithm enhances meshopt_simplify by incorporating attribute values into the error metric used to prioritize simplification order ; see meshopt_simplify documentation for details .
2023-11-03 05:03:02 +08:00
* Note that the number of attributes affects memory requirements and running time ; this algorithm requires ~ 1.5 x more memory and time compared to meshopt_simplify when using 4 scalar attributes .
*
* vertex_attributes should have attribute_count floats for each vertex
2024-10-26 01:41:51 +08:00
* attribute_weights should have attribute_count floats in total ; the weights determine relative priority of attributes between each other and wrt position
* attribute_count must be < = 32
* vertex_lock can be NULL ; when it ' s not NULL , it should have a value for each vertex ; 1 denotes vertices that can ' t be moved
2023-11-03 05:03:02 +08:00
*/
2024-10-26 01:41:51 +08:00
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifyWithAttributes ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , const float * vertex_attributes , size_t vertex_attributes_stride , const float * attribute_weights , size_t attribute_count , const unsigned char * vertex_lock , size_t target_index_count , float target_error , unsigned int options , float * result_error ) ;
2023-11-03 05:03:02 +08:00
2020-12-12 20:06:59 +08:00
/**
* Experimental : Mesh simplifier ( sloppy )
2021-11-19 21:02:23 +08:00
* Reduces the number of triangles in the mesh , sacrificing mesh appearance for simplification performance
2021-01-10 02:04:09 +08:00
* The algorithm doesn ' t preserve mesh topology but can stop short of the target goal based on target error .
2020-12-12 20:06:59 +08:00
* Returns the number of indices after simplification , with destination containing new index data
* The resulting index buffer references vertices from the original vertex buffer .
* If the original vertex data isn ' t required , creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended .
*
2021-01-10 02:04:09 +08:00
* destination must contain enough space for the target index buffer , worst case is index_count elements ( * not * target_index_count ) !
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
* target_error represents the error relative to mesh extents that can be tolerated , e . g . 0.01 = 1 % deformation ; value range [ 0. .1 ]
2021-01-10 02:04:09 +08:00
* result_error can be NULL ; when it ' s not NULL , it will contain the resulting ( relative ) error after simplification
2020-12-12 20:06:59 +08:00
*/
2021-01-10 02:04:09 +08:00
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifySloppy ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , size_t target_index_count , float target_error , float * result_error ) ;
2020-12-12 20:06:59 +08:00
/**
* Experimental : Point cloud simplifier
* Reduces the number of points in the cloud to reach the given target
* Returns the number of points after simplification , with destination containing new index data
* The resulting index buffer references vertices from the original vertex buffer .
* If the original vertex data isn ' t required , creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended .
*
2021-01-10 02:04:09 +08:00
* destination must contain enough space for the target index buffer ( target_vertex_count elements )
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
2023-11-03 05:03:02 +08:00
* vertex_colors can be NULL; when it's not NULL, it should have float3 color in the first 12 bytes of each vertex
2024-10-26 01:41:51 +08:00
* color_weight determines relative priority of color wrt position ; 1.0 is a safe default
2020-12-12 20:06:59 +08:00
*/
2023-11-03 05:03:02 +08:00
MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifyPoints ( unsigned int * destination , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , const float * vertex_colors , size_t vertex_colors_stride , float color_weight , size_t target_vertex_count ) ;
2020-12-12 20:06:59 +08:00
2020-12-28 08:54:21 +08:00
/**
2022-12-22 23:22:33 +08:00
* Returns the error scaling factor used by the simplifier to convert between absolute and relative extents
2022-05-18 05:24:52 +08:00
*
2020-12-28 08:54:21 +08:00
* Absolute error must be * divided * by the scaling factor before passing it to meshopt_simplify as target_error
* Relative error returned by meshopt_simplify via result_error must be * multiplied * by the scaling factor to get absolute error .
*/
2022-12-22 23:22:33 +08:00
MESHOPTIMIZER_API float meshopt_simplifyScale ( const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride ) ;
2020-12-28 08:54:21 +08:00
2020-12-12 20:06:59 +08:00
/**
* Mesh stripifier
* Converts a previously vertex cache optimized triangle list to triangle strip , stitching strips using restart index or degenerate triangles
* Returns the number of indices in the resulting strip , with destination containing new index data
* For maximum efficiency the index buffer being converted has to be optimized for vertex cache first .
* Using restart indices can result in ~ 10 % smaller index buffers , but on some GPUs restart indices may result in decreased performance .
*
* destination must contain enough space for the target index buffer , worst case can be computed with meshopt_stripifyBound
* restart_index should be 0xffff or 0xffffffff depending on index size , or 0 to use degenerate triangles
*/
MESHOPTIMIZER_API size_t meshopt_stripify ( unsigned int * destination , const unsigned int * indices , size_t index_count , size_t vertex_count , unsigned int restart_index ) ;
MESHOPTIMIZER_API size_t meshopt_stripifyBound ( size_t index_count ) ;
/**
* Mesh unstripifier
* Converts a triangle strip to a triangle list
* Returns the number of indices in the resulting list , with destination containing new index data
*
* destination must contain enough space for the target index buffer , worst case can be computed with meshopt_unstripifyBound
*/
MESHOPTIMIZER_API size_t meshopt_unstripify ( unsigned int * destination , const unsigned int * indices , size_t index_count , unsigned int restart_index ) ;
MESHOPTIMIZER_API size_t meshopt_unstripifyBound ( size_t index_count ) ;
struct meshopt_VertexCacheStatistics
{
	unsigned int vertices_transformed;
	unsigned int warps_executed;
	float acmr; /* transformed vertices / triangle count; best case 0.5, worst case 3.0, optimum depends on topology */
	float atvr; /* transformed vertices / vertex count; best case 1.0, worst case 6.0, optimum is 1.0 (each vertex is transformed once) */
};

/**
 * Vertex transform cache analyzer
 * Returns cache hit statistics using a simplified FIFO model
 * Results may not match actual GPU performance
 */
MESHOPTIMIZER_API struct meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int primgroup_size);
struct meshopt_OverdrawStatistics
{
	unsigned int pixels_covered;
	unsigned int pixels_shaded;
	float overdraw; /* shaded pixels / covered pixels; best case 1.0 */
};

/**
 * Overdraw analyzer
 * Returns overdraw statistics using a software rasterizer
 * Results may not match actual GPU performance
 *
 * vertex_positions should have float3 position in the first 12 bytes of each vertex
 */
MESHOPTIMIZER_API struct meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
struct meshopt_VertexFetchStatistics
{
	unsigned int bytes_fetched;
	float overfetch; /* fetched bytes / vertex buffer size; best case 1.0 (each byte is fetched once) */
};

/**
 * Vertex fetch cache analyzer
 * Returns cache hit statistics using a simplified direct mapped model
 * Results may not match actual GPU performance
 */
MESHOPTIMIZER_API struct meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const unsigned int* indices, size_t index_count, size_t vertex_count, size_t vertex_size);
2024-10-26 01:41:51 +08:00
/**
 * Meshlet is a small mesh cluster (subset) that consists of:
 * - triangles, an 8-bit micro triangle (index) buffer, that for each triangle specifies three local vertices to use;
 * - vertices, a 32-bit vertex indirection buffer, that for each local vertex specifies which mesh vertex to fetch vertex attributes from.
 *
 * For efficiency, meshlet triangles and vertices are packed into two large arrays; this structure contains offsets and counts to access the data.
 */
struct meshopt_Meshlet
{
	/* offsets within meshlet_vertices and meshlet_triangles arrays with meshlet data */
	unsigned int vertex_offset;
	unsigned int triangle_offset;

	/* number of vertices and triangles used in the meshlet; data is stored in consecutive range defined by offset and count */
	unsigned int vertex_count;
	unsigned int triangle_count;
};

/**
 * Meshlet builder
 * Splits the mesh into a set of meshlets where each meshlet has a micro index buffer indexing into meshlet vertices that refer to the original vertex buffer
 * The resulting data can be used to render meshes using NVidia programmable mesh shading pipeline, or in other cluster-based renderers.
 * When targeting mesh shading hardware, for maximum efficiency meshlets should be further optimized using meshopt_optimizeMeshlet.
 * When using buildMeshlets, vertex positions need to be provided to minimize the size of the resulting clusters.
 * When using buildMeshletsScan, for maximum efficiency the index buffer being converted has to be optimized for vertex cache first.
 *
 * meshlets must contain enough space for all meshlets, worst case size can be computed with meshopt_buildMeshletsBound
 * meshlet_vertices must contain enough space for all meshlets, worst case size is equal to max_meshlets * max_vertices
 * meshlet_triangles must contain enough space for all meshlets, worst case size is equal to max_meshlets * max_triangles * 3
 * vertex_positions should have float3 position in the first 12 bytes of each vertex
 * max_vertices and max_triangles must not exceed implementation limits (max_vertices <= 255 - not 256!, max_triangles <= 512; max_triangles must be divisible by 4)
 * cone_weight should be set to 0 when cone culling is not used, and a value between 0 and 1 otherwise to balance between cluster size and cone culling efficiency
 */
MESHOPTIMIZER_API size_t meshopt_buildMeshlets(struct meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t max_vertices, size_t max_triangles, float cone_weight);
MESHOPTIMIZER_API size_t meshopt_buildMeshletsScan(struct meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const unsigned int* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles);
MESHOPTIMIZER_API size_t meshopt_buildMeshletsBound(size_t index_count, size_t max_vertices, size_t max_triangles);
2020-12-12 20:06:59 +08:00
2024-10-26 01:41:51 +08:00
/**
 * Experimental: Meshlet optimizer
 * Reorders meshlet vertices and triangles to maximize locality to improve rasterizer throughput
 *
 * meshlet_triangles and meshlet_vertices must refer to meshlet triangle and vertex index data; when buildMeshlets* is used, these
 * need to be computed from meshlet's vertex_offset and triangle_offset
 * triangle_count and vertex_count must not exceed implementation limits (vertex_count <= 255 - not 256!, triangle_count <= 512)
 */
MESHOPTIMIZER_EXPERIMENTAL void meshopt_optimizeMeshlet(unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, size_t triangle_count, size_t vertex_count);
2020-12-12 20:06:59 +08:00
struct meshopt_Bounds
{
	/* bounding sphere, useful for frustum and occlusion culling */
	float center[3];
	float radius;

	/* normal cone, useful for backface culling */
	float cone_apex[3];
	float cone_axis[3];
	float cone_cutoff; /* = cos(angle/2) */

	/* normal cone axis and cutoff, stored in 8-bit SNORM format; decode using x/127.0 */
	signed char cone_axis_s8[3];
	signed char cone_cutoff_s8;
};

/**
 * Cluster bounds generator
 * Creates bounding volumes that can be used for frustum, backface and occlusion culling.
 *
 * For backface culling with orthographic projection, use the following formula to reject backfacing clusters:
 *   dot(view, cone_axis) >= cone_cutoff
 *
 * For perspective projection, you can use the formula that needs cone apex in addition to axis & cutoff:
 *   dot(normalize(cone_apex - camera_position), cone_axis) >= cone_cutoff
 *
 * Alternatively, you can use the formula that doesn't need cone apex and uses bounding sphere instead:
 *   dot(normalize(center - camera_position), cone_axis) >= cone_cutoff + radius / length(center - camera_position)
 * or an equivalent formula that doesn't have a singularity at center = camera_position:
 *   dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius
 *
 * The formula that uses the apex is slightly more accurate but needs the apex; if you are already using bounding sphere
 * to do frustum/occlusion culling, the formula that doesn't use the apex may be preferable (for derivation see
 * Real-Time Rendering 4th Edition, section 19.3).
 *
 * vertex_positions should have float3 position in the first 12 bytes of each vertex
 * vertex_count should specify the number of vertices in the entire mesh, not cluster or meshlet
 * index_count/3 and triangle_count must not exceed implementation limits (<= 512)
 */
MESHOPTIMIZER_API struct meshopt_Bounds meshopt_computeClusterBounds(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
MESHOPTIMIZER_API struct meshopt_Bounds meshopt_computeMeshletBounds(const unsigned int* meshlet_vertices, const unsigned char* meshlet_triangles, size_t triangle_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
2020-12-12 20:06:59 +08:00
/**
2023-11-03 05:03:02 +08:00
* Spatial sorter
2020-12-12 20:06:59 +08:00
* Generates a remap table that can be used to reorder points for spatial locality .
* Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer .
*
* destination must contain enough space for the resulting remap table ( vertex_count elements )
2023-11-03 05:03:02 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
2020-12-12 20:06:59 +08:00
*/
2023-11-03 05:03:02 +08:00
MESHOPTIMIZER_API void meshopt_spatialSortRemap ( unsigned int * destination , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride ) ;
2020-12-12 20:06:59 +08:00
/**
* Experimental : Spatial sorter
* Reorders triangles for spatial locality , and generates a new index buffer . The resulting index buffer can be used with other functions like optimizeVertexCache .
*
* destination must contain enough space for the resulting index buffer ( index_count elements )
2022-12-22 23:22:33 +08:00
* vertex_positions should have float3 position in the first 12 bytes of each vertex
2020-12-12 20:06:59 +08:00
*/
MESHOPTIMIZER_EXPERIMENTAL void meshopt_spatialSortTriangles ( unsigned int * destination , const unsigned int * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride ) ;
/**
* Set allocation callbacks
* These callbacks will be used instead of the default operator new / operator delete for all temporary allocations in the library .
* Note that all algorithms only allocate memory for temporary use .
* allocate / deallocate are always called in a stack - like order - last pointer to be allocated is deallocated first .
*/
2024-10-26 01:41:51 +08:00
MESHOPTIMIZER_API void meshopt_setAllocator ( void * ( MESHOPTIMIZER_ALLOC_CALLCONV * allocate ) ( size_t ) , void ( MESHOPTIMIZER_ALLOC_CALLCONV * deallocate ) ( void * ) ) ;
2020-12-12 20:06:59 +08:00
#ifdef __cplusplus
} /* extern "C" */
#endif

/* Quantization into commonly supported data formats */
#ifdef __cplusplus
/**
 * Quantize a float in [0..1] range into an N-bit fixed point unorm value
 * Assumes reconstruction function (q / (2^N-1)), which is the case for fixed-function normalized fixed point conversion
 * Maximum reconstruction error: 1/2^(N+1)
 */
inline int meshopt_quantizeUnorm(float v, int N);

/**
 * Quantize a float in [-1..1] range into an N-bit fixed point snorm value
 * Assumes reconstruction function (q / (2^(N-1)-1)), which is the case for fixed-function normalized fixed point conversion (except early OpenGL versions)
 * Maximum reconstruction error: 1/2^N
 */
inline int meshopt_quantizeSnorm(float v, int N);

/**
 * Quantize a float into half-precision (as defined by IEEE-754 fp16) floating point value
 * Generates +-inf for overflow, preserves NaN, flushes denormals to zero, rounds to nearest
 * Representable magnitude range: [6e-5; 65504]
 * Maximum relative reconstruction error: 5e-4
 */
MESHOPTIMIZER_API unsigned short meshopt_quantizeHalf(float v);

/**
 * Quantize a float into a floating point value with a limited number of significant mantissa bits, preserving the IEEE-754 fp32 binary representation
 * Generates +-inf for overflow, preserves NaN, flushes denormals to zero, rounds to nearest
 * Assumes N is in a valid mantissa precision range, which is 1..23
 */
MESHOPTIMIZER_API float meshopt_quantizeFloat(float v, int N);

/**
 * Reverse quantization of a half-precision (as defined by IEEE-754 fp16) floating point value
 * Preserves Inf/NaN, flushes denormals to zero
 */
MESHOPTIMIZER_API float meshopt_dequantizeHalf(unsigned short h);
#endif
/**
 * C++ template interface
 *
 * These functions mirror the C interface the library provides, providing template-based overloads so that
 * the caller can use an arbitrary type for the index data, both for input and output.
 * When the supplied type is the same size as that of unsigned int, the wrappers are zero-cost; when it's not,
 * the wrappers end up allocating memory and copying index data to convert from one type to another.
 */
#if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS)
template <typename T>
inline size_t meshopt_generateVertexRemap(unsigned int* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
template <typename T>
inline size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count);
template <typename T>
inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t index_count, const unsigned int* remap);
template <typename T>
inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride);
template <typename T>
inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count);
template <typename T>
inline void meshopt_generateAdjacencyIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
template <typename T>
inline void meshopt_generateTessellationIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
template <typename T>
inline size_t meshopt_generateProvokingIndexBuffer(T* destination, unsigned int* reorder, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline void meshopt_optimizeVertexCacheStrip(T* destination, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline void meshopt_optimizeVertexCacheFifo(T* destination, const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size);
template <typename T>
inline void meshopt_optimizeOverdraw(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold);
template <typename T>
inline size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count);
template <typename T>
inline size_t meshopt_optimizeVertexFetch(void* destination, T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
template <typename T>
inline size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count);
template <typename T>
inline int meshopt_decodeIndexBuffer(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size);
template <typename T>
inline size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count);
template <typename T>
inline int meshopt_decodeIndexSequence(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size);
template <typename T>
inline size_t meshopt_simplify(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, unsigned int options = 0, float* result_error = NULL);
template <typename T>
inline size_t meshopt_simplifyWithAttributes(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, const float* vertex_attributes, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned char* vertex_lock, size_t target_index_count, float target_error, unsigned int options = 0, float* result_error = NULL);
template <typename T>
inline size_t meshopt_simplifySloppy(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error = NULL);
template <typename T>
inline size_t meshopt_stripify(T* destination, const T* indices, size_t index_count, size_t vertex_count, T restart_index);
template <typename T>
inline size_t meshopt_unstripify(T* destination, const T* indices, size_t index_count, T restart_index);
template <typename T>
inline meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size);
template <typename T>
inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
template <typename T>
inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size);
template <typename T>
inline size_t meshopt_buildMeshlets(meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t max_vertices, size_t max_triangles, float cone_weight);
template <typename T>
inline size_t meshopt_buildMeshletsScan(meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const T* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles);
template <typename T>
inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
template <typename T>
inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
#endif
/* Inline implementation */
# ifdef __cplusplus
/* Quantizes v in [0..1] to an N-bit unorm integer with round-to-nearest; out-of-range inputs are clamped. */
inline int meshopt_quantizeUnorm(float v, int N)
{
	// largest representable value for an N-bit unorm
	const float limit = (float)((1 << N) - 1);

	// clamp via comparisons (maps NaN to 0, matching the reference behavior)
	float clamped = (v >= 0) ? v : 0;
	clamped = (clamped <= 1) ? clamped : 1;

	// round to nearest by biasing before truncation
	return (int)(clamped * limit + 0.5f);
}
/* Quantizes v in [-1..1] to an N-bit snorm integer, rounding half away from zero; out-of-range inputs are clamped. */
inline int meshopt_quantizeSnorm(float v, int N)
{
	// largest representable magnitude for an N-bit snorm (symmetric range)
	const float limit = (float)((1 << (N - 1)) - 1);

	// rounding bias is sign-dependent so that truncation rounds away from zero
	const float bias = (v >= 0) ? 0.5f : -0.5f;

	float clamped = (v >= -1) ? v : -1;
	clamped = (clamped <= +1) ? clamped : +1;

	return (int)(clamped * limit + bias);
}
# endif
/* Internal implementation helpers */
# ifdef __cplusplus
class meshopt_Allocator
{
public :
template < typename T >
struct StorageT
{
2024-10-26 01:41:51 +08:00
static void * ( MESHOPTIMIZER_ALLOC_CALLCONV * allocate ) ( size_t ) ;
static void ( MESHOPTIMIZER_ALLOC_CALLCONV * deallocate ) ( void * ) ;
2020-12-12 20:06:59 +08:00
} ;
typedef StorageT < void > Storage ;
meshopt_Allocator ( )
2024-10-26 01:41:51 +08:00
: blocks ( )
, count ( 0 )
2020-12-12 20:06:59 +08:00
{
}
~ meshopt_Allocator ( )
{
for ( size_t i = count ; i > 0 ; - - i )
Storage : : deallocate ( blocks [ i - 1 ] ) ;
}
2024-10-26 01:41:51 +08:00
template < typename T >
T * allocate ( size_t size )
2020-12-12 20:06:59 +08:00
{
assert ( count < sizeof ( blocks ) / sizeof ( blocks [ 0 ] ) ) ;
T * result = static_cast < T * > ( Storage : : allocate ( size > size_t ( - 1 ) / sizeof ( T ) ? size_t ( - 1 ) : size * sizeof ( T ) ) ) ;
blocks [ count + + ] = result ;
return result ;
}
2023-11-03 05:03:02 +08:00
void deallocate ( void * ptr )
{
assert ( count > 0 & & blocks [ count - 1 ] = = ptr ) ;
Storage : : deallocate ( ptr ) ;
count - - ;
}
2020-12-12 20:06:59 +08:00
private :
void * blocks [ 24 ] ;
size_t count ;
} ;
// This makes sure that allocate/deallocate are lazily generated in translation units that need them and are deduplicated by the linker
2024-10-26 01:41:51 +08:00
template < typename T >
void * ( MESHOPTIMIZER_ALLOC_CALLCONV * meshopt_Allocator : : StorageT < T > : : allocate ) ( size_t ) = operator new ;
template < typename T >
void ( MESHOPTIMIZER_ALLOC_CALLCONV * meshopt_Allocator : : StorageT < T > : : deallocate ) ( void * ) = operator delete ;
2020-12-12 20:06:59 +08:00
# endif
/* Inline implementation for C++ templated wrappers */
# if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS)
template < typename T , bool ZeroCopy = sizeof ( T ) = = sizeof ( unsigned int ) >
struct meshopt_IndexAdapter ;
template < typename T >
struct meshopt_IndexAdapter < T , false >
{
T * result ;
unsigned int * data ;
size_t count ;
meshopt_IndexAdapter ( T * result_ , const T * input , size_t count_ )
: result ( result_ )
2023-11-03 05:03:02 +08:00
, data ( NULL )
2020-12-12 20:06:59 +08:00
, count ( count_ )
{
size_t size = count > size_t ( - 1 ) / sizeof ( unsigned int ) ? size_t ( - 1 ) : count * sizeof ( unsigned int ) ;
data = static_cast < unsigned int * > ( meshopt_Allocator : : Storage : : allocate ( size ) ) ;
if ( input )
{
for ( size_t i = 0 ; i < count ; + + i )
data [ i ] = input [ i ] ;
}
}
~ meshopt_IndexAdapter ( )
{
if ( result )
{
for ( size_t i = 0 ; i < count ; + + i )
result [ i ] = T ( data [ i ] ) ;
}
meshopt_Allocator : : Storage : : deallocate ( data ) ;
}
} ;
template < typename T >
struct meshopt_IndexAdapter < T , true >
{
unsigned int * data ;
meshopt_IndexAdapter ( T * result , const T * input , size_t )
: data ( reinterpret_cast < unsigned int * > ( result ? result : const_cast < T * > ( input ) ) )
{
}
} ;
template < typename T >
inline size_t meshopt_generateVertexRemap ( unsigned int * destination , const T * indices , size_t index_count , const void * vertices , size_t vertex_count , size_t vertex_size )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , indices ? index_count : 0 ) ;
2020-12-12 20:06:59 +08:00
2023-11-03 05:03:02 +08:00
return meshopt_generateVertexRemap ( destination , indices ? in . data : NULL , index_count , vertices , vertex_count , vertex_size ) ;
2020-12-12 20:06:59 +08:00
}
template < typename T >
inline size_t meshopt_generateVertexRemapMulti ( unsigned int * destination , const T * indices , size_t index_count , size_t vertex_count , const meshopt_Stream * streams , size_t stream_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , indices ? index_count : 0 ) ;
2020-12-12 20:06:59 +08:00
2023-11-03 05:03:02 +08:00
return meshopt_generateVertexRemapMulti ( destination , indices ? in . data : NULL , index_count , vertex_count , streams , stream_count ) ;
2020-12-12 20:06:59 +08:00
}
template < typename T >
inline void meshopt_remapIndexBuffer ( T * destination , const T * indices , size_t index_count , const unsigned int * remap )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , indices ? index_count : 0 ) ;
2020-12-12 20:06:59 +08:00
meshopt_IndexAdapter < T > out ( destination , 0 , index_count ) ;
2023-11-03 05:03:02 +08:00
meshopt_remapIndexBuffer ( out . data , indices ? in . data : NULL , index_count , remap ) ;
2020-12-12 20:06:59 +08:00
}
template < typename T >
inline void meshopt_generateShadowIndexBuffer ( T * destination , const T * indices , size_t index_count , const void * vertices , size_t vertex_count , size_t vertex_size , size_t vertex_stride )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_generateShadowIndexBuffer ( out . data , in . data , index_count , vertices , vertex_count , vertex_size , vertex_stride ) ;
}
template < typename T >
inline void meshopt_generateShadowIndexBufferMulti ( T * destination , const T * indices , size_t index_count , size_t vertex_count , const meshopt_Stream * streams , size_t stream_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_generateShadowIndexBufferMulti ( out . data , in . data , index_count , vertex_count , streams , stream_count ) ;
}
2021-04-18 22:15:43 +08:00
template < typename T >
inline void meshopt_generateAdjacencyIndexBuffer ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count * 2 ) ;
2021-04-18 22:15:43 +08:00
meshopt_generateAdjacencyIndexBuffer ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride ) ;
}
template < typename T >
inline void meshopt_generateTessellationIndexBuffer ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count * 4 ) ;
2021-04-18 22:15:43 +08:00
meshopt_generateTessellationIndexBuffer ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride ) ;
}
2024-10-26 01:41:51 +08:00
template < typename T >
inline size_t meshopt_generateProvokingIndexBuffer ( T * destination , unsigned int * reorder , const T * indices , size_t index_count , size_t vertex_count )
{
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
size_t bound = vertex_count + ( index_count / 3 ) ;
assert ( size_t ( T ( bound - 1 ) ) = = bound - 1 ) ; // bound - 1 must fit in T
( void ) bound ;
return meshopt_generateProvokingIndexBuffer ( out . data , reorder , in . data , index_count , vertex_count ) ;
}
2020-12-12 20:06:59 +08:00
template < typename T >
inline void meshopt_optimizeVertexCache ( T * destination , const T * indices , size_t index_count , size_t vertex_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_optimizeVertexCache ( out . data , in . data , index_count , vertex_count ) ;
}
template < typename T >
inline void meshopt_optimizeVertexCacheStrip ( T * destination , const T * indices , size_t index_count , size_t vertex_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_optimizeVertexCacheStrip ( out . data , in . data , index_count , vertex_count ) ;
}
template < typename T >
inline void meshopt_optimizeVertexCacheFifo ( T * destination , const T * indices , size_t index_count , size_t vertex_count , unsigned int cache_size )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_optimizeVertexCacheFifo ( out . data , in . data , index_count , vertex_count , cache_size ) ;
}
template < typename T >
inline void meshopt_optimizeOverdraw ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , float threshold )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_optimizeOverdraw ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride , threshold ) ;
}
template < typename T >
inline size_t meshopt_optimizeVertexFetchRemap ( unsigned int * destination , const T * indices , size_t index_count , size_t vertex_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
2020-12-12 20:06:59 +08:00
return meshopt_optimizeVertexFetchRemap ( destination , in . data , index_count , vertex_count ) ;
}
template < typename T >
inline size_t meshopt_optimizeVertexFetch ( void * destination , T * indices , size_t index_count , const void * vertices , size_t vertex_count , size_t vertex_size )
{
meshopt_IndexAdapter < T > inout ( indices , indices , index_count ) ;
return meshopt_optimizeVertexFetch ( destination , inout . data , index_count , vertices , vertex_count , vertex_size ) ;
}
template < typename T >
inline size_t meshopt_encodeIndexBuffer ( unsigned char * buffer , size_t buffer_size , const T * indices , size_t index_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
2020-12-12 20:06:59 +08:00
return meshopt_encodeIndexBuffer ( buffer , buffer_size , in . data , index_count ) ;
}
template < typename T >
inline int meshopt_decodeIndexBuffer ( T * destination , size_t index_count , const unsigned char * buffer , size_t buffer_size )
{
char index_size_valid [ sizeof ( T ) = = 2 | | sizeof ( T ) = = 4 ? 1 : - 1 ] ;
( void ) index_size_valid ;
return meshopt_decodeIndexBuffer ( destination , index_count , sizeof ( T ) , buffer , buffer_size ) ;
}
template < typename T >
inline size_t meshopt_encodeIndexSequence ( unsigned char * buffer , size_t buffer_size , const T * indices , size_t index_count )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
2020-12-12 20:06:59 +08:00
return meshopt_encodeIndexSequence ( buffer , buffer_size , in . data , index_count ) ;
}
template < typename T >
inline int meshopt_decodeIndexSequence ( T * destination , size_t index_count , const unsigned char * buffer , size_t buffer_size )
{
char index_size_valid [ sizeof ( T ) = = 2 | | sizeof ( T ) = = 4 ? 1 : - 1 ] ;
( void ) index_size_valid ;
return meshopt_decodeIndexSequence ( destination , index_count , sizeof ( T ) , buffer , buffer_size ) ;
}
template < typename T >
2022-12-22 23:22:33 +08:00
inline size_t meshopt_simplify ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , size_t target_index_count , float target_error , unsigned int options , float * result_error )
2020-12-12 20:06:59 +08:00
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
2022-12-22 23:22:33 +08:00
return meshopt_simplify ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride , target_index_count , target_error , options , result_error ) ;
2020-12-12 20:06:59 +08:00
}
2023-11-03 05:03:02 +08:00
template < typename T >
2024-10-26 01:41:51 +08:00
inline size_t meshopt_simplifyWithAttributes ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , const float * vertex_attributes , size_t vertex_attributes_stride , const float * attribute_weights , size_t attribute_count , const unsigned char * vertex_lock , size_t target_index_count , float target_error , unsigned int options , float * result_error )
2023-11-03 05:03:02 +08:00
{
2024-10-26 01:41:51 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2023-11-03 05:03:02 +08:00
2024-10-26 01:41:51 +08:00
return meshopt_simplifyWithAttributes ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride , vertex_attributes , vertex_attributes_stride , attribute_weights , attribute_count , vertex_lock , target_index_count , target_error , options , result_error ) ;
2023-11-03 05:03:02 +08:00
}
2020-12-12 20:06:59 +08:00
template < typename T >
2021-01-10 02:04:09 +08:00
inline size_t meshopt_simplifySloppy ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , size_t target_index_count , float target_error , float * result_error )
2020-12-12 20:06:59 +08:00
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
2021-01-10 02:04:09 +08:00
return meshopt_simplifySloppy ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride , target_index_count , target_error , result_error ) ;
2020-12-12 20:06:59 +08:00
}
template < typename T >
inline size_t meshopt_stripify ( T * destination , const T * indices , size_t index_count , size_t vertex_count , T restart_index )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , ( index_count / 3 ) * 5 ) ;
2020-12-12 20:06:59 +08:00
return meshopt_stripify ( out . data , in . data , index_count , vertex_count , unsigned ( restart_index ) ) ;
}
template < typename T >
inline size_t meshopt_unstripify ( T * destination , const T * indices , size_t index_count , T restart_index )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , ( index_count - 2 ) * 3 ) ;
2020-12-12 20:06:59 +08:00
return meshopt_unstripify ( out . data , in . data , index_count , unsigned ( restart_index ) ) ;
}
template <typename T>
inline meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size)
{
	// Read-only adapter; analysis does not modify the index data.
	meshopt_IndexAdapter<T> input(NULL, indices, index_count);

	return meshopt_analyzeVertexCache(input.data, index_count, vertex_count, cache_size, warp_size, buffer_size);
}
template <typename T>
inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	// Read-only adapter; analysis does not modify the index data.
	meshopt_IndexAdapter<T> input(NULL, indices, index_count);

	return meshopt_analyzeOverdraw(input.data, index_count, vertex_positions, vertex_count, vertex_positions_stride);
}
template <typename T>
inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size)
{
	// Read-only adapter; analysis does not modify the index data.
	meshopt_IndexAdapter<T> input(NULL, indices, index_count);

	return meshopt_analyzeVertexFetch(input.data, index_count, vertex_count, vertex_size);
}
template < typename T >
2021-04-18 22:15:43 +08:00
inline size_t meshopt_buildMeshlets ( meshopt_Meshlet * meshlets , unsigned int * meshlet_vertices , unsigned char * meshlet_triangles , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride , size_t max_vertices , size_t max_triangles , float cone_weight )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
2021-04-18 22:15:43 +08:00
return meshopt_buildMeshlets ( meshlets , meshlet_vertices , meshlet_triangles , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride , max_vertices , max_triangles , cone_weight ) ;
}
template < typename T >
inline size_t meshopt_buildMeshletsScan ( meshopt_Meshlet * meshlets , unsigned int * meshlet_vertices , unsigned char * meshlet_triangles , const T * indices , size_t index_count , size_t vertex_count , size_t max_vertices , size_t max_triangles )
2020-12-12 20:06:59 +08:00
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
2020-12-12 20:06:59 +08:00
2021-04-18 22:15:43 +08:00
return meshopt_buildMeshletsScan ( meshlets , meshlet_vertices , meshlet_triangles , in . data , index_count , vertex_count , max_vertices , max_triangles ) ;
2020-12-12 20:06:59 +08:00
}
template <typename T>
inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	// Read-only adapter; bounds computation does not modify the index data.
	meshopt_IndexAdapter<T> input(NULL, indices, index_count);

	return meshopt_computeClusterBounds(input.data, index_count, vertex_positions, vertex_count, vertex_positions_stride);
}
template < typename T >
inline void meshopt_spatialSortTriangles ( T * destination , const T * indices , size_t index_count , const float * vertex_positions , size_t vertex_count , size_t vertex_positions_stride )
{
2023-11-03 05:03:02 +08:00
meshopt_IndexAdapter < T > in ( NULL , indices , index_count ) ;
meshopt_IndexAdapter < T > out ( destination , NULL , index_count ) ;
2020-12-12 20:06:59 +08:00
meshopt_spatialSortTriangles ( out . data , in . data , index_count , vertex_positions , vertex_count , vertex_positions_stride ) ;
}
# endif
/**
 * Copyright (c) 2016-2024 Arseny Kapoulkine
*
* Permission is hereby granted , free of charge , to any person
* obtaining a copy of this software and associated documentation
* files ( the " Software " ) , to deal in the Software without
* restriction , including without limitation the rights to use ,
* copy , modify , merge , publish , distribute , sublicense , and / or sell
* copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following
* conditions :
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY ,
* WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING
* FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*/