mirror of https://gitlab.com/libeigen/eigen.git
Fix typos

parent fd98cc49f1
commit c593e9e948
@@ -327,7 +327,7 @@ namespace half_impl {
 #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
     (defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE))
 // Note: We deliberately do *not* define this to 1 even if we have Arm's native
-// fp16 type since GPU halfs are rather different from native CPU halfs.
+// fp16 type since GPU half types are rather different from native CPU half types.
 // TODO: Rename to something like EIGEN_HAS_NATIVE_GPU_FP16
 #define EIGEN_HAS_NATIVE_FP16
 #endif
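A side note on this hunk: EIGEN_HAS_NATIVE_FP16 is deliberately defined with no value, so consuming code must test for its existence rather than its value. A minimal illustration (not part of the patch):

#if defined(EIGEN_HAS_NATIVE_FP16)
// native GPU half-precision path
#else
// emulated half-precision path
#endif
// Note: `#if EIGEN_HAS_NATIVE_FP16` would be a preprocessing error,
// since the macro expands to nothing inside the #if expression.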
@@ -27,7 +27,7 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Packet4hf ptanh<Packet4hf>(const Packet4hf

 template <>
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Packet8hf ptanh<Packet8hf>(const Packet8hf& x) {
-  // Convert each 4 halfs to float, call the float ptanh, and then convert back.
+  // Convert each 4 half types to float, call the float ptanh, and then convert back.
   return vcombine_f16(vcvt_f16_f32(ptanh<Packet4f>(vcvt_f32_f16(vget_low_f16(x)))),
                       vcvt_f16_f32(ptanh<Packet4f>(vcvt_high_f32_f16(x))));
 }
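The corrected comment describes the usual widen/compute/narrow idiom for half precision. A scalar C++ analogue of the same idea, assuming only Eigen::half from Eigen/Core (illustrative, not the vectorized code path above):

#include <Eigen/Core>
#include <cmath>

// No native half-precision tanh: promote to float, evaluate, convert back.
inline Eigen::half tanh_via_float(Eigen::half h) {
  float f = static_cast<float>(h);   // widen: half -> float
  return Eigen::half(std::tanh(f));  // compute in float, narrow back to half
}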
@@ -1047,7 +1047,7 @@ void BDCSVD<MatrixType, Options>::computeSingVals(const ArrayRef& col0, const Ar
     } else {
       // We have a problem as shifting on the left or right give either a positive or negative value
       // at the middle of [left,right]...
-      // Instead fo abbording or entering an infinite loop,
+      // Instead of abbording or entering an infinite loop,
       // let's just use the middle as the estimated zero-crossing:
       muCur = (right - left) * RealScalar(0.5);
       // we can test exact equality here, because shift comes from `... ? left : right`
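The comment being fixed describes a robustness fallback: when the evaluations at both endpoints share a sign, there is no guaranteed bracket, so the midpoint is taken as the estimate instead of aborting or looping forever. A standalone sketch of that idea (illustrative, not the BDCSVD code):

#include <cmath>

template <typename F>
double robust_bisect(F f, double left, double right, int maxIter = 60) {
  double fLeft = f(left);
  if (std::signbit(fLeft) == std::signbit(f(right)))
    return 0.5 * (left + right);  // no sign change: use the middle as the estimated zero-crossing
  for (int i = 0; i < maxIter; ++i) {
    double mid = 0.5 * (left + right);
    double fMid = f(mid);
    if (std::signbit(fMid) == std::signbit(fLeft)) {
      left = mid;
      fLeft = fMid;
    } else {
      right = mid;
    }
  }
  return 0.5 * (left + right);
}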
@@ -117,7 +117,7 @@ build:linux:cross:x86-64:clang-12:avx512dq:
 .build:linux:cuda:
   extends: .build:linux:cross:x86-64
   variables:
-    # Addtional flags passed to the cuda compiler.
+    # Additional flags passed to the cuda compiler.
     EIGEN_CI_CUDA_CXX_FLAGS: ""
     # Compute architectures present in the GitLab CI runners.
     EIGEN_CI_CUDA_COMPUTE_ARCH: "50;75"
@@ -17,7 +17,7 @@ if (-Not (Test-Path ${EIGEN_CI_BUILDDIR})) {
 }
 cd $EIGEN_CI_BUILDDIR

-# We need to split EIGEN_CI_ADDITIONAL_ARGS, otherwise they are interpretted
+# We need to split EIGEN_CI_ADDITIONAL_ARGS, otherwise they are interpreted
 # as a single argument. Split by space, unless double-quoted.
 $split_args = [regex]::Split(${EIGEN_CI_ADDITIONAL_ARGS}, ' (?=(?:[^"]|"[^"]*")*$)' )
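The splitting regex above is worth unpacking: a space is a separator only if the rest of the string contains balanced double quotes, i.e. the space is outside any quoted argument. A C++ analogue of the same pattern (assumed equivalent, for illustration only):

#include <iostream>
#include <regex>
#include <string>
#include <vector>

std::vector<std::string> split_args(const std::string& s) {
  // Same lookahead as the script: split on a space followed by balanced quotes.
  static const std::regex re(" (?=(?:[^\"]|\"[^\"]*\")*$)");
  std::sregex_token_iterator it(s.begin(), s.end(), re, -1), end;
  return std::vector<std::string>(it, end);
}

int main() {
  for (const auto& arg : split_args("-DA=1 -DB=\"x y\" -DC=2"))
    std::cout << '[' << arg << "]\n";  // [-DA=1] [-DB="x y"] [-DC=2]
}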
@@ -46,7 +46,7 @@ void GpuHelper::popProjectionMode2D(void) {
 }

 void GpuHelper::drawVector(const Vector3f& position, const Vector3f& vec, const Color& color, float aspect /* = 50.*/) {
-  static GLUquadricObj* cylindre = gluNewQuadric();
+  static GLUquadricObj* cylinder = gluNewQuadric();
   glColor4fv(color.data());
   float length = vec.norm();
   pushMatrix(GL_MODELVIEW);
@@ -57,15 +57,15 @@ void GpuHelper::drawVector(const Vector3f& position, const Vector3f& vec, const
   tmp.normalize();
   float angle = 180.f / M_PI * acos(tmp.z());
   if (angle > 1e-3) glRotatef(angle, ax.x(), ax.y(), ax.z());
-  gluCylinder(cylindre, length / aspect, length / aspect, 0.8 * length, 10, 10);
+  gluCylinder(cylinder, length / aspect, length / aspect, 0.8 * length, 10, 10);
   glTranslatef(0.0, 0.0, 0.8 * length);
-  gluCylinder(cylindre, 2.0 * length / aspect, 0.0, 0.2 * length, 10, 10);
+  gluCylinder(cylinder, 2.0 * length / aspect, 0.0, 0.2 * length, 10, 10);

   popMatrix(GL_MODELVIEW);
 }

 void GpuHelper::drawVectorBox(const Vector3f& position, const Vector3f& vec, const Color& color, float aspect) {
-  static GLUquadricObj* cylindre = gluNewQuadric();
+  static GLUquadricObj* cylinder = gluNewQuadric();
   glColor4fv(color.data());
   float length = vec.norm();
   pushMatrix(GL_MODELVIEW);
@@ -76,7 +76,7 @@ void GpuHelper::drawVectorBox(const Vector3f& position, const Vector3f& vec, con
   tmp.normalize();
   float angle = 180.f / M_PI * acos(tmp.z());
   if (angle > 1e-3) glRotatef(angle, ax.x(), ax.y(), ax.z());
-  gluCylinder(cylindre, length / aspect, length / aspect, 0.8 * length, 10, 10);
+  gluCylinder(cylinder, length / aspect, length / aspect, 0.8 * length, 10, 10);
   glTranslatef(0.0, 0.0, 0.8 * length);
   glScalef(4.0 * length / aspect, 4.0 * length / aspect, 4.0 * length / aspect);
   drawUnitCube();
@@ -278,7 +278,7 @@ So internal::assign_selector takes 4 template parameters, but the 2 last ones ar

 EvalBeforeAssigning is here to enforce the EvalBeforeAssigningBit. As explained <a href="TopicLazyEvaluation.html">here</a>, certain expressions have this flag which makes them automatically evaluate into temporaries before assigning them to another expression. This is the case of the Product expression, in order to avoid strange aliasing effects when doing "m = m * m;" However, of course here our CwiseBinaryOp expression doesn't have the EvalBeforeAssigningBit: we said since the beginning that we didn't want a temporary to be introduced here. So if you go to src/Core/CwiseBinaryOp.h, you'll see that the Flags in internal::traits\<CwiseBinaryOp\> don't include the EvalBeforeAssigningBit. The Flags member of CwiseBinaryOp is then imported from the internal::traits by the EIGEN_GENERIC_PUBLIC_INTERFACE macro. Anyway, here the template parameter EvalBeforeAssigning has the value \c false.

-NeedToTranspose is here for the case where the user wants to copy a row-vector into a column-vector. We allow this as a special exception to the general rule that in assignments we require the dimesions to match. Anyway, here both the left-hand and right-hand sides are column vectors, in the sense that ColsAtCompileTime is equal to 1. So NeedToTranspose is \c false too.
+NeedToTranspose is here for the case where the user wants to copy a row-vector into a column-vector. We allow this as a special exception to the general rule that in assignments we require the dimensions to match. Anyway, here both the left-hand and right-hand sides are column vectors, in the sense that ColsAtCompileTime is equal to 1. So NeedToTranspose is \c false too.

 So, here we are in the partial specialization:
 \code
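The context paragraph mentions the aliasing hazard that EvalBeforeAssigningBit exists to prevent. A small self-contained illustration:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d m;
  m << 1, 2,
       3, 4;
  // Safe: the product is evaluated into a temporary before assignment,
  // so reads of m are not clobbered by writes to m.
  m = m * m;
  std::cout << m << "\n";  // the correct square: [7 10; 15 22]
}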
@@ -29,7 +29,7 @@ are doing.
 initialized to zero, as are new entries in matrices and arrays after resizing. Not defined by default.
 \warning The unary (resp. binary) constructor of \c 1x1 (resp. \c 2x1 or \c 1x2) fixed size matrices is
 always interpreted as an initialization constructor where the argument(s) are the coefficient values
-and not the sizes. For instance, \code Vector2d v(2,1); \endcode will create a vector with coeficients [2,1],
+and not the sizes. For instance, \code Vector2d v(2,1); \endcode will create a vector with coefficients [2,1],
 and \b not a \c 2x1 vector initialized with zeros (i.e., [0,0]). If such cases might occur, then it is
 recommended to use the default constructor with a explicit call to resize:
 \code
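The warning in this hunk is easy to trip over; a short illustration of both the pitfall and the recommended pattern (a sketch; the resize-based form matters for dynamic sizes):

#include <Eigen/Dense>

int main() {
  // Coefficient constructor: v holds [2, 1], not a zero 2x1 vector,
  // even with EIGEN_INITIALIZE_MATRICES_BY_ZERO defined.
  Eigen::Vector2d v(2, 1);

  // Recommended alternative: default-construct, then resize. With the
  // macro defined, the new entries are zero-initialized.
  Eigen::VectorXd w;
  w.resize(2);
  (void)v;
  (void)w;
}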
@@ -89,7 +89,7 @@ Beyond the basic functions rows() and cols(), there are some useful functions th
 sm1.nonZeros(); // Number of non zero values
 sm1.outerSize(); // Number of columns (resp. rows) for a column major (resp. row major )
 sm1.innerSize(); // Number of rows (resp. columns) for a row major (resp. column major)
-sm1.norm(); // Euclidian norm of the matrix
+sm1.norm(); // Euclidean norm of the matrix
 sm1.squaredNorm(); // Squared norm of the matrix
 sm1.blueNorm();
 sm1.isVector(); // Check if sm1 is a sparse vector or a sparse matrix
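A short usage sketch for the accessors listed in this hunk:

#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> sm1(4, 6);  // column-major by default
  sm1.insert(0, 0) = 1.0;
  sm1.insert(2, 3) = -2.0;
  sm1.makeCompressed();

  std::cout << sm1.nonZeros()  << "\n";  // 2 stored non-zeros
  std::cout << sm1.outerSize() << "\n";  // 6: columns, since column-major
  std::cout << sm1.innerSize() << "\n";  // 4: rows
  std::cout << sm1.norm()      << "\n";  // Euclidean (Frobenius) norm, sqrt(5)
}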
@@ -70,9 +70,9 @@ There are other macros derived from EIGEN_STATIC_ASSERT to enhance readability.

 - \b EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) - passes if \a TYPE is fixed size.
 - \b EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) - passes if \a TYPE is dynamic size.
-- \b EIGEN_STATIC_ASSERT_LVALUE(Derived) - failes if \a Derived is read-only.
+- \b EIGEN_STATIC_ASSERT_LVALUE(Derived) - fails if \a Derived is read-only.
 - \b EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) - passes if \a Derived is an array expression.
-- <b>EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2)</b> - failes if the two expressions are an array one and a matrix one.
+- <b>EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2)</b> - fails if the two expressions are an array one and a matrix one.

 Because Eigen handles both fixed-size and dynamic-size expressions, some conditions cannot be clearly determined at compile time. We classify them into strict assertions and permissive assertions.
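For context, a sketch of how one of these macros is used inside a generic function (they are internal macros from Eigen's StaticAssert.h, shown here for illustration):

#include <Eigen/Dense>

// Fails to compile with a readable message if Derived is read-only.
template <typename Derived>
void setFirstCoeff(Eigen::MatrixBase<Derived>& xpr) {
  EIGEN_STATIC_ASSERT_LVALUE(Derived)
  xpr.coeffRef(0, 0) = typename Derived::Scalar(1);
}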
@@ -360,7 +360,7 @@ $(document).ready(function() {

 (function (){ // wait until the first "selected" element has been created
   try {
-    // this line will triger an exception if there is no #selected element, i.e., before the tree structure is
+    // this line will trigger an exception if there is no #selected element, i.e., before the tree structure is
     // complete.
     document.getElementById("selected").className = "item selected";
@@ -52,7 +52,7 @@ cmd.exe /c "`"${VS_INSTALL_DIR}\VC\Auxiliary\Build\vcvarsall.bat`" $EIGEN_CI_MSV
 IF (-Not (Test-Path -Path $EIGEN_CI_BUILDDIR) ) { mkdir $EIGEN_CI_BUILDDIR }
 cd $EIGEN_CI_BUILDDIR

-# We need to split EIGEN_CI_ADDITIONAL_ARGS, otherwise they are interpretted
+# We need to split EIGEN_CI_ADDITIONAL_ARGS, otherwise they are interpreted
 # as a single argument. Split by space, unless double-quoted.
 $split_args = [regex]::Split(${EIGEN_CI_ADDITIONAL_ARGS}, ' (?=(?:[^"]|"[^"]*")*$)' )
 cmake -G "${EIGEN_CI_CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=MinSizeRel -DEIGEN_TEST_CUSTOM_CXX_FLAGS="${EIGEN_CI_TEST_CUSTOM_CXX_FLAGS}" ${split_args} "${EIGEN_CI_ROOTDIR}"
@@ -153,7 +153,7 @@ void block(const MatrixType& m) {
   VERIFY_IS_EQUAL(numext::real(ones.col(c1).dot(ones.col(c2))), RealScalar(rows));
   VERIFY_IS_EQUAL(numext::real(ones.row(r1).dot(ones.row(r2))), RealScalar(cols));

-  // check that linear acccessors works on blocks
+  // check that linear accessors works on blocks
   m1 = m1_copy;

   // now test some block-inside-of-block.
@@ -44,7 +44,7 @@ bool find_pivot(typename MatrixType::Scalar tol, MatrixType& diffs, Index col =

 /* Check that two column vectors are approximately equal up to permutations.
  * Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(),
- * however this strategy is numerically inacurate because of numerical cancellation issues.
+ * however this strategy is numerically inaccurate because of numerical cancellation issues.
  */
 template <typename VectorType>
 void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2) {
@@ -898,7 +898,7 @@ containing the real part of the complex values of the original tensor.

 ### (Operation) imag()

-Returns a tensor with the same dimensions as the orginal tensor
+Returns a tensor with the same dimensions as the original tensor
 containing the imaginary part of the complex values of the original
 tensor.
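A minimal usage sketch for the operation documented in this hunk:

#include <unsupported/Eigen/CXX11/Tensor>
#include <complex>

int main() {
  Eigen::Tensor<std::complex<float>, 1> t(2);
  t.setValues({{1.0f, 2.0f}, {3.0f, 4.0f}});
  Eigen::Tensor<float, 1> im = t.imag();  // holds [2, 4]
  (void)im;
}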
@@ -910,7 +910,7 @@ exponent.

 The type of the exponent, Scalar, is always the same as the type of the
 tensor coefficients. For example, only integer exponents can be used in
-conjuntion with tensors of integer values.
+conjunction with tensors of integer values.

 You can use cast() to lift this restriction. For example this computes
 cubic roots of an int Tensor:
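The example the text refers to is cut off by the diff context; a sketch of the cast() idiom it describes (shape and values assumed for illustration):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<int, 2> a(2, 3);
  a.setValues({{0, 1, 8}, {27, 64, 125}});
  // pow() with a fractional exponent needs floating-point coefficients,
  // so cast first, then take the cube root.
  Eigen::Tensor<double, 2> b = a.cast<double>().pow(1.0 / 3.0);
  std::cout << b << "\n";  // approximately [0 1 2; 3 4 5]
}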
@@ -104,10 +104,10 @@ struct TTPanelSize {
   static EIGEN_CONSTEXPR StorageIndex TileSizeDimM = LocalThreadSizeM * WorkLoadPerThreadM;
   // TileSizeDimN: determines the tile size for the n dimension
   static EIGEN_CONSTEXPR StorageIndex TileSizeDimN = LocalThreadSizeN * WorkLoadPerThreadN;
-  // LoadPerThreadLhs: determines workload per thread for loading Lhs Tensor. This must be divisable by packetsize
+  // LoadPerThreadLhs: determines workload per thread for loading Lhs Tensor. This must be divisible by packetsize
   static EIGEN_CONSTEXPR StorageIndex LoadPerThreadLhs =
       ((TileSizeDimK * WorkLoadPerThreadM * WorkLoadPerThreadN) / (TileSizeDimN));
-  // LoadPerThreadRhs: determines workload per thread for loading Rhs Tensor. This must be divisable by packetsize
+  // LoadPerThreadRhs: determines workload per thread for loading Rhs Tensor. This must be divisible by packetsize
   static EIGEN_CONSTEXPR StorageIndex LoadPerThreadRhs =
       ((TileSizeDimK * WorkLoadPerThreadM * WorkLoadPerThreadN) / (TileSizeDimM));
   // BC : determines if supporting bank conflict is required
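The two fixed comments state a divisibility contract that is documented but not enforced. A hedged sketch of enforcing it at compile time (names are illustrative, not the kernel's own):

#include <cstddef>

template <std::size_t LoadPerThread, std::size_t PacketSize>
void check_load_divisibility() {
  static_assert(LoadPerThread % PacketSize == 0,
                "LoadPerThread must be divisible by the packet size");
}

int main() {
  check_load_divisibility<16, 4>();  // compiles: 16 % 4 == 0
  // check_load_divisibility<18, 4>();  // would fail to compile
}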
@@ -674,7 +674,7 @@ class TensorContractionKernel {
     for (StorageIndex wLPTN = 0; wLPTN < Properties::WorkLoadPerThreadN / PrivateNStride; wLPTN++) {
       // output leading dimension
       StorageIndex outputLD = 0;
-      // When local memory is used the PrivateNstride is always 1 because the coalesed access on N is loaded into Local
+      // When local memory is used the PrivateNstride is always 1 because the coalesced access on N is loaded into Local
       // memory and extracting from local to global is the same as no transposed version. However, when local memory is
       // not used and RHS is transposed we packetize the load for RHS.
       EIGEN_UNROLL_LOOP
@@ -898,7 +898,7 @@ class TensorContractionKernel {
   EIGEN_CONSTEXPR StorageIndex LSD = InputBlockProperties::is_rhs ? LSDR : LSDL;
   static_assert(((LocalOffset % (TileSizeDimNC / InputBlockProperties::nc_stride) == 0) &&
                  (LocalOffset % (Properties::TileSizeDimK / InputBlockProperties::c_stride) == 0)),
-                " LocalOffset must be divisable by stride");
+                " LocalOffset must be divisible by stride");
   const StorageIndex &NC = InputBlockProperties::is_rhs ? triple_dim.N : triple_dim.M;
   StorageIndex localThreadNC = local_index.first;
   StorageIndex localThreadC = local_index.second;
@@ -36,7 +36,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* constCast(const T* data) {
 // used for referring to a Pointer on TensorEvaluator class. While the TensorExpression
 // is a device-agnostic type and need MakePointer class for type conversion,
 // the TensorEvaluator class can be specialized for a device, hence it is possible
-// to construct different types of temproray storage memory in TensorEvaluator
+// to construct different types of temporary storage memory in TensorEvaluator
 // for different devices by specializing the following StorageMemory class.
 template <typename T, typename device>
 struct StorageMemory : MakePointer<T> {};
@@ -136,7 +136,7 @@ class UniformRandomGenerator {
     // thread but for SYCL ((CLOCK * 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed to each thread and each
     // thread adds the (global_thread_id* 6364136223846793005ULL) for itself only once, in order to complete the
     // construction similar to CUDA Therefore, the thread Id injection is not available at this stage.
-    // However when the operator() is called the thread ID will be available. So inside the opeator,
+    // However when the operator() is called the thread ID will be available. So inside the operator,
     // we add the thrreadID, BlockId,... (which is equivalent of i)
     // to the seed and construct the unique m_state per thead similar to cuda.
     m_exec_once = false;
@@ -433,7 +433,7 @@ struct PartialReducerLauncher {
   EIGEN_CONSTEXPR Index localRange = PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR;
   // In this step, we force the code not to be more than 2-step reduction:
   // Our empirical research shows that if each thread reduces at least 64
-  // elemnts individually, we get better performance. However, this can change
+  // elements individually, we get better performance. However, this can change
   // on different platforms. In this step we force the code not to be
   // morthan step reduction: Our empirical research shows that for inner_most
   // dim reducer, it is better to have 8 group in a reduce dimension for sizes
@@ -495,7 +495,7 @@ struct FullReducer<Self, Op, Eigen::SyclDevice, Vectorizable> {
   typename Self::Index inputSize = self.impl().dimensions().TotalSize();
   // In this step we force the code not to be more than 2-step reduction:
   // Our empirical research shows that if each thread reduces at least 512
-  // elemnts individually, we get better performance.
+  // elements individually, we get better performance.
   const Index reductionPerThread = 2048;
   // const Index num_work_group =
   Index reductionGroup = dev.getPowerOfTwo(
@@ -275,7 +275,7 @@ const typename NNLS<MatrixType>::SolutionVectorType &NNLS<MatrixType>::solve(con
   const Index numActive = A_.cols() - numInactive_;
   Index argmaxGradient = -1;
   const Scalar maxGradient = gradient_(index_sets_.tail(numActive)).maxCoeff(&argmaxGradient);
-  argmaxGradient += numInactive_; // beacause tail() skipped the first numInactive_ elements
+  argmaxGradient += numInactive_; // because tail() skipped the first numInactive_ elements

   if (maxGradient < tolerance_) {
     info_ = ComputationInfo::Success;
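The fixed comment documents a small but easy-to-miss piece of index bookkeeping: maxCoeff() on a tail() view returns an index relative to the tail. A micro-example:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd g(5);
  g << 3, 9, 1, 7, 2;
  Eigen::Index numInactive = 2;
  Eigen::Index arg = -1;
  g.tail(g.size() - numInactive).maxCoeff(&arg);  // arg == 1: max of [1, 7, 2]
  arg += numInactive;                             // arg == 3, an index into g
  std::cout << g(arg) << "\n";                    // 7
}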
@@ -299,7 +299,7 @@ const typename NNLS<MatrixType>::SolutionVectorType &NNLS<MatrixType>::solve(con
   solveInactiveSet_(b);
   ++iterations_; // The solve is expensive, so that is what we count as an iteration.

-  // Check feasability...
+  // Check feasibility...
   bool feasible = true;
   Scalar alpha = NumTraits<Scalar>::highest();
   Index infeasibleIdx = -1; // Which variable became infeasible first.
@@ -70,7 +70,7 @@ bool bicgstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditio
   rHat.col(0) = rhs - mat * x0; // r_0

   x.setZero(); // This will contain the updates to the solution.
-  // rShadow is arbritary, but must never be orthogonal to any residual.
+  // rShadow is arbitrary, but must never be orthogonal to any residual.
   VectorType rShadow = VectorType::Random(N);

   VectorType x_prime = x;
@@ -313,7 +313,7 @@ class BiCGSTABL : public IterativeSolverBase<BiCGSTABL<MatrixType_, Precondition

   /** \internal */
   /** Loops over the number of columns of b and does the following:
-    1. sets the tolerence and maxIterations
+    1. sets the tolerance and maxIterations
     2. Calls the function that has the core solver routine
   */
   template <typename Rhs, typename Dest>
@@ -150,7 +150,7 @@ bool idrstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Precondition
     without any additional MV.

     Contrary to what one would suspect, the comparison with ==0.0 for
-    floating-point types is intended here. Any arbritary non-zero u is fine
+    floating-point types is intended here. Any arbitrary non-zero u is fine
     to continue, however if u contains either NaN or Inf the algorithm will
     break down.
   */
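The comment above defends an exact floating-point comparison; the key C++ behavior it relies on, in two lines:

#include <cmath>
#include <iostream>

int main() {
  double u = std::nan("");
  std::cout << (u == 0.0) << "\n";  // 0: NaN compares unequal to everything,
                                    // so a NaN u passes the check and the
                                    // breakdown happens later, as warned.
  u = 0.0;
  std::cout << (u == 0.0) << "\n";  // 1: the exact degenerate case tested for.
}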
@@ -33,7 +33,7 @@ namespace Eigen {
 */
 template <typename Scalar>
 class KahanSum {
-  // Straighforward Kahan summation for accurate accumulation of a sum of numbers
+  // Straightforward Kahan summation for accurate accumulation of a sum of numbers
   Scalar _sum{};
   Scalar _correction{};
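For reference, a minimal standalone sketch of the Kahan summation scheme the class implements (field names mirror the diff; illustrative, not Eigen's actual class):

#include <iostream>

template <typename Scalar>
struct KahanSumSketch {
  Scalar sum{};
  Scalar correction{};
  void add(Scalar x) {
    Scalar y = x - correction;   // apply the running correction
    Scalar t = sum + y;          // low-order bits of y may be lost here
    correction = (t - sum) - y;  // algebraically zero; captures the lost bits
    sum = t;
  }
};

int main() {
  KahanSumSketch<float> k;
  for (int i = 0; i < 1000000; ++i) k.add(0.0001f);
  std::cout << k.sum << "\n";  // ~100; naive float accumulation drifts noticeably
}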
@@ -21,7 +21,7 @@

 using std::sqrt;

-// tolerance for chekcing number of iterations
+// tolerance for checking number of iterations
 #define LM_EVAL_COUNT_TOL 2

 struct lmder_functor : DenseFunctor<double> {
@@ -37,5 +37,5 @@ void test_minres_T() {

 EIGEN_DECLARE_TEST(minres) {
   CALL_SUBTEST_1(test_minres_T<double>());
-  // CALL_SUBTEST_2(test_minres_T<std::compex<double> >());
+  // CALL_SUBTEST_2(test_minres_T<std::complex<double> >());
 }
@@ -231,7 +231,7 @@ void openglsupport_test_loop() {
   std::cerr << "GL version: " << gl_version_string << std::endl;
   std::cerr << "GLSL version: " << glGetString(GL_SHADING_LANGUAGE_VERSION) << std::endl;
   // Parse version from string since GL_MAJOR_VERSION is only supported in GL 3.0+.
-  // Version string guaranteed to be <major>.<minor><vender extension>.
+  // Version string guaranteed to be <major>.<minor><vendor extension>.
   GLint gl_major_version = gl_version_string[0] - '0';
   GLint gl_minor_version = gl_version_string[2] - '0';
   bool legacy_gl = gl_major_version < 3 || (gl_major_version == 3 && gl_minor_version < 2);