Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-04-12 19:20:36 +08:00)
Fix various asan errors.
parent a2cf99ec6f
commit 3026f1f296
@@ -275,7 +275,7 @@ inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i) {
 template <typename MatrixType>
 typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter) {
   using std::abs;
-  if (iter == 10 || iter == 20) {
+  if ((iter == 10 || iter == 20) && iu > 1) {
     // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
     return abs(numext::real(m_matT.coeff(iu, iu - 1))) + abs(numext::real(m_matT.coeff(iu - 1, iu - 2)));
   }
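The added `iu > 1` guard matters because the exceptional-shift formula reads one entry to the left of the subdiagonal. A minimal sketch of the indexing hazard, using a plain Eigen::MatrixXcd in place of the Schur iteration's internal m_matT (illustrative only, not the library's code path):

#include <Eigen/Dense>
#include <cmath>
#include <complex>
#include <iostream>

int main() {
  Eigen::MatrixXcd T = Eigen::MatrixXcd::Random(4, 4);
  for (Eigen::Index iu = 3; iu >= 1; --iu) {  // the active block shrinks as the iteration deflates
    if (iu > 1) {
      // Safe: both (iu, iu - 1) and (iu - 1, iu - 2) are valid entries.
      double shift = std::abs(std::real(T(iu, iu - 1))) + std::abs(std::real(T(iu - 1, iu - 2)));
      std::cout << "iu = " << iu << ": exceptional shift " << shift << "\n";
    } else {
      // With iu == 1 the second term would read column iu - 2 == -1,
      // an out-of-bounds access of the kind AddressSanitizer reports.
      std::cout << "iu = " << iu << ": skip the exceptional shift\n";
    }
  }
}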
@@ -112,53 +112,56 @@ static void test_cancel() {
 
 static void test_pool_partitions() {
   const int kThreads = 2;
-  ThreadPool tp(kThreads);
 
-  // Assign each thread to its own partition, so that stealing other work only
-  // occurs globally when a thread is idle.
-  std::vector<std::pair<unsigned, unsigned>> steal_partitions(kThreads);
-  for (int i = 0; i < kThreads; ++i) {
-    steal_partitions[i] = std::make_pair(i, i + 1);
-  }
-  tp.SetStealPartitions(steal_partitions);
-
   std::atomic<int> running(0);
   std::atomic<int> done(0);
   std::atomic<int> phase(0);
 
-  // Schedule kThreads tasks and ensure that they all are running.
-  for (int i = 0; i < kThreads; ++i) {
-    tp.Schedule([&]() {
-      const int thread_id = tp.CurrentThreadId();
-      VERIFY_GE(thread_id, 0);
-      VERIFY_LE(thread_id, kThreads - 1);
-      ++running;
-      while (phase < 1) {
-      }
-      ++done;
-    });
+  {
+    ThreadPool tp(kThreads);
+
+    // Assign each thread to its own partition, so that stealing other work only
+    // occurs globally when a thread is idle.
+    std::vector<std::pair<unsigned, unsigned>> steal_partitions(kThreads);
+    for (int i = 0; i < kThreads; ++i) {
+      steal_partitions[i] = std::make_pair(i, i + 1);
+    }
+    tp.SetStealPartitions(steal_partitions);
+
+    // Schedule kThreads tasks and ensure that they all are running.
+    for (int i = 0; i < kThreads; ++i) {
+      tp.Schedule([&]() {
+        const int thread_id = tp.CurrentThreadId();
+        VERIFY_GE(thread_id, 0);
+        VERIFY_LE(thread_id, kThreads - 1);
+        ++running;
+        while (phase < 1) {
+        }
+        ++done;
+      });
+    }
+    while (running != kThreads) {
+    }
+    // Schedule each closure to only run on thread 'i' and verify that it does.
+    for (int i = 0; i < kThreads; ++i) {
+      tp.ScheduleWithHint(
+          [&, i]() {
+            ++running;
+            const int thread_id = tp.CurrentThreadId();
+            VERIFY_IS_EQUAL(thread_id, i);
+            while (phase < 2) {
+            }
+            ++done;
+          },
+          i, i + 1);
+    }
+    running = 0;
+    phase = 1;
+    while (running != kThreads) {
+    }
+    running = 0;
+    phase = 2;
   }
-  while (running != kThreads) {
-  }
-  // Schedule each closure to only run on thread 'i' and verify that it does.
-  for (int i = 0; i < kThreads; ++i) {
-    tp.ScheduleWithHint(
-        [&, i]() {
-          ++running;
-          const int thread_id = tp.CurrentThreadId();
-          VERIFY_IS_EQUAL(thread_id, i);
-          while (phase < 2) {
-          }
-          ++done;
-        },
-        i, i + 1);
-  }
-  running = 0;
-  phase = 1;
-  while (running != kThreads) {
-  }
-  running = 0;
-  phase = 2;
 }
 
 EIGEN_DECLARE_TEST(cxx11_non_blocking_thread_pool) {
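The extra scope matters because the scheduled closures capture `running`, `done`, and `phase` by reference: the pool must be destroyed, and its worker threads joined, before those stack atomics go away, otherwise late-running workers touch destroyed objects (the use-after-scope ASan flags). A minimal sketch of the same lifetime rule, using std::thread as a stand-in for Eigen's ThreadPool (not the test itself):

#include <atomic>
#include <thread>

int main() {
  std::atomic<int> phase(0);
  std::atomic<int> done(0);
  {
    // The worker lives in an inner scope, like the ThreadPool in the patched
    // test, so it is joined while `phase` and `done` are still alive.
    std::thread worker([&] {
      while (phase < 1) {
      }
      ++done;
    });
    phase = 1;
    worker.join();
  }  // if the atomics were destroyed before the worker finished,
     // the lambda would read and write freed stack memory
  return done == 1 ? 0 : 1;
}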
@@ -126,10 +126,16 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType_>, Device> {
   TensorEvaluator(const XprType& op, const Device& device)
       : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL) {}
 
+  ~TensorEvaluator() { cleanup(); }
+
   EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
 
   EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
     const Index numValues = internal::array_prod(m_impl.dimensions());
+
+    if (m_buffer != nullptr) {
+      m_device.deallocate_temp(m_buffer);
+    }
     m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType)));
 
     internal::non_integral_type_placement_new<Device, CoeffReturnType>()(numValues, m_buffer);
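The added `deallocate_temp` call follows the usual free-before-reallocate pattern: if evaluation runs more than once, the buffer from the previous run is released before a fresh one is allocated, so nothing leaks. A minimal sketch of that pattern with plain new/delete (the Evaluator type below is illustrative, not Eigen's):

#include <cstddef>

struct Evaluator {
  float* m_buffer = nullptr;

  void evalSubExprsIfNeeded(std::size_t numValues) {
    if (m_buffer != nullptr) {
      delete[] m_buffer;  // release the buffer from a previous evaluation first
    }
    m_buffer = new float[numValues];
  }

  ~Evaluator() { delete[] m_buffer; }  // plays the role of the cleanup() call in the added destructor
};

int main() {
  Evaluator e;
  e.evalSubExprsIfNeeded(16);
  e.evalSubExprsIfNeeded(32);  // without the null check, this second call would leak the first buffer
}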
@ -148,6 +154,9 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType_>, Device> {
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
|
||||
const Index numValues = internal::array_prod(m_impl.dimensions());
|
||||
if (m_buffer != nullptr) {
|
||||
m_device.deallocate_temp(m_buffer);
|
||||
}
|
||||
m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType)));
|
||||
typedef TensorEvalToOp<const std::remove_const_t<ArgType>> EvalTo;
|
||||
EvalTo evalToTmp(m_device.get(m_buffer), m_op);
|
||||
|
@@ -309,6 +309,7 @@ bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sy
   out << header << std::endl;
   out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n";
   int count = 0;
+  EIGEN_UNUSED_VARIABLE(count);
   for (int j = 0; j < mat.outerSize(); ++j)
     for (typename SparseMatrixType::InnerIterator it(mat, j); it; ++it) {
       ++count;