Remove internal::smart_copy and replace with std::copy

Eugene Zhulenev 2019-10-29 11:25:24 -07:00
parent fbc0a9a3ec
commit e7ed4bd388
7 changed files with 28 additions and 55 deletions
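Note (not part of the commit message): the rationale is presumably that internal::smart_copy's hand-rolled memcpy fast path is redundant — for trivially copyable element types, mainstream standard libraries already lower std::copy over pointer ranges to a memmove, while for non-trivial types smart_copy fell back to std::copy anyway. Below is a minimal, standalone sketch of that precondition, using double and int only as stand-ins for the Scalar / StorageIndex instantiations touched in the diff.

// Standalone sketch, not Eigen code: illustrates the property the swap relies on.
#include <algorithm>    // std::copy
#include <type_traits>  // std::is_trivially_copyable

// Typical element types at the call sites below are trivially copyable,
// so std::copy can (and in practice does) take a memmove fast path.
static_assert(std::is_trivially_copyable<double>::value, "Scalar stand-in");
static_assert(std::is_trivially_copyable<int>::value, "StorageIndex stand-in");

int main() {
  double src[4] = {1.0, 2.0, 3.0, 4.0};
  double dst[4] = {0.0, 0.0, 0.0, 0.0};
  // Same (start, end, target) argument order as the removed
  // internal::smart_copy, so the replacement is a drop-in at each call site.
  std::copy(src, src + 4, dst);
  return dst[3] == 4.0 ? 0 : 1;
}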

View File

@@ -369,7 +369,7 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
, m_cols(other.m_cols)
{
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols)
-internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data);
+std::copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data);
}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
@@ -452,7 +452,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
, m_cols(other.m_cols)
{
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*_Rows)
-internal::smart_copy(other.m_data, other.m_data+_Rows*m_cols, m_data);
+std::copy(other.m_data, other.m_data+_Rows*m_cols, m_data);
}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
@@ -528,7 +528,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
, m_rows(other.m_rows)
{
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*_Cols)
-internal::smart_copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data);
+std::copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data);
}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{

View File

@@ -507,31 +507,6 @@ inline Index first_multiple(Index size, Index base)
return ((size+base-1)/base)*base;
}
-// std::copy is much slower than memcpy, so let's introduce a smart_copy which
-// use memcpy on trivial types, i.e., on types that does not require an initialization ctor.
-template<typename T, bool UseMemcpy> struct smart_copy_helper;
-template<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target)
-{
-smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
-}
-template<typename T> struct smart_copy_helper<T,true> {
-EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
-{
-IntPtr size = IntPtr(end)-IntPtr(start);
-if(size==0) return;
-eigen_internal_assert(start!=0 && end!=0 && target!=0);
-EIGEN_USING_STD(memcpy)
-memcpy(target, start, size);
-}
-};
-template<typename T> struct smart_copy_helper<T,false> {
-EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
-{ std::copy(start, end, target); }
-};
// intelligent memmove. falls back to std::memmove for POD types, uses std::copy otherwise.
template<typename T, bool UseMemmove> struct smart_memmove_helper;
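For reference, a self-contained sketch of the dispatch pattern deleted above (assumptions: Eigen's device macros and assertions are dropped, and std::is_trivially_copyable stands in for the !NumTraits<T>::RequireInitialization test the real code used).

#include <algorithm>    // std::copy
#include <cstring>      // std::memcpy
#include <type_traits>  // std::is_trivially_copyable

// Helper specialized on a compile-time bool: true -> one raw memcpy,
// false (primary template) -> element-wise std::copy.
template <typename T, bool UseMemcpy>
struct copy_helper {
  static void run(const T* start, const T* end, T* target) {
    std::copy(start, end, target);
  }
};

template <typename T>
struct copy_helper<T, true> {
  static void run(const T* start, const T* end, T* target) {
    if (start != end) std::memcpy(target, start, (end - start) * sizeof(T));
  }
};

// Sketch-only counterpart of the removed internal::smart_copy.
template <typename T>
void sketch_smart_copy(const T* start, const T* end, T* target) {
  copy_helper<T, std::is_trivially_copyable<T>::value>::run(start, end, target);
}

After this commit both branches collapse to the plain std::copy call, on the expectation that the standard library supplies the memcpy/memmove specialization for trivially copyable T itself.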

View File

@@ -53,8 +53,8 @@ class CompressedStorage
resize(other.size());
if(other.size()>0)
{
-internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
-internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
+std::copy(other.m_values, other.m_values + m_size, m_values);
+std::copy(other.m_indices, other.m_indices + m_size, m_indices);
}
return *this;
}
@@ -183,14 +183,14 @@ class CompressedStorage
internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
// copy first chunk
-internal::smart_copy(m_values, m_values +id, newValues.ptr());
-internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
+std::copy(m_values, m_values +id, newValues.ptr());
+std::copy(m_indices, m_indices+id, newIndices.ptr());
// copy the rest
if(m_size>id)
{
-internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
-internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
+std::copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
+std::copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
@@ -218,8 +218,8 @@ class CompressedStorage
}
else
{
-internal::smart_copy(m_values+from, m_values+from+chunkSize, m_values+to);
-internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+std::copy(m_values+from, m_values+from+chunkSize, m_values+to);
+std::copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
}
}
@@ -251,8 +251,8 @@ class CompressedStorage
internal::scoped_array<StorageIndex> newIndices(size);
Index copySize = (std::min)(size, m_size);
if (copySize>0) {
-internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
-internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
+std::copy(m_values, m_values+copySize, newValues.ptr());
+std::copy(m_indices, m_indices+copySize, newIndices.ptr());
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());

View File

@@ -147,14 +147,14 @@ public:
// realloc manually to reduce copies
typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
-internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
-internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
+std::copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
+std::copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
-internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
-internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
+std::copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
+std::copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
-internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
-internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
+std::copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
+std::copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
@@ -175,8 +175,8 @@ public:
update_trailing_pointers = true;
}
-internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
-internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
+std::copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
+std::copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
}
// update outer index pointers and innerNonZeros

View File

@@ -767,7 +767,7 @@ class SparseMatrix
initAssignment(other);
if(other.isCompressed())
{
-internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
+std::copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
m_data = other.m_data;
}
else
@@ -982,8 +982,8 @@ protected:
{
Index i = newEntries[k].i;
Index p = newEntries[k].p;
-internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
-internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+std::copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+std::copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
for(Index j=prev_i;j<i;++j)
m_outerIndex[j+1] += k;
if(!isComp)
@@ -995,8 +995,8 @@ protected:
assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
}
{
-internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
-internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+std::copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+std::copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
for(Index j=prev_i+1;j<=m_outerSize;++j)
m_outerIndex[j] += n_entries;
}

View File

@@ -96,7 +96,7 @@ class TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_>
: m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(other.m_dimensions)))
, m_dimensions(other.m_dimensions)
{
-internal::smart_copy(other.m_data, other.m_data+internal::array_prod(other.m_dimensions), m_data);
+std::copy(other.m_data, other.m_data+internal::array_prod(other.m_dimensions), m_data);
}
EIGEN_DEVICE_FUNC Self& operator=(const Self& other)
{

View File

@@ -67,13 +67,11 @@ class array : public std::array<T, N> {
: Base{{v1, v2, v3, v4, v5, v6, v7, v8}} {
EIGEN_STATIC_ASSERT(N == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
-#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(std::initializer_list<T> l) {
eigen_assert(l.size() == N);
-internal::smart_copy(l.begin(), l.end(), &this->front());
+std::copy(l.begin(), l.end(), &this->front());
}
-#endif
};
namespace internal {