I have the following example:
#include <unsupported/Eigen/CXX11/Tensor>
#include <Eigen/Core>
Eigen::Tensor<double, 5> test_tensor(3,3,2,1,1);
test_tensor.setValues({
{{{{1.1}},{{1.1}}},{{{0}},{{0}}},{{{0}},{{0}}}},
{{{{0}}, {{0}}}, {{{1}},{{1}}},{{{0}},{{0}}}},
{{{{0}}, {{0}}}, {{{0}},{{0}}},{{{1}},{{1}}}}
});
// use chip and slice to compute the subtensor sum
template <typename TensorType>
auto tensor_sum(const TensorType& tensor) -> typename TensorType::Scalar {
    using T = typename TensorType::Scalar;
    T sum = 0; // provisional accumulator
    for (int i = 0; i < tensor.size(); ++i) {
        sum += tensor.data()[i];
    }
    return sum;
}
Eigen::Tensor<double, 3> field_slice;
for (int l = 0; l < 3; ++l) {
    for (int m = 0; m < 3; ++m) {
        auto field_slice_ = test_tensor.chip(1, m).chip(0, l);
        field_slice = field_slice_.slice(
            Eigen::array<Eigen::Index, 3>({0, 0, 0}),
            Eigen::array<Eigen::Index, 3>({2, 1, 1}));
        std::cout << "sum slice " << l << " " << m << " " << tensor_sum(field_slice) << std::endl;
    }
}
// use nested loops to compute subtensor sum
double sum;
for (int l = 0; l < 3; ++l) {
    for (int m = 0; m < 3; ++m) {
        sum = 0;
        for (int i = 0; i < 2; ++i) {
            for (int j = 0; j < 1; ++j) {
                for (int k = 0; k < 1; ++k) {
                    sum += test_tensor(l, m, i, j, k);
                }
            }
        }
        std::cout << "sum nested loops " << l << " " << m << " " << sum << std::endl;
    }
}
Which prints out
sum slice 0 0 0
sum slice 0 1 0
sum slice 0 2 1.1
sum slice 1 0 1
sum slice 1 1 1
sum slice 1 2 1.1
sum slice 2 0 1
sum slice 2 1 1
sum slice 2 2 1.1
sum nested loops 0 0 2.2
sum nested loops 0 1 0
sum nested loops 0 2 0
sum nested loops 1 0 0
sum nested loops 1 1 2
sum nested loops 1 2 0
sum nested loops 2 0 0
sum nested loops 2 1 0
sum nested loops 2 2 2
Why are the results different? I suspect that the chip or slice operation isn't working the way it's supposed to. Separating the two steps by first storing the chipped tensor and then creating the slice also didn't change the result. How can I compute the sum of my subtensor without using nested loops?
You are indexing the wrong way here:
auto field_slice_ = test_tensor.chip(1, m).chip(0, l);
Please read the documentation: Eigen-unsupported: Eigen Tensors
<Operation> chip(const Index offset, const Index dim)
A chip is a special kind of slice. It is the subtensor at the given offset in the dimension dim. The returned tensor has one fewer dimension than the input tensor: the dimension dim is removed.
For example, a matrix chip would be either a row or a column of the input matrix.
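To see the argument order concretely, here is a minimal, self-contained sketch with a rank-2 tensor (not from your code, just for illustration): chip(i, 0) fixes dimension 0, i.e. picks row i, while chip(j, 1) fixes dimension 1, i.e. picks column j.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
    Eigen::Tensor<double, 2> mat(2, 3);
    mat.setValues({{1, 2, 3},
                   {4, 5, 6}});

    // chip(offset, dim): the offset comes first, the dimension second
    Eigen::Tensor<double, 1> row0 = mat.chip(0, 0); // fixes dim 0 -> row 0: 1 2 3
    Eigen::Tensor<double, 1> col1 = mat.chip(1, 1); // fixes dim 1 -> column 1: 2 5

    std::cout << row0 << "\n\n" << col1 << std::endl;
}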
So basically you fed the arguments in the wrong order.
auto field_slice_ = test_tensor.chip(m, 1).chip(l, 0);
or, which happens to give the same result here only because the nonzero entries of your example lie on the diagonal (l == m), so swapping the first two indices doesn't change the sums:
auto field_slice_ = test_tensor.chip(m, 0).chip(l, 0);
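As for the last part of your question: once the chip arguments are in the right order, you don't need the hand-written tensor_sum loop at all. The Tensor module provides a sum() reduction that returns a rank-0 tensor, from which you can read the scalar with operator(). A minimal sketch of that approach, reusing your test_tensor (note that the slice to a {2, 1, 1} block is then redundant, since the doubly chipped tensor already has those dimensions):

for (int l = 0; l < 3; ++l) {
    for (int m = 0; m < 3; ++m) {
        // chip(offset, dim): fix dimension 1 at m, then dimension 0 at l, then reduce
        Eigen::Tensor<double, 0> s = test_tensor.chip(m, 1).chip(l, 0).sum();
        std::cout << "sum " << l << " " << m << " " << s() << std::endl;
    }
}

This should print the same values as your nested-loop version.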