Core numerics
The axom::numerics namespace provides convenient matrix and vector representations, along with accompanying manipulation and solver routines.
The following example shows some basic vector operations.
namespace numerics = axom::numerics;
// First and second vectors
double u[] = {4., 1., 0.};
double v[] = {1., 2., 3.};
double w[] = {0., 0., 0.};
std::cout << std::endl;
std::cout << "Originally, u and v are" << std::endl
<< "u = [" << u[0] << ", " << u[1] << ", " << u[2] << "]" << std::endl
<< "v = [" << v[0] << ", " << v[1] << ", " << v[2] << "]"
<< std::endl;
std::cout << std::endl;
// Calculate dot and cross products
double dotprod = numerics::dot_product(u, v, 3);
numerics::cross_product(u, v, w);
std::cout << "The dot product is " << dotprod << " and the cross product is"
<< std::endl
<< "[" << w[0] << ", " << w[1] << ", " << w[2] << "]" << std::endl;
std::cout << std::endl;
// Make u orthogonal to v, then normalize v
numerics::make_orthogonal(u, v, 3);
numerics::normalize(v, 3);
std::cout << "Now orthogonal u and normalized v are" << std::endl
<< "u = [" << u[0] << ", " << u[1] << ", " << u[2] << "]" << std::endl
<< "v = [" << v[0] << ", " << v[1] << ", " << v[2] << "]"
<< std::endl;
std::cout << std::endl;
// Fill a linear space
const int lincount = 45;
double s[lincount];
numerics::linspace(1., 9., s, lincount);
// Find the real roots of a cubic equation.
// (x + 2)(x - 1)(2x - 3) = 2x^3 - x^2 - 7x + 6 = 0 has real roots at
// x = -2, x = 1, x = 1.5.
double coeff[] = {6., -7., -1., 2.};
double roots[3];
int numRoots;
int result = numerics::solve_cubic(coeff, roots, numRoots);
std::cout << "Root-finding returned " << result
<< " (should be 0, success)."
" Found "
<< numRoots << " roots (should be 3)" << std::endl
<< "at x = " << roots[0] << ", " << roots[1] << ", " << roots[2]
<< " (should be x = -2, 1, 1.5 in arbitrary order)." << std::endl;
When run, the example produces the following output:
Originally, u and v are
u = [4, 1, 0]
v = [1, 2, 3]
The dot product is 6 and the cross product is
[3, -12, 7]
Now orthogonal u and normalized v are
u = [3.57143, 0.142857, -1.28571]
v = [0.267261, 0.534522, 0.801784]
Root-finding returned 0 (should be 0, success). Found 3 roots (should be 3)
at x = 1.5, 1, -2 (should be x = -2, 1, 1.5 in arbitrary order).
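The linspace call in the example fills s with 45 uniformly spaced values between 1 and 9, but the example never prints them. A minimal sketch (not part of the example above) to inspect the array:

// Hypothetical addition: print the first, second, and last samples produced
// by numerics::linspace() to see the spacing of the filled array.
std::cout << "s[0] = " << s[0] << ", s[1] = " << s[1]
          << ", s[" << (lincount - 1) << "] = " << s[lincount - 1] << std::endl;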
The following example code shows how to construct a matrix.
namespace numerics = axom::numerics;
// Here's a 3X3 matrix of double values, initialized from an array.
const int nrows = 3;
const int ncols = 3;
double val[9] = {0.6, 2.4, 1.1, 2.4, 0.6, -.1, 1.1, -.1, 0.6};
numerics::Matrix<double> A(nrows, ncols, val, true);
// We'll make a 3X3 identity matrix.
// The third argument specifies the value to fill the matrix.
numerics::Matrix<double> m(nrows, ncols, 0.);
m.fillDiagonal(1.);
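As a quick sanity check (a minimal sketch, not part of the example), you can read entries back through the matrix's operator()(row, col) accessor, which also appears later in this section; after fillDiagonal(1.), m should have ones on the diagonal and zeros elsewhere:

// Hypothetical check: print m entry by entry using operator()(row, col).
for(int i = 0; i < nrows; ++i)
{
  for(int j = 0; j < ncols; ++j)
  {
    std::cout << m(i, j) << (j + 1 < ncols ? " " : "\n");
  }
}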
We can add and multiply matrices, vectors, and scalars, find the determinant, and extract upper and lower triangular matrices, as shown in the next example.
std::cout << "Originally, the matrix A = " << std::endl << A << std::endl;
// Multiply, add matrices
numerics::Matrix<double> result(nrows, ncols, 0.);
numerics::matrix_add(A, m, result);
std::cout << "A + identity matrix = " << std::endl << result << std::endl;
numerics::matrix_scalar_multiply(m, 2.);
numerics::matrix_multiply(A, m, result);
std::cout << "A * 2*(identity matrix) = " << std::endl << result << std::endl;
double x1[3] = {1., 2., -.5};
double b1[3];
std::cout << "Vector x1 = [" << x1[0] << ", " << x1[1] << ", " << x1[2] << "]"
<< std::endl;
numerics::matrix_vector_multiply(A, x1, b1);
std::cout << "A * x1 = [" << b1[0] << ", " << b1[1] << ", " << b1[2] << "]"
<< std::endl;
std::cout << std::endl;
// Calculate determinant
std::cout << "Determinant of A = " << numerics::determinant(A) << std::endl;
// Get lower, upper triangle.
// By default the diagonal entries are copied from A, but you can fill the
// main diagonal with ones (as in an identity matrix) by passing true as the
// second argument.
numerics::Matrix<double> ltri = lower_triangular(A);
numerics::Matrix<double> utri = upper_triangular(A, true);
std::cout << "A's lower triangle = " << std::endl << ltri << std::endl;
std::cout << "A's upper triangle (with 1s in the main diagonal) = " << std::endl
<< utri << std::endl;
// Get a column from the matrix.
double* col1 = A.getColumn(1);
std::cout << "A's column 1 is [" << col1[0] << ", " << col1[1] << ", "
<< col1[2] << "]" << std::endl;
The example generates the following output:
Originally, the matrix A =
[ 0.6 2.4 1.1 ]
[ 2.4 0.6 -0.1 ]
[ 1.1 -0.1 0.6 ]
A + identity matrix =
[ 1.6 2.4 1.1 ]
[ 2.4 1.6 -0.1 ]
[ 1.1 -0.1 1.6 ]
A * 2*(identity matrix) =
[ 1.2 4.8 2.2 ]
[ 4.8 1.2 -0.2 ]
[ 2.2 -0.2 1.2 ]
Vector x1 = [1, 2, -0.5]
A * x1 = [4.85, 3.65, 0.6]
Determinant of A = -4.5
A's lower triangle =
[ 0.6 0 0 ]
[ 2.4 0.6 0 ]
[ 1.1 -0.1 0.6 ]
A's upper triangle (with 1s in the main diagonal) =
[ 1 2.4 1.1 ]
[ 0 1 -0.1 ]
[ 0 0 1 ]
A's column 1 is [2.4, 0.6, -0.1]
We can also extract rows and columns. The preceding example shows how to get a column. Since the underlying storage layout of Matrix is column-based, retrieving a row is a little more involved: the call to getRow() retrieves the stride p for accessing row elements as well as the upper bound N on element indexes in the row. The next selection shows how to sum the absolute values of the entries in a row.
IndexType p = 0;
IndexType N = 0;
const T* row = A.getRow(i, p, N);
T row_sum = 0.0;
for(IndexType j = 0; j < N; j += p)
{
  row_sum += utilities::abs(row[j]);
}  // END for all columns
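To make that fragment self-contained, here is a sketch that wraps it in a function and applies it to the 3x3 matrix A from the earlier examples. It assumes IndexType means axom::IndexType and abs comes from axom::utilities; the helper name row_abs_sum is hypothetical, not part of the Axom API.

// Hypothetical helper: sum the absolute values of the entries in row i,
// honoring the stride returned by Matrix::getRow().
template <typename T>
T row_abs_sum(axom::numerics::Matrix<T>& A, axom::IndexType i)
{
  axom::IndexType p = 0;  // stride between consecutive elements of the row
  axom::IndexType N = 0;  // upper bound on the element indexes of the row
  const T* row = A.getRow(i, p, N);
  T row_sum = T(0);
  for(axom::IndexType j = 0; j < N; j += p)
  {
    row_sum += axom::utilities::abs(row[j]);
  }
  return row_sum;
}

For the matrix A printed above, row_abs_sum(A, 0) would return |0.6| + |2.4| + |1.1| = 4.1.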
We can use the power method or the Jacobi method to find the eigenvalues and eigenvectors of a matrix. The power method is a stochastic algorithm: starting from random vectors, it applies repeated matrix-vector multiplications to produce approximations of a matrix's eigenvalues and eigenvectors. The Jacobi method is also iterative, but it is not stochastic and tends to converge much more quickly and stably than other methods; however, it is only applicable to symmetric matrices. In the following snippet, we apply both methods to the same matrix to show that they produce the same answer.
Note
As of August 2020, the API of eigen_solve is not consistent with jacobi_eigensolve (eigen_solve takes a double pointer as input instead of a Matrix, and the return codes differ). This is an issue we're fixing.
// Solve for eigenvectors and values using the power method
// The power method calls rand(), so we need to initialize it with srand().
std::srand(std::time(nullptr));
double eigvec[nrows * ncols];
double eigval[nrows];
int res = numerics::eigen_solve(A, nrows, eigvec, eigval);
std::cout << "Tried to find " << nrows
<< " eigenvectors and values for"
" matrix "
<< std::endl
<< A << std::endl
<< "and the result code was " << res << " (1 = success)." << std::endl
<< std::endl;
if(res > 0)
{
  for(int i = 0; i < nrows; ++i)
  {
    display_eigs(eigvec, eigval, nrows, i);
  }
}
std::cout << std::endl;
// Solve for eigenvectors and values using the Jacobi method.
numerics::Matrix<double> evecs(nrows, ncols);
res = numerics::jacobi_eigensolve(A, evecs, eigval);
std::cout << "Using the Jacobi method, tried to find eigenvectors and "
"eigenvalues of matrix "
<< std::endl
<< A << std::endl
<< "and the result code was " << res << " ("
<< numerics::JACOBI_EIGENSOLVE_SUCCESS << " = success)." << std::endl
<< std::endl;
if(res == numerics::JACOBI_EIGENSOLVE_SUCCESS)
{
for(int i = 0; i < nrows; ++i)
{
display_eigs(evecs, eigval, i);
}
}
Here is the output of the code example:
Tried to find 3 eigenvectors and values for matrix
[ 0.6 2.4 1.1 ]
[ 2.4 0.6 -0.1 ]
[ 1.1 -0.1 0.6 ]
and the result code was 1 (1 = success).
Eigenvalue 0 = 3.2033 Eigenvector 0 = [0.711931, 0.645731, 0.276015]
Eigenvalue 1 = -2.07901 Eigenvector 1 = [0.701812, -0.64037, -0.312067]
Eigenvalue 2 = 0.675707 Eigenvector 2 = [0.0247596, -0.415881, 0.909082]
Using the Jacobi method, tried to find eigenvectors and eigenvalues of matrix
[ 0.6 2.4 1.1 ]
[ 2.4 0.6 -0.1 ]
[ 1.1 -0.1 0.6 ]
and the result code was 0 (0 = success).
Eigenvalue 0 = -2.07901 Eigenvector 0 = [0.701812, -0.64037, -0.312067]
Eigenvalue 1 = 0.675707 Eigenvector 1 = [0.0247596, -0.415881, 0.909082]
Eigenvalue 2 = 3.2033 Eigenvector 2 = [0.711931, 0.645731, 0.276015]
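As an informal cross-check (a sketch, not part of the example), the three eigenvalues found by either method should sum to the trace of A (0.6 + 0.6 + 0.6 = 1.8) and multiply to its determinant (-4.5, as computed earlier), which agrees with the values printed above:

// Hypothetical check: compare the eigenvalue sum and product against the
// trace (1.8) and determinant (-4.5) of A reported earlier.
double eig_sum = eigval[0] + eigval[1] + eigval[2];
double eig_prod = eigval[0] * eigval[1] * eigval[2];
std::cout << "Eigenvalue sum = " << eig_sum << " (trace of A is 1.8)" << std::endl
          << "Eigenvalue product = " << eig_prod << " (determinant of A is -4.5)"
          << std::endl;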
We can also solve a linear system directly or by using LU decomposition and back-substitution, as shown in the next example.
{
  // Solve a linear system Ax = b
  numerics::Matrix<double> A(nrows, ncols);
  A(0, 0) = 1;  A(0, 1) = 2;  A(0, 2) = 4;
  A(1, 0) = 3;  A(1, 1) = 8;  A(1, 2) = 14;
  A(2, 0) = 2;  A(2, 1) = 6;  A(2, 2) = 13;
  double b[3] = {3., 13., 4.};
  double x[3];

  int rc = numerics::linear_solve(A, b, x);

  std::cout << "Solved for x in the linear system Ax = b, where" << std::endl
            << "A = " << std::endl
            << A << " and b = [" << b[0] << ", " << b[1] << ", " << b[2]
            << "]." << std::endl
            << std::endl
            << "Result code is " << rc << " (0 = success)" << std::endl;

  if(rc == 0)
  {
    std::cout << "Found x = [" << x[0] << ", " << x[1] << ", " << x[2] << "]"
              << std::endl;
  }
}
std::cout << std::endl;
{
  // Solve a linear system Ax = b using LU decomposition and back-substitution
  numerics::Matrix<double> A(nrows, ncols);
  A(0, 0) = 1;  A(0, 1) = 2;  A(0, 2) = 4;
  A(1, 0) = 3;  A(1, 1) = 8;  A(1, 2) = 14;
  A(2, 0) = 2;  A(2, 1) = 6;  A(2, 2) = 13;
  double b[3] = {3., 13., 4.};
  double x[3];
  int pivots[3];

  int rc = numerics::lu_decompose(A, pivots);
  std::cout << "Decomposed to " << std::endl
            << A << " with pivots [" << pivots[0] << ", " << pivots[1] << ", "
            << pivots[2] << "]"
            << " with result " << rc << " (" << numerics::LU_SUCCESS
            << " is success)" << std::endl;

  rc = numerics::lu_solve(A, pivots, b, x);
  if(rc == numerics::LU_SUCCESS)
  {
    std::cout << "Found x = [" << x[0] << ", " << x[1] << ", " << x[2] << "]"
              << std::endl;
  }
}
The example produces the following output:
Solved for x in the linear system Ax = b, where
A =
[ 3 2.66667 4.66667 ]
[ 2 0.666667 5.5 ]
[ 1 -0.666667 3 ]
and b = [3, 13, 4].
Result code is 0 (0 = success)
Found x = [3, 4, -2]
Decomposed to
[ 3 2.66667 4.66667 ]
[ 2 0.666667 5.5 ]
[ 1 -0.666667 3 ]
with pivots [1, 2, 2] with result 0 (0 is success)
Found x = [3, 4, -2]
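To verify the solution (a minimal sketch, not part of the example), note that both linear_solve and lu_decompose overwrite A with its decomposed form, as the printed output shows; the check below therefore rebuilds the original matrix and multiplies it against the solution with matrix_vector_multiply():

// Hypothetical check: rebuild the original A (the solvers overwrite it in
// place) and confirm that A * x reproduces the right-hand side b.
numerics::Matrix<double> A0(nrows, ncols);
A0(0, 0) = 1;  A0(0, 1) = 2;  A0(0, 2) = 4;
A0(1, 0) = 3;  A0(1, 1) = 8;  A0(1, 2) = 14;
A0(2, 0) = 2;  A0(2, 1) = 6;  A0(2, 2) = 13;

double x[3] = {3., 4., -2.};  // the solution printed above
double bcheck[3];
numerics::matrix_vector_multiply(A0, x, bcheck);
std::cout << "A * x = [" << bcheck[0] << ", " << bcheck[1] << ", " << bcheck[2]
          << "] (should equal b = [3, 13, 4])" << std::endl;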