Struct libceed::operator::Operator [−][src]
Implementations
impl<'a> Operator<'a>
[src]
pub fn create<'b>(
ceed: &'a Ceed,
qf: impl Into<QFunctionOpt<'b>>,
dqf: impl Into<QFunctionOpt<'b>>,
dqfT: impl Into<QFunctionOpt<'b>>
) -> Result<Self, CeedError>
[src]
ceed: &'a Ceed,
qf: impl Into<QFunctionOpt<'b>>,
dqf: impl Into<QFunctionOpt<'b>>,
dqfT: impl Into<QFunctionOpt<'b>>
) -> Result<Self, CeedError>
pub fn apply(
&self,
input: &Vector<'_>,
output: &mut Vector<'_>
) -> Result<i32, CeedError>
[src]
&self,
input: &Vector<'_>,
output: &mut Vector<'_>
) -> Result<i32, CeedError>
Apply Operator to a vector
input
- Input Vector
output
- Output Vector
let ne = 4; let p = 3; let q = 4; let ndofs = p * ne - ne + 1; // Vectors let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let u = ceed.vector_from_slice(&vec![1.0; ndofs]).unwrap(); let mut v = ceed.vector(ndofs).unwrap(); v.set_value(0.0); // Restrictions let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu: Vec<i32> = vec![0; p * ne]; for i in 0..ne { indu[p * i + 0] = i as i32; indu[p * i + 1] = (i + 1) as i32; indu[p * i + 2] = (i + 2) as i32; } let ru = ceed .elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu = ceed .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru, &bu, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru, &bu, VectorOpt::Active) .unwrap(); v.set_value(0.0); op_mass.apply(&u, &mut v).unwrap(); // Check let sum: f64 = v.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval 
length computed" );
pub fn apply_add(
&self,
input: &Vector<'_>,
output: &mut Vector<'_>
) -> Result<i32, CeedError>
[src]
&self,
input: &Vector<'_>,
output: &mut Vector<'_>
) -> Result<i32, CeedError>
Apply Operator to a vector and add result to output vector
input
- Input Vector
output
- Output Vector
let ne = 4; let p = 3; let q = 4; let ndofs = p * ne - ne + 1; // Vectors let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let u = ceed.vector_from_slice(&vec![1.0; ndofs]).unwrap(); let mut v = ceed.vector(ndofs).unwrap(); // Restrictions let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu: Vec<i32> = vec![0; p * ne]; for i in 0..ne { indu[p * i + 0] = i as i32; indu[p * i + 1] = (i + 1) as i32; indu[p * i + 2] = (i + 2) as i32; } let ru = ceed .elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu = ceed .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru, &bu, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru, &bu, VectorOpt::Active) .unwrap(); v.set_value(1.0); op_mass.apply_add(&u, &mut v).unwrap(); // Check let sum: f64 = v.view().iter().sum(); assert!( (sum - (2.0 + ndofs as f64)).abs() < 1e-15, "Incorrect 
interval length computed and added" );
pub fn field<'b>(
self,
fieldname: &str,
r: impl Into<ElemRestrictionOpt<'b>>,
b: impl Into<BasisOpt<'b>>,
v: impl Into<VectorOpt<'b>>
) -> Result<Self, CeedError>
[src]
self,
fieldname: &str,
r: impl Into<ElemRestrictionOpt<'b>>,
b: impl Into<BasisOpt<'b>>,
v: impl Into<VectorOpt<'b>>
) -> Result<Self, CeedError>
Provide a field to a Operator for use by its QFunction
fieldname
- Name of the field (to be matched with the name used by the QFunction)
r
- ElemRestriction
b
- Basis in which the field resides, or BasisCollocated if collocated with quadrature points
v
- Vector to be used by Operator, or VectorActive if field is active, or VectorNone if using Weight with the QFunction
let qf = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); let mut op = ceed .operator(&qf, QFunctionOpt::None, QFunctionOpt::None) .unwrap(); // Operator field arguments let ne = 3; let q = 4; let mut ind: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { ind[2 * i + 0] = i as i32; ind[2 * i + 1] = (i + 1) as i32; } let r = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind) .unwrap(); let b = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); // Operator field op = op.field("dx", &r, &b, VectorOpt::Active).unwrap();
pub fn linear_assemble_diagonal(
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
[src]
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
Assemble the diagonal of a square linear Operator
This overwrites a Vector with the diagonal of a linear Operator.
Note: Currently only non-composite Operators with a single field and composite Operators with single field sub-operators are supported.
op
- Operator to assemble QFunction
assembled
- Vector to store assembled Operator diagonal
let ne = 4; let p = 3; let q = 4; let ndofs = p * ne - ne + 1; // Vectors let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u = ceed.vector(ndofs).unwrap(); u.set_value(1.0); let mut v = ceed.vector(ndofs).unwrap(); v.set_value(0.0); // Restrictions let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu: Vec<i32> = vec![0; p * ne]; for i in 0..ne { indu[p * i + 0] = (2 * i) as i32; indu[p * i + 1] = (2 * i + 1) as i32; indu[p * i + 2] = (2 * i + 2) as i32; } let ru = ceed .elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu = ceed .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru, &bu, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru, &bu, VectorOpt::Active) .unwrap(); // Diagonal let mut diag = ceed.vector(ndofs).unwrap(); diag.set_value(0.0); op_mass.linear_assemble_diagonal(&mut diag).unwrap(); // Manual 
diagonal computation let mut true_diag = ceed.vector(ndofs).unwrap(); for i in 0..ndofs { u.set_value(0.0); { let mut u_array = u.view_mut(); u_array[i] = 1.; } op_mass.apply(&u, &mut v).unwrap(); { let v_array = v.view_mut(); let mut true_array = true_diag.view_mut(); true_array[i] = v_array[i]; } } // Check diag.view() .iter() .zip(true_diag.view().iter()) .for_each(|(computed, actual)| { assert!( (*computed - *actual).abs() < 1e-15, "Diagonal entry incorrect" ); });
pub fn linear_assemble_add_diagonal(
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
[src]
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
Assemble the diagonal of a square linear Operator
This sums into a Vector with the diagonal of a linear Operator.
Note: Currently only non-composite Operators with a single field and composite Operators with single field sub-operators are supported.
op
- Operator to assemble QFunction
assembled
- Vector to store assembled Operator diagonal
let ne = 4; let p = 3; let q = 4; let ndofs = p * ne - ne + 1; // Vectors let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u = ceed.vector(ndofs).unwrap(); u.set_value(1.0); let mut v = ceed.vector(ndofs).unwrap(); v.set_value(0.0); // Restrictions let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu: Vec<i32> = vec![0; p * ne]; for i in 0..ne { indu[p * i + 0] = (2 * i) as i32; indu[p * i + 1] = (2 * i + 1) as i32; indu[p * i + 2] = (2 * i + 2) as i32; } let ru = ceed .elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu = ceed .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru, &bu, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru, &bu, VectorOpt::Active) .unwrap(); // Diagonal let mut diag = ceed.vector(ndofs).unwrap(); diag.set_value(1.0); op_mass.linear_assemble_add_diagonal(&mut diag).unwrap(); // Manual 
diagonal computation let mut true_diag = ceed.vector(ndofs).unwrap(); for i in 0..ndofs { u.set_value(0.0); { let mut u_array = u.view_mut(); u_array[i] = 1.; } op_mass.apply(&u, &mut v).unwrap(); { let v_array = v.view_mut(); let mut true_array = true_diag.view_mut(); true_array[i] = v_array[i] + 1.0; } } // Check diag.view() .iter() .zip(true_diag.view().iter()) .for_each(|(computed, actual)| { assert!( (*computed - *actual).abs() < 1e-15, "Diagonal entry incorrect" ); });
pub fn linear_assemble_point_block_diagonal(
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
[src]
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
Assemble the point block diagonal of a square linear Operator
This overwrites a Vector with the point block diagonal of a linear Operator.
Note: Currently only non-composite Operators with a single field and composite Operators with single field sub-operators are supported.
op
- Operator to assemble QFunction
assembled
- Vector to store assembled CeedOperator point block diagonal, provided in row-major form with an ncomp * ncomp block at each node. The dimensions of this vector are derived from the active vector for the CeedOperator. The array has shape [nodes, component out, component in].
let ne = 4; let p = 3; let q = 4; let ncomp = 2; let ndofs = p * ne - ne + 1; // Vectors let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u = ceed.vector(ncomp * ndofs).unwrap(); u.set_value(1.0); let mut v = ceed.vector(ncomp * ndofs).unwrap(); v.set_value(0.0); // Restrictions let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu: Vec<i32> = vec![0; p * ne]; for i in 0..ne { indu[p * i + 0] = (2 * i) as i32; indu[p * i + 1] = (2 * i + 1) as i32; indu[p * i + 2] = (2 * i + 2) as i32; } let ru = ceed .elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu = ceed .basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { // Number of quadrature points let q = qdata.len(); // Iterate over quadrature points for i in 0..q { v[i + 0 * q] = u[i + 1 * q] * qdata[i]; v[i + 1 * q] = u[i + 0 * q] * qdata[i]; } // Return clean error code 0 }; let qf_mass = ceed .q_function_interior(1, Box::new(mass_2_comp)) .unwrap() .input("u", 2, EvalMode::Interp) 
.unwrap() .input("qdata", 1, EvalMode::None) .unwrap() .output("v", 2, EvalMode::Interp) .unwrap(); let op_mass = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru, &bu, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru, &bu, VectorOpt::Active) .unwrap(); // Diagonal let mut diag = ceed.vector(ncomp * ncomp * ndofs).unwrap(); diag.set_value(0.0); op_mass .linear_assemble_point_block_diagonal(&mut diag) .unwrap(); // Manual diagonal computation let mut true_diag = ceed.vector(ncomp * ncomp * ndofs).unwrap(); for i in 0..ndofs { for j in 0..ncomp { u.set_value(0.0); { let mut u_array = u.view_mut(); u_array[i + j * ndofs] = 1.; } op_mass.apply(&u, &mut v).unwrap(); { let v_array = v.view_mut(); let mut true_array = true_diag.view_mut(); for k in 0..ncomp { true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; } } } } // Check diag.view() .iter() .zip(true_diag.view().iter()) .for_each(|(computed, actual)| { assert!( (*computed - *actual).abs() < 1e-15, "Diagonal entry incorrect" ); });
pub fn linear_assemble_add_point_block_diagonal(
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
[src]
&self,
assembled: &mut Vector<'_>
) -> Result<i32, CeedError>
Assemble the point block diagonal of a square linear Operator
This sums into a Vector with the point block diagonal of a linear Operator.
Note: Currently only non-composite Operators with a single field and composite Operators with single field sub-operators are supported.
op
- Operator to assemble QFunction
assembled
- Vector to store assembled CeedOperator point block diagonal, provided in row-major form with an ncomp * ncomp block at each node. The dimensions of this vector are derived from the active vector for the CeedOperator. The array has shape [nodes, component out, component in].
let ne = 4; let p = 3; let q = 4; let ncomp = 2; let ndofs = p * ne - ne + 1; // Vectors let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u = ceed.vector(ncomp * ndofs).unwrap(); u.set_value(1.0); let mut v = ceed.vector(ncomp * ndofs).unwrap(); v.set_value(0.0); // Restrictions let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu: Vec<i32> = vec![0; p * ne]; for i in 0..ne { indu[p * i + 0] = (2 * i) as i32; indu[p * i + 1] = (2 * i + 1) as i32; indu[p * i + 2] = (2 * i + 2) as i32; } let ru = ceed .elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu = ceed .basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { // Number of quadrature points let q = qdata.len(); // Iterate over quadrature points for i in 0..q { v[i + 0 * q] = u[i + 1 * q] * qdata[i]; v[i + 1 * q] = u[i + 0 * q] * qdata[i]; } // Return clean error code 0 }; let qf_mass = ceed .q_function_interior(1, Box::new(mass_2_comp)) .unwrap() .input("u", 2, EvalMode::Interp) 
.unwrap() .input("qdata", 1, EvalMode::None) .unwrap() .output("v", 2, EvalMode::Interp) .unwrap(); let op_mass = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru, &bu, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru, &bu, VectorOpt::Active) .unwrap(); // Diagonal let mut diag = ceed.vector(ncomp * ncomp * ndofs).unwrap(); diag.set_value(1.0); op_mass .linear_assemble_add_point_block_diagonal(&mut diag) .unwrap(); // Manual diagonal computation let mut true_diag = ceed.vector(ncomp * ncomp * ndofs).unwrap(); for i in 0..ndofs { for j in 0..ncomp { u.set_value(0.0); { let mut u_array = u.view_mut(); u_array[i + j * ndofs] = 1.; } op_mass.apply(&u, &mut v).unwrap(); { let v_array = v.view_mut(); let mut true_array = true_diag.view_mut(); for k in 0..ncomp { true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; } } } } // Check diag.view() .iter() .zip(true_diag.view().iter()) .for_each(|(computed, actual)| { assert!( (*computed - 1.0 - *actual).abs() < 1e-15, "Diagonal entry incorrect" ); });
pub fn create_multigrid_level(
&self,
p_mult_fine: &Vector<'_>,
rstr_coarse: &ElemRestriction<'_>,
basis_coarse: &Basis<'_>
) -> Result<(Operator<'_>, Operator<'_>, Operator<'_>), CeedError>
[src]
&self,
p_mult_fine: &Vector<'_>,
rstr_coarse: &ElemRestriction<'_>,
basis_coarse: &Basis<'_>
) -> Result<(Operator<'_>, Operator<'_>, Operator<'_>), CeedError>
Create a multigrid coarse Operator and level transfer Operators for a given Operator, creating the prolongation basis from the fine and coarse grid interpolation
p_mult_fine
- L-vector multiplicity in parallel gather/scatter
rstr_coarse
- Coarse grid restriction
basis_coarse
- Coarse grid active vector basis
let ne = 15; let p_coarse = 3; let p_fine = 5; let q = 6; let ndofs_coarse = p_coarse * ne - ne + 1; let ndofs_fine = p_fine * ne - ne + 1; // Vectors let x_array = (0..ne + 1) .map(|i| 2.0 * i as f64 / ne as f64 - 1.0) .collect::<Vec<f64>>(); let x = ceed.vector_from_slice(&x_array).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u_coarse = ceed.vector(ndofs_coarse).unwrap(); u_coarse.set_value(1.0); let mut u_fine = ceed.vector(ndofs_fine).unwrap(); u_fine.set_value(1.0); let mut v_coarse = ceed.vector(ndofs_coarse).unwrap(); v_coarse.set_value(0.0); let mut v_fine = ceed.vector(ndofs_fine).unwrap(); v_fine.set_value(0.0); let mut multiplicity = ceed.vector(ndofs_fine).unwrap(); multiplicity.set_value(1.0); // Restrictions let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; for i in 0..ne { for j in 0..p_coarse { indu_coarse[p_coarse * i + j] = (i + j) as i32; } } let ru_coarse = ceed .elem_restriction( ne, p_coarse, 1, 1, ndofs_coarse, MemType::Host, &indu_coarse, ) .unwrap(); let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; for i in 0..ne { for j in 0..p_fine { indu_fine[p_fine * i + j] = (i + j) as i32; } } let ru_fine = ceed .elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu_coarse = ceed .basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss) .unwrap(); let bu_fine = ceed .basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); 
ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass_fine = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru_fine, &bu_fine, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru_fine, &bu_fine, VectorOpt::Active) .unwrap(); // Multigrid setup let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine .create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse) .unwrap(); // Coarse problem u_coarse.set_value(1.0); op_mass_coarse.apply(&u_coarse, &mut v_coarse).unwrap(); // Check let sum: f64 = v_coarse.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" ); // Prolong op_prolong.apply(&u_coarse, &mut u_fine).unwrap(); // Fine problem op_mass_fine.apply(&u_fine, &mut v_fine).unwrap(); // Check let sum: f64 = v_fine.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" ); // Restrict op_restrict.apply(&v_fine, &mut v_coarse).unwrap(); // Check let sum: f64 = v_coarse.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" );
pub fn create_multigrid_level_tensor_H1(
&self,
p_mult_fine: &Vector<'_>,
rstr_coarse: &ElemRestriction<'_>,
basis_coarse: &Basis<'_>,
interpCtoF: &Vec<f64>
) -> Result<(Operator<'_>, Operator<'_>, Operator<'_>), CeedError>
[src]
&self,
p_mult_fine: &Vector<'_>,
rstr_coarse: &ElemRestriction<'_>,
basis_coarse: &Basis<'_>,
interpCtoF: &Vec<f64>
) -> Result<(Operator<'_>, Operator<'_>, Operator<'_>), CeedError>
Create a multigrid coarse Operator and level transfer Operators for a given Operator with a tensor basis for the active basis
p_mult_fine
- L-vector multiplicity in parallel gather/scatter
rstr_coarse
- Coarse grid restriction
basis_coarse
- Coarse grid active vector basis
interp_c_to_f
- Matrix for coarse to fine interpolation
let ne = 15; let p_coarse = 3; let p_fine = 5; let q = 6; let ndofs_coarse = p_coarse * ne - ne + 1; let ndofs_fine = p_fine * ne - ne + 1; // Vectors let x_array = (0..ne + 1) .map(|i| 2.0 * i as f64 / ne as f64 - 1.0) .collect::<Vec<f64>>(); let x = ceed.vector_from_slice(&x_array).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u_coarse = ceed.vector(ndofs_coarse).unwrap(); u_coarse.set_value(1.0); let mut u_fine = ceed.vector(ndofs_fine).unwrap(); u_fine.set_value(1.0); let mut v_coarse = ceed.vector(ndofs_coarse).unwrap(); v_coarse.set_value(0.0); let mut v_fine = ceed.vector(ndofs_fine).unwrap(); v_fine.set_value(0.0); let mut multiplicity = ceed.vector(ndofs_fine).unwrap(); multiplicity.set_value(1.0); // Restrictions let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; for i in 0..ne { for j in 0..p_coarse { indu_coarse[p_coarse * i + j] = (i + j) as i32; } } let ru_coarse = ceed .elem_restriction( ne, p_coarse, 1, 1, ndofs_coarse, MemType::Host, &indu_coarse, ) .unwrap(); let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; for i in 0..ne { for j in 0..p_fine { indu_fine[p_fine * i + j] = (i + j) as i32; } } let ru_fine = ceed .elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu_coarse = ceed .basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss) .unwrap(); let bu_fine = ceed .basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); 
ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass_fine = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru_fine, &bu_fine, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru_fine, &bu_fine, VectorOpt::Active) .unwrap(); // Multigrid setup let mut interp_c_to_f: Vec<f64> = vec![0.; p_coarse * p_fine]; { let mut coarse = ceed.vector(p_coarse).unwrap(); let mut fine = ceed.vector(p_fine).unwrap(); let basis_c_to_f = ceed .basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto) .unwrap(); for i in 0..p_coarse { coarse.set_value(0.0); { let mut array = coarse.view_mut(); array[i] = 1.; } basis_c_to_f .apply( 1, TransposeMode::NoTranspose, EvalMode::Interp, &coarse, &mut fine, ) .unwrap(); let array = fine.view(); for j in 0..p_fine { interp_c_to_f[j * p_coarse + i] = array[j]; } } } let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine .create_multigrid_level_tensor_H1(&multiplicity, &ru_coarse, &bu_coarse, &interp_c_to_f) .unwrap(); // Coarse problem u_coarse.set_value(1.0); op_mass_coarse.apply(&u_coarse, &mut v_coarse).unwrap(); // Check let sum: f64 = v_coarse.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" ); // Prolong op_prolong.apply(&u_coarse, &mut u_fine).unwrap(); // Fine problem op_mass_fine.apply(&u_fine, &mut v_fine).unwrap(); // Check let sum: f64 = v_fine.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" ); // Restrict op_restrict.apply(&v_fine, &mut v_coarse).unwrap(); // Check let sum: f64 = 
v_coarse.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" );
pub fn create_multigrid_level_H1(
&self,
p_mult_fine: &Vector<'_>,
rstr_coarse: &ElemRestriction<'_>,
basis_coarse: &Basis<'_>,
interpCtoF: &[f64]
) -> Result<(Operator<'_>, Operator<'_>, Operator<'_>), CeedError>
[src]
&self,
p_mult_fine: &Vector<'_>,
rstr_coarse: &ElemRestriction<'_>,
basis_coarse: &Basis<'_>,
interpCtoF: &[f64]
) -> Result<(Operator<'_>, Operator<'_>, Operator<'_>), CeedError>
Create a multigrid coarse Operator and level transfer Operators for a given Operator with a non-tensor basis for the active basis
p_mult_fine
- L-vector multiplicity in parallel gather/scatter
rstr_coarse
- Coarse grid restriction
basis_coarse
- Coarse grid active vector basis
interp_c_to_f
- Matrix for coarse to fine interpolation
let ne = 15; let p_coarse = 3; let p_fine = 5; let q = 6; let ndofs_coarse = p_coarse * ne - ne + 1; let ndofs_fine = p_fine * ne - ne + 1; // Vectors let x_array = (0..ne + 1) .map(|i| 2.0 * i as f64 / ne as f64 - 1.0) .collect::<Vec<f64>>(); let x = ceed.vector_from_slice(&x_array).unwrap(); let mut qdata = ceed.vector(ne * q).unwrap(); qdata.set_value(0.0); let mut u_coarse = ceed.vector(ndofs_coarse).unwrap(); u_coarse.set_value(1.0); let mut u_fine = ceed.vector(ndofs_fine).unwrap(); u_fine.set_value(1.0); let mut v_coarse = ceed.vector(ndofs_coarse).unwrap(); v_coarse.set_value(0.0); let mut v_fine = ceed.vector(ndofs_fine).unwrap(); v_fine.set_value(0.0); let mut multiplicity = ceed.vector(ndofs_fine).unwrap(); multiplicity.set_value(1.0); // Restrictions let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, q, 1, q * ne, strides) .unwrap(); let mut indx: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { indx[2 * i + 0] = i as i32; indx[2 * i + 1] = (i + 1) as i32; } let rx = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx) .unwrap(); let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; for i in 0..ne { for j in 0..p_coarse { indu_coarse[p_coarse * i + j] = (i + j) as i32; } } let ru_coarse = ceed .elem_restriction( ne, p_coarse, 1, 1, ndofs_coarse, MemType::Host, &indu_coarse, ) .unwrap(); let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; for i in 0..ne { for j in 0..p_fine { indu_fine[p_fine * i + j] = (i + j) as i32; } } let ru_fine = ceed .elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine) .unwrap(); // Bases let bx = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); let bu_coarse = ceed .basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss) .unwrap(); let bu_fine = ceed .basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss) .unwrap(); // Build quadrature data let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); 
ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &rx, &bx, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap() .apply(&x, &mut qdata) .unwrap(); // Mass operator let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap(); let op_mass_fine = ceed .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("u", &ru_fine, &bu_fine, VectorOpt::Active) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, &qdata) .unwrap() .field("v", &ru_fine, &bu_fine, VectorOpt::Active) .unwrap(); // Multigrid setup let mut interp_c_to_f: Vec<f64> = vec![0.; p_coarse * p_fine]; { let mut coarse = ceed.vector(p_coarse).unwrap(); let mut fine = ceed.vector(p_fine).unwrap(); let basis_c_to_f = ceed .basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto) .unwrap(); for i in 0..p_coarse { coarse.set_value(0.0); { let mut array = coarse.view_mut(); array[i] = 1.; } basis_c_to_f .apply( 1, TransposeMode::NoTranspose, EvalMode::Interp, &coarse, &mut fine, ) .unwrap(); let array = fine.view(); for j in 0..p_fine { interp_c_to_f[j * p_coarse + i] = array[j]; } } } let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine .create_multigrid_level_H1(&multiplicity, &ru_coarse, &bu_coarse, &interp_c_to_f) .unwrap(); // Coarse problem u_coarse.set_value(1.0); op_mass_coarse.apply(&u_coarse, &mut v_coarse).unwrap(); // Check let sum: f64 = v_coarse.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" ); // Prolong op_prolong.apply(&u_coarse, &mut u_fine).unwrap(); // Fine problem op_mass_fine.apply(&u_fine, &mut v_fine).unwrap(); // Check let sum: f64 = v_fine.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" ); // Restrict op_restrict.apply(&v_fine, &mut v_coarse).unwrap(); // Check let sum: f64 = 
v_coarse.view().iter().sum(); assert!( (sum - 2.0).abs() < 1e-15, "Incorrect interval length computed" );
Trait Implementations
impl<'a> Display for Operator<'a>
[src]
View an Operator
let qf = ceed.q_function_interior_by_name("Mass1DBuild").unwrap(); // Operator field arguments let ne = 3; let q = 4 as usize; let mut ind: Vec<i32> = vec![0; 2 * ne]; for i in 0..ne { ind[2 * i + 0] = i as i32; ind[2 * i + 1] = (i + 1) as i32; } let r = ceed .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind) .unwrap(); let strides: [i32; 3] = [1, q as i32, q as i32]; let rq = ceed .strided_elem_restriction(ne, 2, 1, q * ne, strides) .unwrap(); let b = ceed .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss) .unwrap(); // Operator fields let op = ceed .operator(&qf, QFunctionOpt::None, QFunctionOpt::None) .unwrap() .field("dx", &r, &b, VectorOpt::Active) .unwrap() .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None) .unwrap() .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active) .unwrap(); println!("{}", op);
Auto Trait Implementations
impl<'a> RefUnwindSafe for Operator<'a>
impl<'a> !Send for Operator<'a>
impl<'a> !Sync for Operator<'a>
impl<'a> Unpin for Operator<'a>
impl<'a> UnwindSafe for Operator<'a>
Blanket Implementations
impl<T> Any for T where
T: 'static + ?Sized,
[src]
T: 'static + ?Sized,
impl<T> Borrow<T> for T where
T: ?Sized,
[src]
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
[src]
T: ?Sized,
pub fn borrow_mut(&mut self) -> &mut T
[src]
impl<T> From<T> for T
[src]
impl<T, U> Into<U> for T where
U: From<T>,
[src]
U: From<T>,
impl<T> ToString for T where
T: Display + ?Sized,
[src]
T: Display + ?Sized,
impl<T, U> TryFrom<U> for T where
U: Into<T>,
[src]
U: Into<T>,
type Error = Infallible
The type returned in the event of a conversion error.
pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
[src]
impl<T, U> TryInto<U> for T where
U: TryFrom<T>,
[src]
U: TryFrom<T>,