Commit b937c3e0 authored by Tammy Kolda's avatar Tammy Kolda

Fixing scaling for output of fg_est.

Scaled the function and gradient values provided by fg_est so that
they match fg in expectation (up to factor of 2 due to choice of
the objective function).
parent e43bc660
......@@ -17,4 +17,4 @@ function X = spones(X)
% require a license from the United States Government.
% The full license terms can be found in the file LICENSE.txt
X.vals = ones(size(X.vals));
\ No newline at end of file
X.vals = ones(size(X.vals));
......@@ -243,7 +243,8 @@ if (nargin == 1)
end % nargin == 1
% SPECIAL CASE for INTERACTION WITH MEX FILES
% SPECIAL CASE for INTERACTION WITH MEX FILES OR DIRECT CREATION OF
% SPTENSOR WITHOUT ANY SORTING OR OTHER STANDARD CHECKS
if (nargin == 4) && (isnumeric(varargin{4})) && (varargin{4} == 0)
% Store everything
......
function V = mttkrps(X,U)
%MTTKRPS Sequence of MTTKRP calculations for a tensor.
%
%   V = MTTKRPS(X,U) computes a cell array V such that
%   V{k} = mttkrp(X, U, k) for k = 1,...,ndims(X), sharing partial
%   contractions between modes instead of computing each MTTKRP from
%   scratch.
%
%   See also MTTKRP.
%
%   MATLAB Tensor Toolbox.
%   Copyright 2018, Sandia Corporation.
%   Written by J. Duersch, 2018.
%
% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others.
% http://www.sandia.gov/~tgkolda/TensorToolbox.
% Copyright (2018) Sandia Corporation. Under the terms of Contract
% DE-AC04-94AL85000, there is a non-exclusive license for use of this
% work by or on behalf of the U.S. Government. Export of this data may
% require a license from the United States Government.
% The full license terms can be found in the file LICENSE.txt

% Tensor order and the split point that minimizes the memory footprint
% of the two partial contractions.
dims = size(X);
d = length(dims);
s = min_split(dims);

% Output sequence: V{k} = mttkrp(X,U,k).
V = cell(d,1);

% Right-partial pass: contract modes s+1:d via their Khatri-Rao product.
% The intermediate P carries modes <m1, m2, ... , ms, C>.
Kright = khatrirao(U{s+1:d},'r');
P = reshape(X.data,[],size(Kright,1)) * Kright;
for k = 1:s-1
    % Loop entry invariant: P has modes <mk, ... , ms, C>.
    V{k} = mttv_mid(P, U(k+1:s));
    % Contract mode k to restore the invariant for the next iteration.
    P = mttv_left(P, U{k});
end
% Exit state: P has modes <ms, C>, i.e. exactly mttkrp(X,U,s).
V{s} = P;

% Left-partial pass: contract modes 1:s via their Khatri-Rao product.
% The intermediate P carries modes <m{s+1}, m{s+2}, ... , md, C>.
Kleft = khatrirao(U{1:s},'r');
P = reshape(X.data,size(Kleft,1),[])' * Kleft;
for k = s+1:d-1
    % Loop entry invariant: P has modes <mk, ... , md, C>.
    V{k} = mttv_mid(P, U(k+1:d));
    % Contract mode k to restore the invariant.
    P = mttv_left(P, U{k});
end
% Exit state: P has modes <md, C>.
V{d} = P;
end
function W_out = mttv_left(W_in, U1)
% W_out = mttv_left(W_in, U1)
% Contract the leading mode of a partial MTTKRP intermediate against the
% matching factor matrix U1, one rank-1 component at a time. The leading
% mode is the fastest-varying one: consecutive values of its index
% address consecutive memory offsets.
%
% W_in has modes in natural descending order: <m1, m2, ... , mN, C>.
% Mode m1 is either the first mode or an intermediate mode of the
% original tensor; m2 through mN are subsequent original modes. The last
% mode C is the component mode (indexed over rank-1 components 1:r),
% i.e. the columns of the factor matrices.
% U1 is the factor matrix matching mode m1, with modes <m1, C>.
% W_out has modes: <m2, ... , mN, C>.
ncomp = size(U1,2);
% Expose mode m1 and the component mode as explicit array dimensions.
cube = reshape(W_in, size(U1,1), [], ncomp);
W_out = zeros(size(cube,2), ncomp);
for c = 1:ncomp
    % Contract mode m1 of component c with column c of U1.
    W_out(:,c) = cube(:,:,c)' * U1(:,c);
end
end
function V = mttv_mid(W_in, U_mid)
% V = mttv_mid(W_in, U_mid)
% Contract away every intermediate mode of a partial MTTKRP intermediate
% using the matching cell array of factor matrices.
%
% W_in has modes in natural descending order: <m1, m2, ... , mN, C>.
% Mode m1 is either the first mode or an intermediate mode of the
% original tensor; m2 through mN are subsequent original modes. The last
% mode C is the component mode (indexed over rank-1 components 1:r),
% i.e. the columns of the factor matrices.
% U_mid is the matching cell array of factor matrices: U_mid{1} has
% modes <m2, C>, U_mid{2} has modes <m3, C>, and so on. It must cover
% every intermediate uncontracted mode exactly.
% V is the final MTTKRP result with modes: <m1, C>.
if isempty(U_mid)
    % No intermediate modes remain; W_in already is <m1, C>.
    V = W_in;
else
    % Khatri-Rao product collapses modes m2..mN into a single mode.
    K = khatrirao(U_mid,'r');
    ncomp = size(K,2);
    cube = reshape(W_in, [], size(K,1), ncomp);
    V = zeros(size(cube,1), ncomp);
    for c = 1:ncomp
        % Contract the collapsed intermediate mode for component c.
        V(:,c) = cube(:,:,c) * K(:,c);
    end
end
end
function [s_min]=min_split(sz)
% [s_min]=min_split(sz)
% Scan for the splitting point with minimal memory footprint for the
% two partial MTTKRP computations.
%
% sz gives the size of each dimension of the original tensor in natural
% descending order.
% s_min is the split that minimizes the partial-MTTKRP memory footprint:
% modes 1:s_min contract in the left-partial computation and modes
% s_min+1:d contract in the right-partial computation.

% Start with mode 1 alone on the left and everything else on the right.
left_size = sz(1);
right_size = prod(sz(2:end));
s_min = 1;
% Greedily move modes from the right to the left as long as doing so
% reduces left_size + right_size.
for s = 2:length(sz)-1
    % Peel mode s off the right and test where it belongs.
    right_size = right_size/sz(s);
    if left_size >= right_size
        % Mode s belongs back on the right; since left_size only grows,
        % no later mode can improve the sum either.
        break;
    end
    % Placing mode s on the left reduces the sum.
    s_min = s;
    left_size = left_size*sz(s);
end
end
......@@ -27,6 +27,7 @@
% mrdivide - Slash right division for tensors.
% mtimes - tensor-scalar multiplication.
% mttkrp - Matricized tensor times Khatri-Rao product for tensor.
% mttkrps - Sequence of MTTKRP calculations for dense tensor.
% ndims - Return the number of dimensions of a tensor.
% ne - Not equal (~=) for tensors.
% nnz - Number of nonzeros for tensors.
......
......@@ -14,7 +14,8 @@
- [ ] Add a pointer to this documentation file in `doc\html\helptoc.xml`
- [ ] Add pointers in any related higher-level files, e.g., a new method for CP should be referenced in the `cp.html` file
- [ ] Add link to HTML documentation from help comments in function
- [ ] Update search database by running: builddocsearchdb('[full path to tensor_toolbox/doc/html directory]')
- [ ] **Tests** Create or update tests in the `tests` directory, especially for bug fixes or strongly encouraged for new code.
- [ ] **Contents** If new functions were added to a class, go to the `maintenance` directory and run `update_classlist('Class',XXX)` to add the new functions to the class XXX help information. If new functions were added at
......
......@@ -7,9 +7,10 @@ Primary POC: Tamara G. Kolda, [email protected]
* Robert Bassett - Cleaning up symmetric tensor methods and classes (`cp_sym`, `symtensor`, `symktensor`)
* Casey Battaglino - Randomized CP (`cp_arls`)
* Eric C. Chi - Alternating Poisson Regression with Multiplicative Updates (`cp_apr`)
* Jed Duersch - Improved K-tensor full ('ktensor/full')
* Jed Duersch - Improved K-tensor full ('ktensor/full') and Generalized CP (`gcp_opt`)
* Daniel M. Dunlavy - CP with optimization and missing data (`cp_opt`, `cp_wopt`), various fixes
* Samantha Hansen - Alternating Poisson Regression with QN or Newton Updates (`cp_apr`)
* David Hong - Generalized CP (`gcp_opt`)
* Jackson Mayo - Eigenvalue methods (`eig_sshopm`, `eig_sshopmc`, `eig_geap`)
* Todd Plantenga - Alternating Poisson Regression with QN or Newton Updates (`cp_apr`)
* Jimeng Sun - Memory-efficient Tucker (distributed with version 2.6 or earlier)
......
% Tensor Toolbox (Sandia National Labs)
% Version 3.0-dev 05-Apr-2018
% Version 3.0-dev 26-Apr-2019
% Tensor Toolbox for dense, sparse, and decomposed n-way arrays.
%
% Tensor Toolbox Classes:
......@@ -14,36 +14,38 @@
% sptenmat - Sparse tensor as matrix.
%
% Tensor Toolbox Functions:
% cp_als - Compute a CP decomposition of any type of tensor.
% cp_apr - Compute nonnegative CP with alternating Poisson regression.
% cp_arls - CP decomposition of dense tensor via randomized least squares.
% cp_nmu - Compute nonnegative CP with multiplicative updates.
% cp_opt - Fits a CP model to a tensor via optimization.
% cp_sym - Fit a symmetric P model to the symmetric input tensor.
% cp_wopt - Fits a weighted CP model to a tensor via optimization.
% create_guess - Creates initial guess for CP or Tucker fitting.
% create_problem - Create test problems for tensor factorizations.
% eig_geap - Shifted power method for generalized tensor eigenproblem.
% eig_sshopm - Shifted power method for finding real eigenpair of real tensor.
% eig_sshopmc - Shifted power method for real/complex eigenpair of tensor.
% export_data - Export tensor-related data to a file.
% hosvd - Compute sequentially-truncated higher-order SVD (Tucker).
% import_data - Import tensor-related data to a file.
% khatrirao - Khatri-Rao product of matrices.
% matrandcong - Create a random matrix with a fixed congruence.
% matrandnorm - Normalizes columns of X so that each is unit 2-norm.
% matrandorth - Generates random n x n orthogonal real matrix.
% sptendiag - Creates a sparse tensor with v on the diagonal.
% sptenrand - Sparse uniformly distributed random tensor.
% tendiag - Creates a tensor with v on the diagonal.
% teneye - Create identity tensor of specified size.
% tenones - Ones tensor.
% tenrand - Uniformly distributed pseudo-random tensor.
% tenrandblk - Generate nearly block diagonal tensor.
% tenzeros - Create zeros tensor.
% tt_ind2sub - Multiple subscripts from linear indices.
% tt_sub2ind - Converts multidimensional subscripts to linear indices.
% tucker_als - Higher-order orthogonal iteration.
% tucker_sym - Symmetric Tucker approximation.
% cp_als - Compute a CP decomposition of any type of tensor.
% cp_apr - Compute nonnegative CP with alternating Poisson regression.
% cp_arls - CP decomposition of dense tensor via randomized least squares.
% cp_nmu - Compute nonnegative CP with multiplicative updates.
% cp_opt - Fits a CP model to a tensor via optimization.
% cp_sym - Fit a symmetric CP model to the symmetric input tensor.
% cp_wopt - Fits a weighted CP model to a tensor via optimization.
% create_guess - Creates initial guess for CP or Tucker fitting.
% create_problem - Create test problems for tensor factorizations.
% create_problem_binary - Creates random low-rank 0/1 tensor.
% eig_geap - Shifted power method for generalized tensor eigenproblem.
% eig_sshopm - Shifted power method for finding real eigenpair of real tensor.
% eig_sshopmc - Shifted power method for real/complex eigenpair of tensor.
% export_data - Export tensor-related data to a file.
% gcp_opt - Fits Generalized CP decomposition with user-specified function.
% hosvd - Compute sequentially-truncated higher-order SVD (Tucker).
% import_data - Import tensor-related data from a file.
% khatrirao - Khatri-Rao product of matrices.
% matrandcong - Create a random matrix with a fixed congruence.
% matrandnorm - Normalizes columns of X so that each is unit 2-norm.
% matrandorth - Generates random n x n orthogonal real matrix.
% sptendiag - Creates a sparse tensor with v on the diagonal.
% sptenrand - Sparse uniformly distributed random tensor.
% tendiag - Creates a tensor with v on the diagonal.
% teneye - Create identity tensor of specified size.
% tenones - Ones tensor.
% tenrand - Uniformly distributed pseudo-random tensor.
% tenrandblk - Generate nearly block diagonal tensor.
% tenzeros - Create zeros tensor.
% tt_ind2sub - Multiple subscripts from linear indices.
% tt_sub2ind - Converts multidimensional subscripts to linear indices.
% tucker_als - Higher-order orthogonal iteration.
% tucker_sym - Symmetric Tucker approximation.
%
% <a href="matlab:web(strcat('file://',fullfile(getfield(what('tensor_toolbox'),'path'),'doc','html','index.html')))">Documentation page for Tensor Toolbox</a>
......@@ -17,7 +17,11 @@ Changes from Version 2.6 (February 6, 2015)
without actually forming the result.
- Added new CP-ARLS method that does alternating *RANDOMIZED* least
squares fitting for the CP decomposition per Battaglino et al.
- Add SPTENSOR/SPONES function that replaces nonzero sparse tensor
- New GCP_OPT method for generalized CP.
- New CREATE_PROBLEM_BINARY method for generating problems where the
low-rank model corresponds to the odds of a 1.
- Improve KTENSOR/FULL function.
- Added SPTENSOR/SPONES function that replaces nonzero sparse tensor
elements with ones.
- Removed memory-efficient Tucker (met) code.
- Fixed formatting of lambda in ktensor/disp.
......
......@@ -232,6 +232,7 @@ printOuterItn = params.Results.printitn;
stoptime = params.Results.stoptime;
stoptol = params.Results.stoptol;
out = [];
% Extract the number of modes in tensor X.
N = ndims(X);
......@@ -259,6 +260,7 @@ end
% Initialize output arrays.
fnEvals = zeros(maxOuterIters,1);
fnVals = zeros(maxOuterIters,1);
kktViolations = -ones(maxOuterIters,1);
nInnerIters = zeros(maxOuterIters,1);
nzeros = zeros(maxOuterIters,1);
......@@ -487,8 +489,9 @@ for iter = 1:maxOuterIters
% Print outer iteration status.
if (mod(iter,printOuterItn) == 0)
fnVals(iter) = -tt_loglikelihood(X,M);
fprintf('%4d. Ttl Inner Its: %d, KKT viol = %.2e, obj = %.8e, nz: %d\n', ...
iter, nInnerIters(iter), kktViolations(iter), tt_loglikelihood(X,M), ...
iter, nInnerIters(iter), kktViolations(iter), fnVals(iter), ...
num_zero);
end
......@@ -530,6 +533,7 @@ out = struct;
out.params = params.Results;
out.obj = loglike;
out.kktViolations = kktViolations(1:iter);
out.fnVals = fnVals(1:iter);
out.fnEvals = fnEvals(1:iter);
out.nInnerIters = nInnerIters(1:iter);
out.nZeros = nzeros(1:iter);
......@@ -751,6 +755,7 @@ else
end
% Initialize output arrays.
fnVals = zeros(maxOuterIters,1);
fnEvals = zeros(maxOuterIters,1);
kktViolations = -ones(maxOuterIters,1);
nInnerIters = zeros(maxOuterIters,1);
......@@ -943,8 +948,9 @@ for iter = 1:maxOuterIters
% Print outer iteration status.
if (mod(iter,printOuterItn) == 0)
fnVals(iter) = -tt_loglikelihood(X,M);
fprintf('%4d. Ttl Inner Its: %d, KKT viol = %.2e, obj = %.8e, nz: %d\n', ...
iter, nInnerIters(iter), kktViolations(iter), tt_loglikelihood(X,M), ...
iter, nInnerIters(iter), kktViolations(iter), fnVals(iter), ...
num_zero);
end
......@@ -990,6 +996,7 @@ out.params = params.Results;
out.obj = loglike;
out.kktViolations = kktViolations(1:iter);
out.fnEvals = fnEvals(1:iter);
out.fnVals = fnVals(1:iter);
out.nInnerIters = nInnerIters(1:iter);
out.nZeros = nzeros(1:iter);
out.times = times(1:iter);
......
function [X,Mtrue,info] = create_problem_binary(sz,r,varargin)
%CREATE_PROBLEM_BINARY Creates random low-rank 0/1 tensor.
%
% [X,M,INFO] = CREATE_PROBLEM_BINARY(SZ,R,'param','value') creates an
% sptensor X of size SZ from the low-rank ktensor M of rank R that
% corresponds to the *odds* of a 1 in each position. The parameters that
% control this are as follows:
%
% 'state' - State of random number generator, for reproducing results.
% 'loprob' - Probability of 'noise' one. Default: 0.01.
% 'hiprob' - Probability of 'structural' one. Default: 0.90.
% 'density' - Density of structural entries. Default: 1/r.
% 'verbosity' - Output: 0: None, 1: Minimal (default), 2: Detailed.
% 'spgen' - Avoid explicitly forming low-rank tensor. Default: False.
% 'Mtrue' - Use the supplied ktensor as the true model instead of
% generating one at random. Default: [] (generate).
%
% REFERENCES:
% * T. G. Kolda, D. Hong, J. Duersch. Stochastic Gradients for
% Large-Scale Tensor Decomposition, 2019.
%
% See also: GCP_OPT, CREATE_PROBLEM.
%
%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation.
% Created by Tamara G. Kolda, Fall 2018. Includes work with
%% Random set-up
% Grab a handle to the global RNG stream: its current state supplies the
% default for 'state', and it is reset below for reproducibility.
defaultStream = RandStream.getGlobalStream;
%% Set algorithm parameters from input or by using defaults
params = inputParser;
params.addParameter('state', defaultStream.State);
params.addParameter('loprob', 0.01, @(x) isscalar(x) && x > 0 && x < 0.1);
params.addParameter('hiprob', 0.9, @(x) isscalar(x) && x > 0 && x < 1);
params.addParameter('density', []);
params.addParameter('verbosity', 1);
params.addParameter('spgen',false);
params.addParameter('Mtrue',[]);
params.parse(varargin{:});
info.params = params.Results;
%% Initialize random number generator with specified state
defaultStream.State = params.Results.state;
%% Extract parameters
loprob = params.Results.loprob;
hiprob = params.Results.hiprob;
density = params.Results.density;
verbosity = params.Results.verbosity;
spgen = params.Results.spgen;
Mtrue = params.Results.Mtrue;
%% Setup
if verbosity > 0
fprintf('Creating random problem instance\n');
end
%% Set up for creating factor matrices
% Density specifies the density of high values in the first r-1 columns of
% the factor matrices.
if isempty(density)
density = 1/r;
end
% Extract the order of the tensor
d = length(sz);
% Convert the high and low probabilities to the dth root of the
% corresponding odds, so that the product of one entry from each of the d
% factor matrices reproduces the desired odds in the full tensor.
loval = nthroot(loprob/(1-loprob),d);
hival = nthroot(hiprob/(1-hiprob),d);
%% Populate factor matrices
% The first (r-1) columns of each factor matrix are sparse per the
% specified density. The nonzero values are normally distributed around the
% hival odds ratio with a standard deviation of 0.5.
% The last column of each factor matrix is dense but low-valued, set to
% the constant loval, corresponding to the general noisiness of binary
% observations.
if isempty(Mtrue)
A = cell(d,1);
for k = 1:d
if r > 1
% Normal draws around hival, masked to the requested density and
% clipped at zero so the odds stay nonnegative.
A1v = random('Normal', hival, 0.5, [sz(k),r-1]);
A1p = rand(sz(k),r-1) < density;
A1 = max(A1v .* A1p, 0);
else
A1 = [];
end
A2 = loval * ones(sz(k),1);
A{k} = [A1,A2];
end
Mtrue = ktensor(A); % Correct solution
else
A = Mtrue.u;
if verbosity > 0
fprintf('Using user-specified choice for Mtrue\n');
end
end
%% Sample the observed tensor X from the model Mtrue
if spgen
% --- Create all-zero sparse tensor ---
X = sptensor(sz);
% --- Compute big entries of X, which are expected to be few ---
if verbosity > 1
fprintf('Generating high probability entries...\n');
end
% Find possible high values corresponding to each component
subs = [];
for j = 1:r-1
% Identify nonzeros in each mode
modeidx = cell(d,1);
for k = 1:d
tmp = A{k}(:,j);
modeidx{k} = find(tmp > 0);
end
% Count nnzs in each factor
cnts = cellfun(@length, modeidx);
% Compute total number of entries from this factor (the Cartesian
% product of the per-mode nonzero index sets)
fcnt = prod(cnts);
if fcnt > 0
% Create the subscripts of those entries
csubs = tt_ind2sub(cnts',(1:fcnt)');
fsubs = zeros(fcnt,d);
for k = 1:d
fsubs(:,k) = modeidx{k}(csubs(:,k));
end
subs = [subs; fsubs];
end
end
% Components may overlap, so remove duplicate subscripts.
subs = unique(subs,'rows');
nhigh_max = size(subs,1);
if verbosity > 1
fprintf('\tmax # high entries = %d\n',nhigh_max);
end
if nhigh_max > 0
% Compute the probabilities at those entries: odds m correspond to
% probability m/(1+m).
Mvals = Mtrue(subs);
Pvals = Mvals ./ (1 + Mvals);
% Bernoulli draw (Binomial with one trial) at each candidate entry.
Xvals = random('Binomial',1,Pvals);
tf = (Xvals == 1);
% Remove the subscripts that don't correspond to ones
% NOTE(review): if tf is all false, hisubs is a 0-by-d matrix and
% X(hisubs) = 1 assigns an empty subscript list -- confirm sptensor
% subsasgn handles this case.
hisubs = subs(tf,:);
X(hisubs) = 1;
nhigh = sum(Xvals);
else
hisubs = [];
nhigh = 0;
end
if verbosity > 1
fprintf('\t# high entries = %d\n',nhigh);
end
% --- Compute the 'noise' from the rest of the entries ---
if verbosity > 1
fprintf('Generating low probability (aka noise) entries...\n');
end
% Number of remaining entries
nloprob = prod(sz) - nhigh_max;
% Randomly compute how many will be 1's, using binomial,
% which we estimate using Poisson since nloprob is large and loprob is
% small.
nlow = random('Poisson', nloprob * loprob, 1);
if verbosity > 1
fprintf('\t# low entries = %d\n',nlow);
end
if nlow > 0
% Choose that many indices, avoiding the already-handled high entries
nlow = random('Poisson', nloprob * loprob, 1); % (see NOTE above)
losubs = tt_sample_zeros(X, tt_sub2ind(sz,hisubs), nlow, 1.1, false);
X(losubs) = 1;
end
if verbosity > 1
fprintf('\tFinished\n');
end
info.nlow = nlow;
info.nhigh_max = nhigh_max;
info.nhigh = nhigh;
else
% Dense path: form the full model, convert odds to probabilities, and
% sample every entry of the tensor at once.
Mtruef = full(Mtrue);
P = Mtruef ./ (1 + Mtruef);
X = sptensor(random('Binomial',1,double(P)));
end
%% GCP-OPT Examples with Amino Acids Dataset
%
% For more details, see <gcp_opt_doc.html Generalized CP Tensor
% Decomposition>.
%% Setup
% We use the well known amino acids dataset for some tests. This data has
% some negative values, but the factorization itself should be nonnegative.
% Load the data
load(fullfile(getfield(what('tensor_toolbox'),'path'),'doc','aminoacids.mat'))
clear M fit
vizopts = {'PlotCommands',{@bar,@(x,y) plot(x,y,'r'),@(x,y) plot(x,y,'g')},...
'BottomSpace',0.1, 'HorzSpace', 0.04, 'Normalize', @(x) normalize(x,'sort',2)};
%% CP-ALS
% Just a reminder of what CP-ALS does.
cnt = 1;
tic, M{cnt} = cp_als(X,3,'printitn',10); toc
fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
fprintf('Fit: %g\n', fit(cnt));
viz(M{cnt},'Figure',cnt,vizopts{:});
%% GCP with Gaussian
% We can instead call the GCP with the Gaussian function.
cnt = 2;
M{cnt} = gcp_opt(X,3,'type','Gaussian','printitn',10);
fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
fprintf('Fit: %g\n', fit(cnt));
viz(M{cnt},'Figure',cnt,vizopts{:});
%% GCP with Gaussian and Missing Data
% What if some data is missing?
cnt = 3;
% Proportion of missing data
p = 0.35;
% Create a mask with the missing entries set to 0 and everything else 1
W = tensor(double(rand(size(X))>p));
% Fit the model, using the 'mask' option
M{cnt} = gcp_opt(X.*W,3,'type','Gaussian','mask',W,'printitn',10);
fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
fprintf('Fit: %g\n', fit(cnt));
viz(M{cnt},'Figure',cnt,vizopts{:});
%% GCP with ADAM
% We can also use stochastic gradient, though it's pretty slow for such a
% small tensor.
cnt = 4;
% Specify 'opt' = 'adam'
M{cnt} = gcp_opt(X,3,'type','Gaussian','opt','adam','printitn',1,'fsamp',5000,'gsamp',500);
fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
fprintf('Fit: %g\n', fit(cnt));
viz(M{cnt},'Figure',cnt,vizopts{:});
%% GCP with Gamma (terrible!)
% We can try Gamma, but it's not really the right distribution and produces
% a terrible result.
cnt = 5;
Y = tensor(X(:) .* (X(:) > 0), size(X));
M{cnt} = gcp_opt(Y,3,'type','Gamma','printitn',25);
fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
fprintf('Fit: %g\n', fit(cnt));
viz(M{cnt},'Figure',cnt,vizopts{:});
%% GCP with Huber + Lower Bound
% Huber works well. By default, Huber has no lower bound. To add one, we
% have to pass in the func/grad/lower information explicitly. We can use