# typed: true

# DO NOT EDIT MANUALLY
# This is an autogenerated file for types exported from the `rumale-linear_model` gem.
# Please instead update this file by running `bin/tapioca gem rumale-linear_model`.

# Rumale is a machine learning library in Ruby.
#
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#5
module Rumale; end
|
|
|
|
# This module consists of the classes that implement generalized linear models.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#7
|
|
module Rumale::LinearModel; end
|
|
|
|
# BaseEstimator is an abstract class for the implementation of linear models. This class is used internally.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#9
|
|
class Rumale::LinearModel::BaseEstimator < ::Rumale::Base::Estimator
|
|
# Return the bias term (a.k.a. intercept).
|
|
#
|
|
# @return [Numo::DFloat] (shape: [n_outputs/n_classes])
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#16
|
|
def bias_term; end
|
|
|
|
# Return the weight vector.
|
|
#
|
|
# @return [Numo::DFloat] (shape: [n_outputs/n_classes, n_features])
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#12
|
|
def weight_vec; end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#22
|
|
def expand_feature(x); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#41
|
|
def fit_bias?; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#27
|
|
def split_weight(w); end
|
|
end
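
# A minimal usage sketch (illustrative only, not part of the generated declarations):
# reading the learned coefficients from a fitted public subclass such as Ridge. It assumes
# `x` (Numo::DFloat, shape: [n_samples, n_features]) and `y` have been prepared by the caller.
#
#   estimator = Rumale::LinearModel::Ridge.new(reg_param: 0.1, fit_bias: true)
#   estimator.fit(x, y)
#   estimator.weight_vec # => Numo::DFloat with the shape documented above
#   estimator.bias_term  # => the fitted bias term (a.k.a. intercept)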
|
|
|
|
# ElasticNet is a class that implements Elastic-net Regression with coordinate descent optimization.
|
|
#
|
|
# *Reference*
|
|
# - Friedman, J., Hastie, T., and Tibshirani, R., "Regularization Paths for Generalized Linear Models via Coordinate Descent," Journal of Statistical Software, 33 (1), pp. 1--22, 2010.
|
|
# - Simon, N., Friedman, J., and Hastie, T., "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression," arXiv preprint arXiv:1311.6529, 2013.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/elastic_net'
|
|
#
|
|
# estimator = Rumale::LinearModel::ElasticNet.new(reg_param: 0.1, l1_ratio: 0.5)
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#23
|
|
class Rumale::LinearModel::ElasticNet < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new Elastic-net regressor.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param l1_ratio [Float] The elastic-net mixing parameter.
|
|
# If l1_ratio = 1, the regularization is similar to Lasso.
|
|
# If l1_ratio = 0, the regularization is similar to Ridge.
|
|
# If 0 < l1_ratio < 1, the regularization is a combination of L1 and L2.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @return [ElasticNet] a new instance of ElasticNet
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#42
|
|
def initialize(reg_param: T.unsafe(nil), l1_ratio: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
# @return [ElasticNet] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#59
|
|
def fit(x, y); end
|
|
|
|
# Return the number of iterations performed in coordinate descent optimization.
|
|
#
|
|
# @return [Integer]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#28
|
|
def n_iter; end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#80
|
|
def predict(x); end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#88
|
|
def partial_fit(x, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#119
|
|
def partial_fit_multi(x, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#155
|
|
def sign(z); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#161
|
|
def single_target?(y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#151
|
|
def soft_threshold(z, threshold); end
|
|
end
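
# The private soft_threshold(z, threshold) and sign(z) helpers above correspond to the
# soft-thresholding step that appears in coordinate-descent updates for L1-regularized
# models. A minimal sketch of the standard operator (illustrative only; the gem's actual
# implementation is not reproduced in this RBI):
#
#   def soft_threshold(z, threshold)
#     sign = z.positive? ? 1.0 : (z.negative? ? -1.0 : 0.0)
#     sign * [z.abs - threshold, 0.0].max
#   end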
|
|
|
|
# Lasso is a class that implements Lasso Regression with coordinate descent optimization.
|
|
#
|
|
# *Reference*
|
|
# - Friedman, J., Hastie, T., and Tibshirani, R., "Regularization Paths for Generalized Linear Models via Coordinate Descent," Journal of Statistical Software, 33 (1), pp. 1--22, 2010.
|
|
# - Simon, N., Friedman, J., and Hastie, T., "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression," arXiv preprint arXiv:1311.6529, 2013.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/lasso'
|
|
#
|
|
# estimator = Rumale::LinearModel::Lasso.new(reg_param: 0.1)
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#23
|
|
class Rumale::LinearModel::Lasso < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new Lasso regressor.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @return [Lasso] a new instance of Lasso
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#38
|
|
def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
# @return [Lasso] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#54
|
|
def fit(x, y); end
|
|
|
|
# Return the number of iterations performed in coordinate descent optimization.
|
|
#
|
|
# @return [Integer]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#28
|
|
def n_iter; end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#75
|
|
def predict(x); end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#83
|
|
def partial_fit(x, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#112
|
|
def partial_fit_multi(x, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#146
|
|
def sign(z); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#152
|
|
def single_target?(y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#142
|
|
def soft_threshold(z, threshold); end
|
|
end
|
|
|
|
# LinearRegression is a class that implements ordinary least square linear regression
|
|
# with singular value decomposition (SVD) or L-BFGS optimization.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/linear_regression'
|
|
#
|
|
# estimator = Rumale::LinearModel::LinearRegression.new
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# # If Numo::Linalg is installed, you can specify 'svd' for the solver option.
|
|
# require 'numo/linalg/autoloader'
|
|
# require 'rumale/linear_model/linear_regression'
|
|
#
|
|
# estimator = Rumale::LinearModel::LinearRegression.new(solver: 'svd')
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#30
|
|
class Rumale::LinearModel::LinearRegression < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new ordinary least square linear regressor.
|
|
#
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# If solver is 'svd', this parameter is ignored.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# If solver is 'svd', this parameter is ignored.
|
|
# @param solver [String] The algorithm to calculate weights. ('auto', 'svd' or 'lbfgs').
|
|
# 'auto' chooses the 'svd' solver if Numo::Linalg is loaded. Otherwise, it chooses the 'lbfgs' solver.
|
|
# 'svd' performs singular value decomposition of samples.
|
|
# 'lbfgs' uses the L-BFGS method for optimization.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# If solver is 'svd', this parameter is ignored.
|
|
# @return [LinearRegression] a new instance of LinearRegression
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#48
|
|
def initialize(fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), solver: T.unsafe(nil), verbose: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
# @return [LinearRegression] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#69
|
|
def fit(x, y); end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#87
|
|
def predict(x); end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#102
|
|
def partial_fit_lbfgs(base_x, base_y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#95
|
|
def partial_fit_svd(x, y); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#129
|
|
def single_target?(y); end
|
|
end
|
|
|
|
# LogisticRegression is a class that implements (multinomial) Logistic Regression.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/logistic_regression'
|
|
#
|
|
# estimator = Rumale::LinearModel::LogisticRegression.new(reg_param: 1.0)
|
|
# estimator.fit(training_samples, training_labels)
|
|
# results = estimator.predict(testing_samples)
|
|
# @note Rumale::SVM provides Logistic Regression based on LIBLINEAR.
|
|
# If you prefer execution speed, you should use Rumale::SVM::LogisticRegression.
|
|
# https://github.com/yoshoku/rumale-svm
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#26
|
|
class Rumale::LinearModel::LogisticRegression < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Classifier
|
|
|
|
# Create a new classifier with Logistic Regression.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# If fit_bias is true, the feature vector v becomes [v; bias_scale].
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @param n_jobs [Integer] The number of jobs for running the predict methods in parallel.
|
|
# If nil is given, the methods do not execute in parallel.
|
|
# If zero or less is given, it becomes equal to the number of processors.
|
|
# This parameter is ignored if the Parallel gem is not loaded.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# 'iterate.dat' file is generated by lbfgsb.rb.
|
|
# @return [LogisticRegression] a new instance of LogisticRegression
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#48
|
|
def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil)); end
|
|
|
|
# Return the class labels.
|
|
#
|
|
# @return [Numo::Int32] (shape: [n_classes])
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#31
|
|
def classes; end
|
|
|
|
# Calculate confidence scores for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#81
|
|
def decision_function(x); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
|
|
# @return [LogisticRegression] The learned classifier itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#66
|
|
def fit(x, y); end
|
|
|
|
# Predict class labels for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
|
|
# @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#91
|
|
def predict(x); end
|
|
|
|
# Predict probability for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#108
|
|
def predict_proba(x); end
|
|
|
|
private
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#178
|
|
def multiclass_problem?; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#123
|
|
def partial_fit(base_x, base_y); end
|
|
end
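
# A short sketch (illustrative only) of the confidence-score and probability interfaces
# documented above; `training_samples`, `training_labels`, and `testing_samples` are
# assumed to be Numo arrays prepared by the caller.
#
#   estimator = Rumale::LinearModel::LogisticRegression.new(reg_param: 1.0)
#   estimator.fit(training_samples, training_labels)
#   estimator.classes                             # => Numo::Int32, shape: [n_classes]
#   estimator.decision_function(testing_samples)  # => Numo::DFloat, shape: [n_samples, n_classes]
#   estimator.predict_proba(testing_samples)      # => Numo::DFloat, shape: [n_samples, n_classes]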
|
|
|
|
# This module consists of the classes that implement loss function for linear model.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#86
|
|
module Rumale::LinearModel::Loss; end
|
|
|
|
# EpsilonInsensitive is a class that calculates the epsilon-insensitive loss for support vector regressor.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#158
|
|
class Rumale::LinearModel::Loss::EpsilonInsensitive
|
|
# @return [EpsilonInsensitive] a new instance of EpsilonInsensitive
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#160
|
|
def initialize(epsilon: T.unsafe(nil)); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#170
|
|
def dloss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#165
|
|
def loss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#180
|
|
def name; end
|
|
end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#185
|
|
Rumale::LinearModel::Loss::EpsilonInsensitive::NAME = T.let(T.unsafe(nil), String)
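
# The loss classes in this module are internal helpers that share the same small interface.
# A sketch of how one is driven (illustrative only; `out` holds model outputs and `y` the
# targets, both assumed to be Numo::DFloat vectors of equal length):
#
#   loss_fn = Rumale::LinearModel::Loss::EpsilonInsensitive.new(epsilon: 0.1)
#   loss_fn.loss(out, y)  # => loss value for the current outputs
#   loss_fn.dloss(out, y) # => derivative of the loss with respect to the outputs
#   loss_fn.name          # => the identifier string stored in NAME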
|
|
|
|
# HingeLoss is a class that calculates hinge loss for support vector classifier.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#133
|
|
class Rumale::LinearModel::Loss::HingeLoss
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#140
|
|
def dloss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#135
|
|
def loss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#148
|
|
def name; end
|
|
end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#153
|
|
Rumale::LinearModel::Loss::HingeLoss::NAME = T.let(T.unsafe(nil), String)
|
|
|
|
# LogLoss is a class that calculates logistic loss for logistic regression.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#111
|
|
class Rumale::LinearModel::Loss::LogLoss
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#118
|
|
def dloss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#113
|
|
def loss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#123
|
|
def name; end
|
|
end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#128
|
|
Rumale::LinearModel::Loss::LogLoss::NAME = T.let(T.unsafe(nil), String)
|
|
|
|
# MeanSquaredError is a class that calculates mean squared error for linear regression model.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#89
|
|
class Rumale::LinearModel::Loss::MeanSquaredError
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#96
|
|
def dloss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#91
|
|
def loss(out, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#101
|
|
def name; end
|
|
end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#106
|
|
Rumale::LinearModel::Loss::MeanSquaredError::NAME = T.let(T.unsafe(nil), String)
|
|
|
|
# NNLS is a class that implements non-negative least squares regression.
|
|
# NNLS solves least squares problem under non-negative constraints on the coefficient using L-BFGS-B method.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/nnls'
|
|
#
|
|
# estimator = Rumale::LinearModel::NNLS.new(reg_param: 0.01)
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#22
|
|
class Rumale::LinearModel::NNLS < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new regressor with the non-negative least squares method.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter for L2 regularization term.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# @return [NNLS] a new instance of NNLS
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#39
|
|
def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
# @return [NNLS] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#56
|
|
def fit(x, y); end
|
|
|
|
# Returns the number of iterations when converged.
|
|
#
|
|
# @return [Integer]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#27
|
|
def n_iter; end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#86
|
|
def predict(x); end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#94
|
|
def nnls_fnc(w, x, y, alpha); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#104
|
|
def single_target?(y); end
|
|
end
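
# A small follow-up to the example above (illustrative only): because the least squares
# problem is solved under non-negative constraints, the learned coefficients are all >= 0.
#
#   estimator = Rumale::LinearModel::NNLS.new(reg_param: 0.01)
#   estimator.fit(training_samples, training_values)
#   estimator.weight_vec.min # => a value >= 0 (non-negative constraint on the coefficients)
#   estimator.n_iter         # => number of iterations performed until convergence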
|
|
|
|
# This module consists of the class that implements stochastic gradient descent (SGD) optimizer.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#52
|
|
module Rumale::LinearModel::Optimizer; end
|
|
|
|
# SGD is a class that implements SGD optimizer.
|
|
# This class is used internally.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#56
|
|
class Rumale::LinearModel::Optimizer::SGD
|
|
# Create a new SGD optimizer.
|
|
#
|
|
# @param learning_rate [Float] The initial value of learning rate.
|
|
# @param momentum [Float] The initial value of momentum.
|
|
# @param decay [Float] The smoothing parameter.
|
|
# @return [SGD] a new instance of SGD
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#62
|
|
def initialize(learning_rate: T.unsafe(nil), momentum: T.unsafe(nil), decay: T.unsafe(nil)); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#75
|
|
def call(weight, gradient); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#70
|
|
def current_learning_rate; end
|
|
end
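
# Although the optimizer is internal, its interface is small. A sketch of a single update
# step (illustrative only; `weight` and `gradient` are assumed to be Numo::DFloat vectors
# of the same shape, and `call` is assumed to return the updated weight):
#
#   optimizer = Rumale::LinearModel::Optimizer::SGD.new(learning_rate: 0.01, momentum: 0.9, decay: 1e-4)
#   weight = optimizer.call(weight, gradient) # one SGD update with momentum
#   optimizer.current_learning_rate           # learning rate used for the current step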
|
|
|
|
# This module consists of the classes that implement penalty (regularization) term.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#9
|
|
module Rumale::LinearModel::Penalty; end
|
|
|
|
# L1Penalty is a class that applies the L1 penalty to the weight vector of a linear model.
|
|
# This class is used internally.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#28
|
|
class Rumale::LinearModel::Penalty::L1Penalty
|
|
# @return [L1Penalty] a new instance of L1Penalty
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#30
|
|
def initialize(reg_param:); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#36
|
|
def call(weight, lr); end
|
|
end
|
|
|
|
# L2Penalty is a class that applies the L2 penalty to the weight vector of a linear model.
|
|
# This class is used internally.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#13
|
|
class Rumale::LinearModel::Penalty::L2Penalty
|
|
# @return [L2Penalty] a new instance of L2Penalty
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#15
|
|
def initialize(reg_param:); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#20
|
|
def call(weight, lr); end
|
|
end
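
# Both penalty classes expose the same `call(weight, lr)` interface. A sketch of applying a
# penalty during an SGD step (illustrative only; `weight` is assumed to be a Numo::DFloat
# vector, `lr` the current learning rate, and `call` is assumed to return the penalized weight):
#
#   penalty = Rumale::LinearModel::Penalty::L2Penalty.new(reg_param: 1e-4)
#   weight = penalty.call(weight, lr) # shrink the weights according to the L2 penalty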
|
|
|
|
# Ridge is a class that implements Ridge Regression
|
|
# with singular value decomposition (SVD) or L-BFGS optimization.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/ridge'
|
|
#
|
|
# estimator = Rumale::LinearModel::Ridge.new(reg_param: 0.1)
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# # If Numo::Linalg is installed, you can specify 'svd' for the solver option.
|
|
# require 'numo/linalg/autoloader'
|
|
# require 'rumale/linear_model/ridge'
|
|
#
|
|
# estimator = Rumale::LinearModel::Ridge.new(reg_param: 0.1, solver: 'svd')
|
|
# estimator.fit(training_samples, training_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#29
|
|
class Rumale::LinearModel::Ridge < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new Ridge regressor.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# If solver is 'svd', this parameter is ignored.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# If solver is 'svd', this parameter is ignored.
|
|
# @param solver [String] The algorithm to calculate weights. ('auto', 'svd', or 'lbfgs').
|
|
# 'auto' chooses the 'svd' solver if Numo::Linalg is loaded. Otherwise, it chooses the 'lbfgs' solver.
|
|
# 'svd' performs singular value decomposition of samples.
|
|
# 'lbfgs' uses the L-BFGS method for optimization.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# If solver is 'svd', this parameter is ignored.
|
|
# @return [Ridge] a new instance of Ridge
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#48
|
|
def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), solver: T.unsafe(nil), verbose: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
# @return [Ridge] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#70
|
|
def fit(x, y); end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#88
|
|
def predict(x); end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#105
|
|
def partial_fit_lbfgs(base_x, base_y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#96
|
|
def partial_fit_svd(x, y); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#132
|
|
def single_target?(y); end
|
|
end
|
|
|
|
# SGDClassifier is a class that implements linear classifier with stochastic gradient descent optimization.
|
|
#
|
|
# *Reference*
|
|
# - Shalev-Shwartz, S., and Singer, Y., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Proc. ICML'07, pp. 807--814, 2007.
|
|
# - Tsuruoka, Y., Tsujii, J., and Ananiadou, S., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty," Proc. ACL'09, pp. 477--485, 2009.
|
|
# - Bottou, L., "Large-Scale Machine Learning with Stochastic Gradient Descent," Proc. COMPSTAT'10, pp. 177--186, 2010.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/sgd_classifier'
|
|
#
|
|
# estimator =
|
|
# Rumale::LinearModel::SGDClassifier.new(loss: 'hinge', reg_param: 1.0, max_iter: 1000, batch_size: 50, random_seed: 1)
|
|
# estimator.fit(training_samples, training_labels)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#25
|
|
class Rumale::LinearModel::SGDClassifier < ::Rumale::LinearModel::SGDEstimator
|
|
include ::Rumale::Base::Classifier
|
|
|
|
# Create a new linear classifier with stochastic gradient descent optimization.
|
|
#
|
|
# @param loss [String] The loss function to be used ('hinge' or 'log_loss').
|
|
# @param learning_rate [Float] The initial value of learning rate.
|
|
# The learning rate decreases as the iteration proceeds according to the equation: learning_rate / (1 + decay * t).
|
|
# @param decay [Float] The smoothing parameter for decreasing learning rate as the iteration proceeds.
|
|
# If nil is given, the decay is set to 'reg_param * learning_rate'.
|
|
# @param momentum [Float] The momentum factor.
|
|
# @param penalty [String] The regularization type to be used ('l1', 'l2', or 'elasticnet').
|
|
# @param l1_ratio [Float] The elastic-net type regularization mixing parameter.
|
|
# If penalty is set to 'l2' or 'l1', this parameter is ignored.
|
|
# If l1_ratio = 1, the regularization is similar to Lasso.
|
|
# If l1_ratio = 0, the regularization is similar to Ridge.
|
|
# If 0 < l1_ratio < 1, the regularization is a combination of L1 and L2.
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param batch_size [Integer] The size of the mini batches.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
|
|
# If nil is given, the methods do not execute in parallel.
|
|
# If zero or less is given, it becomes equal to the number of processors.
|
|
# This parameter is ignored if the Parallel gem is not loaded.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# @param random_seed [Integer] The seed value used to initialize the random generator.
|
|
# @return [SGDClassifier] a new instance of SGDClassifier
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#63
|
|
def initialize(loss: T.unsafe(nil), learning_rate: T.unsafe(nil), decay: T.unsafe(nil), momentum: T.unsafe(nil), penalty: T.unsafe(nil), reg_param: T.unsafe(nil), l1_ratio: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
|
|
|
|
# Return the class labels.
|
|
#
|
|
# @return [Numo::Int32] (shape: [n_classes])
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#30
|
|
def classes; end
|
|
|
|
# Calculate confidence scores for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#147
|
|
def decision_function(x); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
|
|
# @return [SGDClassifier] The learned classifier itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#105
|
|
def fit(x, y); end
|
|
|
|
# Perform 1-epoch of stochastic gradient descent optimization with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::Int32] (shape: [n_samples]) The binary labels to be used for fitting the model.
|
|
# @return [SGDClassifier] The learned classifier itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#122
|
|
def partial_fit(x, y); end
|
|
|
|
# Predict class labels for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
|
|
# @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#157
|
|
def predict(x); end
|
|
|
|
# Predict probability for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#167
|
|
def predict_proba(x); end
|
|
|
|
# Return the random generator for performing random sampling.
|
|
#
|
|
# @return [Random]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#34
|
|
def rng; end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#175
|
|
def fit_hinge(x, y); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#207
|
|
def fit_log_loss(x, y); end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#283
|
|
def multiclass_problem?; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#256
|
|
def predict_hinge(x); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#272
|
|
def predict_log_loss(x); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#232
|
|
def predict_proba_hinge(x); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#245
|
|
def predict_proba_log_loss(x); end
|
|
end
|
|
|
|
# SGDEstimator is an abstract class for the implementation of linear models with mini-batch stochastic gradient descent (SGD) optimization.
|
|
# This class is used internally.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#191
|
|
class Rumale::LinearModel::SGDEstimator < ::Rumale::LinearModel::BaseEstimator
|
|
# Create an initial linear model with SGD.
|
|
#
|
|
# @return [SGDEstimator] a new instance of SGDEstimator
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#193
|
|
def initialize; end
|
|
|
|
private
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#267
|
|
def apply_l1_penalty?; end
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#263
|
|
def apply_l2_penalty?; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#218
|
|
def init_vars(n_features); end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#282
|
|
def l1_reg_param; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#271
|
|
def l2_reg_param; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#228
|
|
def partial_fit_(x, y, max_iter: T.unsafe(nil), init: T.unsafe(nil)); end
|
|
end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#214
|
|
Rumale::LinearModel::SGDEstimator::ELASTICNET_PENALTY = T.let(T.unsafe(nil), String)
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#213
|
|
Rumale::LinearModel::SGDEstimator::L1_PENALTY = T.let(T.unsafe(nil), String)
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#212
|
|
Rumale::LinearModel::SGDEstimator::L2_PENALTY = T.let(T.unsafe(nil), String)
|
|
|
|
# SGDRegressor is a class that implements linear regressor with stochastic gradient descent optimization.
|
|
#
|
|
# *Reference*
|
|
# - Shalev-Shwartz, S., and Singer, Y., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Proc. ICML'07, pp. 807--814, 2007.
|
|
# - Tsuruoka, Y., Tsujii, J., and Ananiadou, S., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty," Proc. ACL'09, pp. 477--485, 2009.
|
|
# - Bottou, L., "Large-Scale Machine Learning with Stochastic Gradient Descent," Proc. COMPSTAT'10, pp. 177--186, 2010.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/sgd_regressor'
|
|
#
|
|
# estimator =
|
|
# Rumale::LinearModel::SGDRegressor.new(loss: 'squared_error', reg_param: 1.0, max_iter: 1000, batch_size: 50, random_seed: 1)
|
|
# estimator.fit(training_samples, training_target_values)
|
|
# results = estimator.predict(testing_samples)
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#24
|
|
class Rumale::LinearModel::SGDRegressor < ::Rumale::LinearModel::SGDEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new linear regressor with stochastic gradient descent optimization.
|
|
#
|
|
# @param loss [String] The loss function to be used ('squared_error' or 'epsilon_insensitive').
|
|
# @param learning_rate [Float] The initial value of learning rate.
|
|
# The learning rate decreases as the iteration proceeds according to the equation: learning_rate / (1 + decay * t).
|
|
# @param decay [Float] The smoothing parameter for decreasing learning rate as the iteration proceeds.
|
|
# If nil is given, the decay is set to 'reg_param * learning_rate'.
|
|
# @param momentum [Float] The momentum factor.
|
|
# @param penalty [String] The regularization type to be used ('l1', 'l2', or 'elasticnet').
|
|
# @param l1_ratio [Float] The elastic-net type regularization mixing parameter.
|
|
# If penalty is set to 'l2' or 'l1', this parameter is ignored.
|
|
# If l1_ratio = 1, the regularization is similar to Lasso.
|
|
# If l1_ratio = 0, the regularization is similar to Ridge.
|
|
# If 0 < l1_ratio < 1, the regularization is a combination of L1 and L2.
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param epsilon [Float] The margin of tolerance. If loss is set to 'squared_error', this parameter is ignored.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param batch_size [Integer] The size of the mini batches.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
|
|
# If nil is given, the method does not execute in parallel.
|
|
# If zero or less is given, it becomes equal to the number of processors.
|
|
# This parameter is ignored if the Parallel gem is not loaded.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# @param random_seed [Integer] The seed value used to initialize the random generator.
|
|
# @return [SGDRegressor] a new instance of SGDRegressor
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#59
|
|
def initialize(loss: T.unsafe(nil), learning_rate: T.unsafe(nil), decay: T.unsafe(nil), momentum: T.unsafe(nil), penalty: T.unsafe(nil), reg_param: T.unsafe(nil), l1_ratio: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), epsilon: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @return [SGDRegressor] The learned regressor itself.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#103
|
|
def fit(x, y); end
|
|
|
|
# Perform 1-epoch of stochastic gradient descent optimization with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples]) The single target variables to be used for fitting the model.
|
|
# @return [SGDRegressor] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#132
|
|
def partial_fit(x, y); end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#150
|
|
def predict(x); end
|
|
|
|
# Return the random generator for performing random sampling.
|
|
#
|
|
# @return [Random]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#29
|
|
def rng; end
|
|
end
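
# A sketch of incremental training with partial_fit (illustrative only): each call performs
# one epoch of SGD, so chunked or streaming data can be fed in repeatedly. It assumes
# partial_fit can be used from a fresh estimator and that `chunks` is an Enumerable of
# [x, y] pairs with a single target value per sample.
#
#   estimator = Rumale::LinearModel::SGDRegressor.new(loss: 'squared_error', reg_param: 0.01)
#   chunks.each { |x_chunk, y_chunk| estimator.partial_fit(x_chunk, y_chunk) }
#   results = estimator.predict(testing_samples)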
|
|
|
|
# SVC is a class that implements Support Vector Classifier with the squared hinge loss.
|
|
# For multiclass classification problem, it uses one-vs-the-rest strategy.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/svc'
|
|
#
|
|
# estimator =
|
|
# Rumale::LinearModel::SVC.new(reg_param: 1.0)
|
|
# estimator.fit(training_samples, training_labels)
|
|
# results = estimator.predict(testing_samples)
|
|
# @note Rumale::SVM provides linear support vector classifier based on LIBLINEAR.
|
|
# If you prefer execution speed, you should use Rumale::SVM::LinearSVC.
|
|
# https://github.com/yoshoku/rumale-svm
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#28
|
|
class Rumale::LinearModel::SVC < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Classifier
|
|
|
|
# Create a new linear classifier with Support Vector Machine with the squared hinge loss.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @param probability [Boolean] The flag indicating whether to perform probability estimation.
|
|
# @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
|
|
# If nil is given, the methods do not execute in parallel.
|
|
# If zero or less is given, it becomes equal to the number of processors.
|
|
# This parameter is ignored if the Parallel gem is not loaded.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# 'iterate.dat' file is generated by lbfgsb.rb.
|
|
# @return [SVC] a new instance of SVC
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#50
|
|
def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), probability: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil)); end
|
|
|
|
# Return the class labels.
|
|
#
|
|
# @return [Numo::Int32] (shape: [n_classes])
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#33
|
|
def classes; end
|
|
|
|
# Calculate confidence scores for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#110
|
|
def decision_function(x); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
|
|
# @return [SVC] The learned classifier itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#70
|
|
def fit(x, y); end
|
|
|
|
# Predict class labels for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
|
|
# @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#120
|
|
def predict(x); end
|
|
|
|
# Predict probability for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#142
|
|
def predict_proba(x); end
|
|
|
|
private
|
|
|
|
# @return [Boolean]
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#191
|
|
def multiclass_problem?; end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#159
|
|
def partial_fit(base_x, bin_y); end
|
|
end
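
# A sketch of probability estimation with SVC (illustrative only): predict_proba is paired
# with the probability flag documented above. `training_samples`, `training_labels`, and
# `testing_samples` are assumed to be prepared by the caller.
#
#   estimator = Rumale::LinearModel::SVC.new(reg_param: 1.0, probability: true)
#   estimator.fit(training_samples, training_labels)
#   estimator.predict_proba(testing_samples) # => Numo::DFloat, shape: [n_samples, n_classes]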
|
|
|
|
# SVR is a class that implements Support Vector Regressor with the squared epsilon-insensitive loss.
|
|
#
|
|
# @example
|
|
# require 'rumale/linear_model/svr'
|
|
#
|
|
# estimator = Rumale::LinearModel::SVR.new(reg_param: 1.0, epsilon: 0.1)
|
|
# estimator.fit(training_samples, training_target_values)
|
|
# results = estimator.predict(testing_samples)
|
|
# @note Rumale::SVM provides linear and kernel support vector regressor based on LIBLINEAR and LIBSVM.
|
|
# If you prefer execution speed, you should use Rumale::SVM::LinearSVR.
|
|
# https://github.com/yoshoku/rumale-svm
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svr.rb#25
|
|
class Rumale::LinearModel::SVR < ::Rumale::LinearModel::BaseEstimator
|
|
include ::Rumale::Base::Regressor
|
|
|
|
# Create a new regressor with Support Vector Machine by the SGD optimization.
|
|
#
|
|
# @param reg_param [Float] The regularization parameter.
|
|
# @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
|
|
# @param bias_scale [Float] The scale of the bias term.
|
|
# @param epsilon [Float] The margin of tolerance.
|
|
# @param max_iter [Integer] The maximum number of epochs that indicates
|
|
# how many times the whole data is given to the training process.
|
|
# @param tol [Float] The tolerance of loss for terminating optimization.
|
|
# @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
|
|
# If nil is given, the method does not execute in parallel.
|
|
# If zero or less is given, it becomes equal to the number of processors.
|
|
# This parameter is ignored if the Parallel gem is not loaded.
|
|
# @param verbose [Boolean] The flag indicating whether to output loss during iteration.
|
|
# @return [SVR] a new instance of SVR
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svr.rb#42
|
|
def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), epsilon: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil)); end
|
|
|
|
# Fit the model with given training data.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
|
|
# @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
|
|
# @return [SVR] The learned regressor itself.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svr.rb#62
|
|
def fit(x, y); end
|
|
|
|
# Predict values for samples.
|
|
#
|
|
# @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
|
|
# @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
|
|
#
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svr.rb#90
|
|
def predict(x); end
|
|
|
|
private
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/svr.rb#98
|
|
def partial_fit(base_x, single_y); end
|
|
end
|
|
|
|
# source://rumale-linear_model//lib/rumale/linear_model/version.rb#8
|
|
Rumale::LinearModel::VERSION = T.let(T.unsafe(nil), String)
|