diff --git a/Gemfile b/Gemfile
index 151531ae..67efc587 100644
--- a/Gemfile
+++ b/Gemfile
@@ -167,6 +167,8 @@ gem "timeout"
group :development do
gem "prettier_print"
gem "syntax_tree", "~> 6.2"
+ gem "unicode_plot" # For terminal-based data visualization (Ruby API)
+ gem "rumale" # Professional machine learning library for Ruby
end
gem "cssbundling-rails", "~> 1.4"
diff --git a/Gemfile.lock b/Gemfile.lock
index a6164f28..6150c8ac 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -148,6 +148,7 @@ GEM
crass (1.0.6)
cssbundling-rails (1.4.1)
railties (>= 6.0.0)
+ csv (3.3.5)
curb (1.0.6)
daemons (1.4.1)
date (3.4.1)
@@ -178,6 +179,7 @@ GEM
rubyzip (~> 2.0)
domain_name (0.6.20240107)
drb (2.2.1)
+ enumerable-statistics (2.0.8)
erubi (1.13.1)
et-orbi (1.2.11)
tzinfo
@@ -254,6 +256,8 @@ GEM
activerecord
kaminari-core (= 1.2.2)
kaminari-core (1.2.2)
+ lbfgsb (0.6.0)
+ numo-narray (>= 0.9.1)
libmf (0.4.0)
ffi
listen (3.9.0)
@@ -277,6 +281,7 @@ GEM
method_source (1.1.0)
mini_mime (1.1.5)
minitest (5.25.4)
+ mmh3 (1.2.0)
msgpack (1.7.5)
multi_json (1.15.0)
neighbor (0.5.1)
@@ -453,6 +458,91 @@ GEM
logger
rubyzip (2.3.2)
rufo (0.18.0)
+ rumale (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-clustering (~> 1.0.0)
+ rumale-core (~> 1.0.0)
+ rumale-decomposition (~> 1.0.0)
+ rumale-ensemble (~> 1.0.0)
+ rumale-evaluation_measure (~> 1.0.0)
+ rumale-feature_extraction (~> 1.0.0)
+ rumale-kernel_approximation (~> 1.0.0)
+ rumale-kernel_machine (~> 1.0.0)
+ rumale-linear_model (~> 1.0.0)
+ rumale-manifold (~> 1.0.0)
+ rumale-metric_learning (~> 1.0.0)
+ rumale-model_selection (~> 1.0.0)
+ rumale-naive_bayes (~> 1.0.0)
+ rumale-nearest_neighbors (~> 1.0.0)
+ rumale-neural_network (~> 1.0.0)
+ rumale-pipeline (~> 1.0.0)
+ rumale-preprocessing (~> 1.0.0)
+ rumale-tree (~> 1.0.0)
+ rumale-clustering (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-core (1.0.0)
+ csv (>= 3.1.9)
+ numo-narray (>= 0.9.1)
+ rumale-decomposition (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-ensemble (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-linear_model (~> 1.0.0)
+ rumale-model_selection (~> 1.0.0)
+ rumale-preprocessing (~> 1.0.0)
+ rumale-tree (~> 1.0.0)
+ rumale-evaluation_measure (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-feature_extraction (1.0.0)
+ mmh3 (~> 1.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-kernel_approximation (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-kernel_machine (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-linear_model (1.0.0)
+ lbfgsb (>= 0.3.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-manifold (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-decomposition (~> 1.0.0)
+ rumale-metric_learning (1.0.0)
+ lbfgsb (>= 0.3.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-decomposition (~> 1.0.0)
+ rumale-model_selection (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-evaluation_measure (~> 1.0.0)
+ rumale-preprocessing (~> 1.0.0)
+ rumale-naive_bayes (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-nearest_neighbors (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-neural_network (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-pipeline (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-preprocessing (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
+ rumale-tree (1.0.0)
+ numo-narray (>= 0.9.1)
+ rumale-core (~> 1.0.0)
sanitize (6.1.3)
crass (~> 1.0.2)
nokogiri (>= 1.12.0)
@@ -538,6 +628,8 @@ GEM
railties (>= 6.0.0)
tzinfo (2.0.6)
concurrent-ruby (~> 1.0)
+ unicode_plot (0.0.5)
+ enumerable-statistics (>= 2.0.1)
useragent (0.16.11)
warden (1.2.9)
rack (>= 2.0.9)
@@ -639,6 +731,7 @@ DEPENDENCIES
ruby-prof-speedscope
ruby-vips
rufo
+ rumale
sanitize (~> 6.1)
sd_notify
selenium-webdriver
@@ -662,6 +755,7 @@ DEPENDENCIES
timeout
turbo-rails
tzinfo-data
+ unicode_plot
web-console
webdrivers
xdiff!
diff --git a/lib/tasks/stats.rake b/lib/tasks/stats.rake
new file mode 100644
index 00000000..4a86a159
--- /dev/null
+++ b/lib/tasks/stats.rake
@@ -0,0 +1,526 @@
+# typed: strict
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
+require "unicode_plot"
+require "rumale"
+require "rumale/linear_model/linear_regression"
+require "rumale/preprocessing/polynomial_features"
+require "rumale/pipeline/pipeline"
+
+namespace :stats do
+ desc "Generate graphs of FaFavIdAndDate models with linear and quadratic regression lines. Usage: rake stats:fa_fav_graph[max_points]"
+ task :fa_fav_graph, [:max_points] => :environment do |task, args|
+ puts "🔍 Analyzing FaFavIdAndDate data..."
+
+ # Parse max_points parameter (default to no limit)
+ max_points = args[:max_points]&.to_i
+
+ # Query and sample data
+ records_array = StatsHelpers.sample_records(max_points)
+
+ # Create normalizer with raw data
+ normalizer = DataNormalizer.new(records_array)
+
+ puts "📈 X-axis range (fav_fa_id): #{normalizer.x_range}"
+ puts "📈 Y-axis range (date): #{normalizer.y_range}"
+
+ # Run regressions using normalized data
+ results = RegressionAnalyzer.new(normalizer).analyze
+
+ # Display results (automatically denormalized)
+ puts "\n📊 Linear Regression Results:"
+ puts " #{results.linear.equation}"
+ puts " R² = #{StatsHelpers.format_r_squared(results.linear.r_squared)}"
+
+ puts "\n📊 Quadratic Regression Results:"
+ puts " #{results.quadratic.equation}"
+ puts " R² = #{StatsHelpers.format_r_squared(results.quadratic.r_squared)}"
+
+ # Generate visualizations
+ puts "\n🎨 Generating visualizations with UnicodePlot..."
+ plotter = StatsPlotter.new
+
+ plotter.plot_scatter(
+ "Original Data",
+ normalizer.x_values,
+ normalizer.y_values,
+ )
+ plotter.plot_regression("Linear Regression", results.linear)
+ plotter.plot_regression("Quadratic Regression", results.quadratic)
+ plotter.plot_combined(normalizer.x_values, normalizer.y_values, results)
+
+ puts "\n✅ Graph generation completed!"
+ end
+end
+
+# Helper methods extracted to avoid private method issues in Rake context
+module StatsHelpers
+ extend T::Sig
+
+ sig do
+ params(max_points: T.nilable(Integer)).returns(
+ T::Array[Domain::FaFavIdAndDate],
+ )
+ end
+ def self.sample_records(max_points)
+ records = Domain::FaFavIdAndDate.complete
+
+ if records.empty?
+ puts "❌ No complete FaFavIdAndDate records found"
+ exit 1
+ end
+
+ total_records = records.count
+ puts "📊 Found #{total_records} complete records"
+ records = records.select(:id, :fav_fa_id, :date)
+
+ records_array = records.to_a
+ if max_points && total_records > max_points
+ puts "🎲 Randomly sampling #{max_points} points from #{total_records} total records"
+ srand(42) # Fixed seed for reproducibility
+ records_array =
+ T.cast(
+ records_array.sample(max_points),
+ T::Array[Domain::FaFavIdAndDate],
+ )
+ puts "📊 Using #{records_array.length} sampled records for analysis"
+ else
+ message =
+ (
+ if max_points
+ "within max_points limit of #{max_points}"
+ else
+ "no sampling limit specified"
+ end
+ )
+ puts "📊 Using all #{records_array.length} records (#{message})"
+ end
+
+ records_array
+ end
+
+ sig { params(value: Float).returns(Float) }
+ def self.format_r_squared(value)
+ value.round(3).to_f
+ end
+end
+
+# Handles data normalization and denormalization to prevent numerical instability
+class DataNormalizer
+ extend T::Sig
+
+ class Range < T::ImmutableStruct
+ extend T::Sig
+
+ const :min, Float
+ const :max, Float
+
+ sig { returns(Float) }
+ def scale
+ max - min
+ end
+
+ sig { returns(T::Range[Float]) }
+ def range
+ min..max
+ end
+
+ sig { params(value: Float).returns(Float) }
+ def normalize(value)
+ (value - min) / scale
+ end
+
+ sig { params(value: Float).returns(Float) }
+ def denormalize(value)
+ value * scale + min
+ end
+
+ sig do
+ params(
+ mapper: T.nilable(T.proc.params(arg: Float).returns(String)),
+ ).returns(String)
+ end
+ def as_string(&mapper)
+ mapper ||= ->(x) { x }
+ "#{mapper.call(min)} to #{mapper.call(max)}"
+ end
+ end
+
+ sig { returns(T::Array[Float]) }
+ attr_reader :x_values
+
+ sig { returns(T::Array[Float]) }
+ attr_reader :y_values
+
+ sig { params(records: T::Array[Domain::FaFavIdAndDate]).void }
+ def initialize(records)
+ data_points =
+ records.map do |record|
+ {
+ x: record.fav_fa_id.to_f,
+ y: T.cast(record.date&.to_time&.to_i&.to_f, Float),
+ }
+ end
+
+ data_points.sort_by! { |point| point[:x] }
+ @x_values = T.let(data_points.map { |p| p[:x] }, T::Array[Float])
+ @y_values = T.let(data_points.map { |p| p[:y] }, T::Array[Float])
+
+ # Calculate min/max for normalization
+ x_minmax = T.cast(@x_values.minmax, [Float, Float])
+ y_minmax = T.cast(@y_values.minmax, [Float, Float])
+ @x = T.let(Range.new(min: x_minmax[0], max: x_minmax[1]), Range)
+ @y = T.let(Range.new(min: y_minmax[0], max: y_minmax[1]), Range)
+ end
+
+ sig { returns(String) }
+ def x_range
+ @x.as_string
+ end
+
+ sig { returns(String) }
+ def y_range
+ @y.as_string { |x| Time.at(x) }
+ end
+
+ # Convert raw data to normalized [0,1] scale for Rumale
+ sig { returns(T::Array[T::Array[Float]]) }
+ def normalized_x_matrix
+ @x_values.map { |x| [@x.normalize(x)] }
+ end
+
+ sig { returns(T::Array[Float]) }
+ def normalized_y_vector
+ @y_values.map { |y| @y.normalize(y) }
+ end
+
+ # Generate regression line points in original scale
+ sig { returns(T::Array[Float]) }
+ def regression_x_range
+ step_size = @x.scale / 50.0
+ @x.range.step(step_size).to_a
+ end
+
+ # Denormalize linear regression results back to original scale
+ sig do
+ params(
+ regression_x: T::Array[Float],
+ norm_slope: Float,
+ norm_intercept: Float,
+ ).returns(T::Array[Float])
+ end
+ def denormalize_linear(regression_x, norm_slope, norm_intercept)
+ regression_x.map do |x|
+ x_norm = @x.normalize(x)
+ y_norm = norm_slope * x_norm + norm_intercept
+ @y.denormalize(y_norm)
+ end
+ end
+
+ # Denormalize quadratic regression results back to original scale
+ sig do
+ params(
+ regression_x: T::Array[Float],
+ norm_a: Float,
+ norm_b: Float,
+ norm_c: Float,
+ ).returns(T::Array[Float])
+ end
+ def denormalize_quadratic(regression_x, norm_a, norm_b, norm_c)
+ regression_x.map do |x|
+ x_norm = @x.normalize(x)
+ y_norm = norm_a * x_norm * x_norm + norm_b * x_norm + norm_c
+ @y.denormalize(y_norm)
+ end
+ end
+
+ # Generate equation strings with coefficients in original scale
+ sig { params(norm_slope: Float, norm_intercept: Float).returns(String) }
+ def linear_equation(norm_slope, norm_intercept)
+ slope_orig = norm_slope * @y.scale / @x.scale
+ intercept_orig = (norm_intercept * @y.scale + @y.min) - slope_orig * @x.min
+
+ "y = #{polynomial_equation([slope_orig, intercept_orig])}"
+ end
+
+ sig { params(norm_a: Float, norm_b: Float, norm_c: Float).returns(String) }
+ def quadratic_equation(norm_a, norm_b, norm_c)
+ a_orig = norm_a * @y.scale / (@x.scale * @x.scale)
+ b_orig = norm_b * @y.scale / @x.scale - 2 * a_orig * @x.min
+ c_orig =
+ (norm_c * @y.scale + @y.min) - b_orig * @x.min - a_orig * @x.min * @x.min
+
+ "y = #{polynomial_equation([a_orig, b_orig, c_orig])}"
+ end
+
+ # Convert array of coefficients into polynomial equation string
+ sig { params(coefficients: T::Array[Float]).returns(String) }
+ def polynomial_equation(coefficients)
+ terms =
+ coefficients.each_with_index.map do |coeff, power|
+ next if coeff.zero?
+
+ term = format_number(coeff)
+ case power
+ when 0
+ term
+ when 1
+ "#{term}x"
+ else
+ "#{term}x#{power.to_s.tr("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")}"
+ end
+ end
+
+ terms.compact.reverse.join(" + ").gsub("+ -", "- ")
+ end
+
+ # Format a number with significant figures and scientific notation when needed
+ sig { params(num: Float, sig_figs: Integer).returns(String) }
+ def format_number(num, sig_figs = 3)
+ # Handle zero case
+ return "0.0" if num.zero?
+
+ # Get order of scale
+ order = Math.log10(num.abs).floor
+
+ # Use scientific notation for very large or small numbers
+ if order >= 6 || order <= -3
+ # Scale number between 1 and 10
+ scaled = num / (10.0**order)
+ # Round to sig figs
+ rounded = scaled.round(sig_figs - 1)
+ "#{rounded}e#{order}"
+ else
+ # For normal range numbers, just round to appropriate decimal places
+ decimal_places = sig_figs - (order + 1)
+ decimal_places = 0 if decimal_places < 0
+ num.round(decimal_places).to_s
+ end
+ end
+end
+
+# Immutable struct representing a single regression analysis result
+class RegressionResult < T::ImmutableStruct
+ extend T::Sig
+
+ const :equation, String
+ const :r_squared, Float
+ const :x_values, T::Array[Float]
+ const :y_values, T::Array[Float]
+end
+
+# Immutable struct representing the complete analysis results
+class AnalysisResults < T::ImmutableStruct
+ extend T::Sig
+
+ const :linear, RegressionResult
+ const :quadratic, RegressionResult
+end
+
+# Handles regression analysis using Rumale with normalized data
+class RegressionAnalyzer
+ extend T::Sig
+
+ sig { params(normalizer: DataNormalizer).void }
+ def initialize(normalizer)
+ @normalizer = normalizer
+ end
+
+ sig { returns(AnalysisResults) }
+ def analyze
+ # Use normalized data for Rumale calculations to prevent numerical instability
+ x_matrix = @normalizer.normalized_x_matrix
+ y_vector = @normalizer.normalized_y_vector
+ regression_x = @normalizer.regression_x_range
+
+ AnalysisResults.new(
+ linear: analyze_linear(x_matrix, y_vector, regression_x),
+ quadratic: analyze_quadratic(x_matrix, y_vector, regression_x),
+ )
+ end
+
+ private
+
+ sig do
+ params(
+ x_matrix: T::Array[T::Array[Float]],
+ y_vector: T::Array[Float],
+ regression_x: T::Array[Float],
+ ).returns(RegressionResult)
+ end
+ def analyze_linear(x_matrix, y_vector, regression_x)
+ poly_features = Rumale::Preprocessing::PolynomialFeatures.new(degree: 1)
+ regressor = Rumale::LinearModel::LinearRegression.new
+ pipeline =
+ Rumale::Pipeline::Pipeline.new(
+ steps: {
+ transformer: poly_features,
+ estimator: regressor,
+ },
+ )
+
+ pipeline.fit(x_matrix, y_vector)
+
+ # Extract normalized coefficients
+ weight_vec = pipeline.steps[:estimator].weight_vec
+ norm_intercept = weight_vec[0]
+ norm_slope = weight_vec[1]
+ r_squared = pipeline.score(x_matrix, y_vector)
+
+ # Generate regression line data in original scale
+ linear_y =
+ @normalizer.denormalize_linear(regression_x, norm_slope, norm_intercept)
+
+ RegressionResult.new(
+ equation: @normalizer.linear_equation(norm_slope, norm_intercept),
+ r_squared: r_squared,
+ x_values: regression_x,
+ y_values: linear_y,
+ )
+ end
+
+ sig do
+ params(
+ x_matrix: T::Array[T::Array[Float]],
+ y_vector: T::Array[Float],
+ regression_x: T::Array[Float],
+ ).returns(RegressionResult)
+ end
+ def analyze_quadratic(x_matrix, y_vector, regression_x)
+ # Use pipeline approach as recommended in documentation
+ poly_features = Rumale::Preprocessing::PolynomialFeatures.new(degree: 2)
+ regressor = Rumale::LinearModel::LinearRegression.new(fit_bias: true)
+
+ pipeline =
+ Rumale::Pipeline::Pipeline.new(
+ steps: {
+ transformer: poly_features,
+ estimator: regressor,
+ },
+ )
+
+ # Fit the pipeline
+ pipeline.fit(x_matrix, y_vector)
+ r_squared = pipeline.score(x_matrix, y_vector)
+ weight_vec = pipeline.steps[:estimator].weight_vec
+ norm_c = weight_vec[0] # constant term
+ norm_b = weight_vec[1] # x coefficient
+ norm_a = weight_vec[2] # x² coefficient
+
+ # Generate regression line data in original scale
+ quadratic_y =
+ @normalizer.denormalize_quadratic(regression_x, norm_a, norm_b, norm_c)
+
+ RegressionResult.new(
+ equation: @normalizer.quadratic_equation(norm_a, norm_b, norm_c),
+ r_squared: r_squared,
+ x_values: regression_x,
+ y_values: quadratic_y,
+ )
+ end
+end
+
+# Simplified plotting class with extracted common functionality
+class StatsPlotter
+ extend T::Sig
+
+ sig do
+ params(
+ title: String,
+ x_values: T::Array[Float],
+ y_values: T::Array[Float],
+ ).void
+ end
+ def plot_scatter(title, x_values, y_values)
+ plot_with_error_handling(title) do
+ UnicodePlot.scatterplot(
+ x_values,
+ y_values,
+ title: title,
+ width: 80,
+ height: 20,
+ xlabel: "fav_fa_id",
+ ylabel: date_axis_label(y_values),
+ )
+ end
+ end
+
+ sig { params(title: String, result: RegressionResult).void }
+ def plot_regression(title, result)
+ subtitle = "#{title.split.first} fit (R² = #{result.r_squared.round(3)})"
+ plot_with_error_handling("#{title} - #{subtitle}") do
+ UnicodePlot.lineplot(
+ result.x_values,
+ result.y_values,
+ title: title,
+ width: 80,
+ height: 20,
+ xlabel: "fav_fa_id",
+ ylabel: date_axis_label(result.y_values),
+ )
+ end
+ end
+
+ sig do
+ params(
+ x_values: T::Array[Float],
+ y_values: T::Array[Float],
+ results: AnalysisResults,
+ ).void
+ end
+ def plot_combined(x_values, y_values, results)
+ plot_with_error_handling("📈 Combined Visualization:") do
+ # Base scatter plot
+ plot =
+ UnicodePlot.scatterplot(
+ x_values,
+ y_values,
+ title: "FaFavIdAndDate Analysis: Original Data vs Regression Models",
+ name: "Original Data",
+ width: 100,
+ height: 25,
+ xlabel: "fav_fa_id",
+ ylabel: date_axis_label(y_values),
+ )
+
+ # Add regression lines
+ UnicodePlot.lineplot!(
+ plot,
+ results.linear.x_values,
+ results.linear.y_values,
+ name: "Linear (R²=#{results.linear.r_squared.round(3)})",
+ )
+ UnicodePlot.lineplot!(
+ plot,
+ results.quadratic.x_values,
+ results.quadratic.y_values,
+ name: "Quadratic (R²=#{results.quadratic.r_squared.round(3)})",
+ )
+ plot
+ end
+ end
+
+ private
+
+ sig { params(y_values: T::Array[Float]).returns(String) }
+ def date_axis_label(y_values)
+ y_min, y_max = y_values.minmax
+ start_date = Time.at(y_min).strftime("%Y-%m-%d")
+ end_date = Time.at(y_max).strftime("%Y-%m-%d")
+ "Date (#{start_date} to #{end_date})"
+ end
+
+ sig { params(title: String, block: T.proc.returns(T.untyped)).void }
+ def plot_with_error_handling(title, &block)
+ puts "\n#{title}"
+ begin
+ plot = block.call
+ puts plot.render
+ rescue LoadError
+ puts "⚠️ UnicodePlot gem not available. Install with: gem install unicode_plot"
+ rescue => e
+ puts "⚠️ Error generating plot: #{e.message}"
+ end
+ end
+end
diff --git a/rake/blob_file.rake b/rake/blob_file.rake
index e124e163..57371442 100644
--- a/rake/blob_file.rake
+++ b/rake/blob_file.rake
@@ -1,3 +1,7 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
require "find"
namespace :blob_file do
diff --git a/rake/e621.rake b/rake/e621.rake
index 3c3c6489..90450994 100644
--- a/rake/e621.rake
+++ b/rake/e621.rake
@@ -1,3 +1,7 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
namespace :e621 do
desc "run a single e621 posts index job"
task posts_index_job: :environment do
diff --git a/rake/fa.rake b/rake/fa.rake
index caa3bfe9..fdc533bf 100644
--- a/rake/fa.rake
+++ b/rake/fa.rake
@@ -1,3 +1,7 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
namespace :fa do
desc "enqueue waiting posts"
task enqueue_waiting_posts: %i[set_logger_stdout environment] do |t, args|
diff --git a/rake/fingerprint.rake b/rake/fingerprint.rake
index 120704cc..04a4970c 100644
--- a/rake/fingerprint.rake
+++ b/rake/fingerprint.rake
@@ -1,3 +1,7 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
namespace :fingerprint do
desc "Create missing fingerprints"
task create_missing: :environment do
diff --git a/rake/ib.rake b/rake/ib.rake
index 012b1c5d..4897125c 100644
--- a/rake/ib.rake
+++ b/rake/ib.rake
@@ -1,3 +1,7 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
namespace :ib do
desc "run a single e621 posts index job"
task latest_posts_job: :environment do
@@ -28,15 +32,4 @@ namespace :ib do
puts "auth credentials set to #{username} / #{password}"
end
-
- desc "Perform FileJob for missing files"
- task perform_file_jobs: :environment do
- Domain::Inkbunny::File
- .where(state: :ok)
- .where(blob_entry_sha256: nil)
- .where("url_str <> ?", "")
- .find_each do |file|
- Domain::Inkbunny::Job::StaticFileJob.new.perform(file: file)
- end
- end
end
diff --git a/rake/metrics.rake b/rake/metrics.rake
index d6e86b44..934b57a5 100644
--- a/rake/metrics.rake
+++ b/rake/metrics.rake
@@ -1,9 +1,13 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
namespace :metrics do
desc "run reporters periodically"
task report_all: %i[environment set_logger_stdout] do
schedule = {
Rake::Task["metrics:jobs"] => 60.seconds,
- Rake::Task["metrics:estimate_db_rows"] => 60.seconds
+ Rake::Task["metrics:estimate_db_rows"] => 60.seconds,
}
last_ran = {}
diff --git a/rake/twitter.rake b/rake/twitter.rake
index 86c72859..6bf26455 100644
--- a/rake/twitter.rake
+++ b/rake/twitter.rake
@@ -1,21 +1,25 @@
+# typed: false
+# frozen_string_literal: true
+T.bind(self, T.all(Rake::DSL, Object))
+
namespace :twitter do
desc "scan timeline of a user"
- task :timeline => [:set_logger_stdout, :environment] do |t, args|
+ task timeline: %i[set_logger_stdout environment] do |t, args|
force_scan = ENV["force_scan"] || false
name = ENV["name"] || raise("must provide name")
- Domain::Twitter::Job::UserTimelineTweetsJob.
- set(priority: -10).
- perform_later({ name: name, force_scan: force_scan })
+ Domain::Twitter::Job::UserTimelineTweetsJob.set(
+ priority: -10,
+ ).perform_later({ name: name, force_scan: force_scan })
puts "timeline for #{name}"
end
- task :timeline_file => [:set_logger_stdout, :environment] do
+ task timeline_file: %i[set_logger_stdout environment] do
file = ENV["file"]
names = File.read(file).split("\n").map(&:strip).map(&:chomp)
names.each do |name|
- Domain::Twitter::Job::UserTimelineTweetsJob.
- set(priority: -10).
- perform_later({ name: name, force_scan: false })
+ Domain::Twitter::Job::UserTimelineTweetsJob.set(
+ priority: -10,
+ ).perform_later({ name: name, force_scan: false })
puts "timeline for #{name}"
end
end
diff --git a/sorbet/config b/sorbet/config
index 12bbc51c..43fd007d 100644
--- a/sorbet/config
+++ b/sorbet/config
@@ -1,5 +1,9 @@
--dir
.
+--allowed-extension=.rb
+--allowed-extension=.rbi
+--allowed-extension=.rake
+--allowed-extension=Rakefile
--enable-experimental-requires-ancestor
--ignore=tmp/
--ignore=vendor/
diff --git a/sorbet/rbi/gems/csv@3.3.5.rbi b/sorbet/rbi/gems/csv@3.3.5.rbi
new file mode 100644
index 00000000..134c751f
--- /dev/null
+++ b/sorbet/rbi/gems/csv@3.3.5.rbi
@@ -0,0 +1,4785 @@
+# typed: false
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `csv` gem.
+# Please instead update this file by running `bin/tapioca gem csv`.
+
+
+# source://csv//lib/csv/core_ext/array.rb#1
+class Array
+ include ::Enumerable
+
+ # Equivalent to CSV::generate_line(self, options)
+ #
+ # ["CSV", "data"].to_csv
+ # #=> "CSV,data\n"
+ #
+ # source://csv//lib/csv/core_ext/array.rb#6
+ def to_csv(**options); end
+end
+
+# == \CSV
+#
+# === \CSV Data
+#
+# \CSV (comma-separated values) data is a text representation of a table:
+# - A _row_ _separator_ delimits table rows.
+# A common row separator is the newline character "\n".
+# - A _column_ _separator_ delimits fields in a row.
+# A common column separator is the comma character ",".
+#
+# This \CSV \String, with row separator "\n"
+# and column separator ",",
+# has three rows and two columns:
+# "foo,0\nbar,1\nbaz,2\n"
+#
+# Despite the name \CSV, a \CSV representation can use different separators.
+#
+# For more about tables, see the Wikipedia article
+# "{Table (information)}[https://en.wikipedia.org/wiki/Table_(information)]",
+# especially its section
+# "{Simple table}[https://en.wikipedia.org/wiki/Table_(information)#Simple_table]"
+#
+# == \Class \CSV
+#
+# Class \CSV provides methods for:
+# - Parsing \CSV data from a \String object, a \File (via its file path), or an \IO object.
+# - Generating \CSV data to a \String object.
+#
+# To make \CSV available:
+# require 'csv'
+#
+# All examples here assume that this has been done.
+#
+# == Keeping It Simple
+#
+# A \CSV object has dozens of instance methods that offer fine-grained control
+# of parsing and generating \CSV data.
+# For many needs, though, simpler approaches will do.
+#
+# This section summarizes the singleton methods in \CSV
+# that allow you to parse and generate without explicitly
+# creating \CSV objects.
+# For details, follow the links.
+#
+# === Simple Parsing
+#
+# Parsing methods commonly return either of:
+# - An \Array of Arrays of Strings:
+# - The outer \Array is the entire "table".
+# - Each inner \Array is a row.
+# - Each \String is a field.
+# - A CSV::Table object. For details, see
+# {\CSV with Headers}[#class-CSV-label-CSV+with+Headers].
+#
+# ==== Parsing a \String
+#
+# The input to be parsed can be a string:
+# string = "foo,0\nbar,1\nbaz,2\n"
+#
+# \Method CSV.parse returns the entire \CSV data:
+# CSV.parse(string) # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# \Method CSV.parse_line returns only the first row:
+# CSV.parse_line(string) # => ["foo", "0"]
+#
+# \CSV extends class \String with instance method String#parse_csv,
+# which also returns only the first row:
+# string.parse_csv # => ["foo", "0"]
+#
+# ==== Parsing Via a \File Path
+#
+# The input to be parsed can be in a file:
+# string = "foo,0\nbar,1\nbaz,2\n"
+# path = 't.csv'
+# File.write(path, string)
+#
+# \Method CSV.read returns the entire \CSV data:
+# CSV.read(path) # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# \Method CSV.foreach iterates, passing each row to the given block:
+# CSV.foreach(path) do |row|
+# p row
+# end
+# Output:
+# ["foo", "0"]
+# ["bar", "1"]
+# ["baz", "2"]
+#
+# \Method CSV.table returns the entire \CSV data as a CSV::Table object:
+# CSV.table(path) # => #
+#
+# ==== Parsing from an Open \IO Stream
+#
+# The input to be parsed can be in an open \IO stream:
+#
+# \Method CSV.read returns the entire \CSV data:
+# File.open(path) do |file|
+# CSV.read(file)
+# end # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# As does method CSV.parse:
+# File.open(path) do |file|
+# CSV.parse(file)
+# end # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# \Method CSV.parse_line returns only the first row:
+# File.open(path) do |file|
+# CSV.parse_line(file)
+# end # => ["foo", "0"]
+#
+# \Method CSV.foreach iterates, passing each row to the given block:
+# File.open(path) do |file|
+# CSV.foreach(file) do |row|
+# p row
+# end
+# end
+# Output:
+# ["foo", "0"]
+# ["bar", "1"]
+# ["baz", "2"]
+#
+# \Method CSV.table returns the entire \CSV data as a CSV::Table object:
+# File.open(path) do |file|
+# CSV.table(file)
+# end # => #
+#
+# === Simple Generating
+#
+# \Method CSV.generate returns a \String;
+# this example uses method CSV#<< to append the rows
+# that are to be generated:
+# output_string = CSV.generate do |csv|
+# csv << ['foo', 0]
+# csv << ['bar', 1]
+# csv << ['baz', 2]
+# end
+# output_string # => "foo,0\nbar,1\nbaz,2\n"
+#
+# \Method CSV.generate_line returns a \String containing the single row
+# constructed from an \Array:
+# CSV.generate_line(['foo', '0']) # => "foo,0\n"
+#
+# \CSV extends class \Array with instance method Array#to_csv,
+# which forms an \Array into a \String:
+# ['foo', '0'].to_csv # => "foo,0\n"
+#
+# === "Filtering" \CSV
+#
+# \Method CSV.filter provides a Unix-style filter for \CSV data.
+# The input data is processed to form the output data:
+# in_string = "foo,0\nbar,1\nbaz,2\n"
+# out_string = ''
+# CSV.filter(in_string, out_string) do |row|
+# row[0] = row[0].upcase
+# row[1] *= 4
+# end
+# out_string # => "FOO,0000\nBAR,1111\nBAZ,2222\n"
+#
+# == \CSV Objects
+#
+# There are three ways to create a \CSV object:
+# - \Method CSV.new returns a new \CSV object.
+# - \Method CSV.instance returns a new or cached \CSV object.
+# - \Method \CSV() also returns a new or cached \CSV object.
+#
+# === Instance Methods
+#
+# \CSV has three groups of instance methods:
+# - Its own internally defined instance methods.
+# - Methods included by module Enumerable.
+# - Methods delegated to class IO. See below.
+#
+# ==== Delegated Methods
+#
+# For convenience, a CSV object will delegate to many methods in class IO.
+# (A few have wrapper "guard code" in \CSV.) You may call:
+# * IO#binmode
+# * #binmode?
+# * IO#close
+# * IO#close_read
+# * IO#close_write
+# * IO#closed?
+# * #eof
+# * #eof?
+# * IO#external_encoding
+# * IO#fcntl
+# * IO#fileno
+# * #flock
+# * IO#flush
+# * IO#fsync
+# * IO#internal_encoding
+# * #ioctl
+# * IO#isatty
+# * #path
+# * IO#pid
+# * IO#pos
+# * IO#pos=
+# * IO#reopen
+# * #rewind
+# * IO#seek
+# * #stat
+# * IO#string
+# * IO#sync
+# * IO#sync=
+# * IO#tell
+# * #to_i
+# * #to_io
+# * IO#truncate
+# * IO#tty?
+#
+# === Options
+#
+# The default values for options are:
+# DEFAULT_OPTIONS = {
+# # For both parsing and generating.
+# col_sep: ",",
+# row_sep: :auto,
+# quote_char: '"',
+# # For parsing.
+# field_size_limit: nil,
+# converters: nil,
+# unconverted_fields: nil,
+# headers: false,
+# return_headers: false,
+# header_converters: nil,
+# skip_blanks: false,
+# skip_lines: nil,
+# liberal_parsing: false,
+# nil_value: nil,
+# empty_value: "",
+# strip: false,
+# # For generating.
+# write_headers: nil,
+# quote_empty: true,
+# force_quotes: false,
+# write_converters: nil,
+# write_nil_value: nil,
+# write_empty_value: "",
+# }
+#
+# ==== Options for Parsing
+#
+# Options for parsing, described in detail below, include:
+# - +row_sep+: Specifies the row separator; used to delimit rows.
+# - +col_sep+: Specifies the column separator; used to delimit fields.
+# - +quote_char+: Specifies the quote character; used to quote fields.
+# - +field_size_limit+: Specifies the maximum field size + 1 allowed.
+# Deprecated since 3.2.3. Use +max_field_size+ instead.
+# - +max_field_size+: Specifies the maximum field size allowed.
+# - +converters+: Specifies the field converters to be used.
+# - +unconverted_fields+: Specifies whether unconverted fields are to be available.
+# - +headers+: Specifies whether data contains headers,
+# or specifies the headers themselves.
+# - +return_headers+: Specifies whether headers are to be returned.
+# - +header_converters+: Specifies the header converters to be used.
+# - +skip_blanks+: Specifies whether blanks lines are to be ignored.
+# - +skip_lines+: Specifies how comments lines are to be recognized.
+# - +strip+: Specifies whether leading and trailing whitespace are to be
+# stripped from fields. This must be compatible with +col_sep+; if it is not,
+# then an +ArgumentError+ exception will be raised.
+# - +liberal_parsing+: Specifies whether \CSV should attempt to parse
+# non-compliant data.
+# - +nil_value+: Specifies the object that is to be substituted for each null (no-text) field.
+# - +empty_value+: Specifies the object that is to be substituted for each empty field.
+#
+# :include: ../doc/csv/options/common/row_sep.rdoc
+#
+# :include: ../doc/csv/options/common/col_sep.rdoc
+#
+# :include: ../doc/csv/options/common/quote_char.rdoc
+#
+# :include: ../doc/csv/options/parsing/field_size_limit.rdoc
+#
+# :include: ../doc/csv/options/parsing/converters.rdoc
+#
+# :include: ../doc/csv/options/parsing/unconverted_fields.rdoc
+#
+# :include: ../doc/csv/options/parsing/headers.rdoc
+#
+# :include: ../doc/csv/options/parsing/return_headers.rdoc
+#
+# :include: ../doc/csv/options/parsing/header_converters.rdoc
+#
+# :include: ../doc/csv/options/parsing/skip_blanks.rdoc
+#
+# :include: ../doc/csv/options/parsing/skip_lines.rdoc
+#
+# :include: ../doc/csv/options/parsing/strip.rdoc
+#
+# :include: ../doc/csv/options/parsing/liberal_parsing.rdoc
+#
+# :include: ../doc/csv/options/parsing/nil_value.rdoc
+#
+# :include: ../doc/csv/options/parsing/empty_value.rdoc
+#
+# ==== Options for Generating
+#
+# Options for generating, described in detail below, include:
+# - +row_sep+: Specifies the row separator; used to delimit rows.
+# - +col_sep+: Specifies the column separator; used to delimit fields.
+# - +quote_char+: Specifies the quote character; used to quote fields.
+# - +write_headers+: Specifies whether headers are to be written.
+# - +force_quotes+: Specifies whether each output field is to be quoted.
+# - +quote_empty+: Specifies whether each empty output field is to be quoted.
+# - +write_converters+: Specifies the field converters to be used in writing.
+# - +write_nil_value+: Specifies the object that is to be substituted for each +nil+-valued field.
+# - +write_empty_value+: Specifies the object that is to be substituted for each empty field.
+#
+# :include: ../doc/csv/options/common/row_sep.rdoc
+#
+# :include: ../doc/csv/options/common/col_sep.rdoc
+#
+# :include: ../doc/csv/options/common/quote_char.rdoc
+#
+# :include: ../doc/csv/options/generating/write_headers.rdoc
+#
+# :include: ../doc/csv/options/generating/force_quotes.rdoc
+#
+# :include: ../doc/csv/options/generating/quote_empty.rdoc
+#
+# :include: ../doc/csv/options/generating/write_converters.rdoc
+#
+# :include: ../doc/csv/options/generating/write_nil_value.rdoc
+#
+# :include: ../doc/csv/options/generating/write_empty_value.rdoc
+#
+# === \CSV with Headers
+#
+# CSV allows you to specify column names of a CSV file, whether they are in the
+# data, or provided separately. If headers are specified, reading methods return
+# an instance of CSV::Table, consisting of CSV::Row objects.
+#
+# # Headers are part of data
+# data = CSV.parse(<<~ROWS, headers: true)
+# Name,Department,Salary
+# Bob,Engineering,1000
+# Jane,Sales,2000
+# John,Management,5000
+# ROWS
+#
+# data.class #=> CSV::Table
+# data.first #=> #<CSV::Row "Name":"Bob" "Department":"Engineering" "Salary":"1000">
+# data.first.to_h #=> {"Name"=>"Bob", "Department"=>"Engineering", "Salary"=>"1000"}
+#
+# # Headers provided by developer
+# data = CSV.parse('Bob,Engineering,1000', headers: %i[name department salary])
+# data.first #=> #<CSV::Row name:"Bob" department:"Engineering" salary:"1000">
+#
+# === \Converters
+#
+# By default, each value (field or header) parsed by \CSV is formed into a \String.
+# You can use a _field_ _converter_ or _header_ _converter_
+# to intercept and modify the parsed values:
+# - See {Field Converters}[#class-CSV-label-Field+Converters].
+# - See {Header Converters}[#class-CSV-label-Header+Converters].
+#
+# Also by default, each value to be written during generation is written 'as-is'.
+# You can use a _write_ _converter_ to modify values before writing.
+# - See {Write Converters}[#class-CSV-label-Write+Converters].
+#
+# ==== Specifying \Converters
+#
+# You can specify converters for parsing or generating in the +options+
+# argument to various \CSV methods:
+# - Option +converters+ for converting parsed field values.
+# - Option +header_converters+ for converting parsed header values.
+# - Option +write_converters+ for converting values to be written (generated).
+#
+# There are three forms for specifying converters:
+# - A converter proc: executable code to be used for conversion.
+# - A converter name: the name of a stored converter.
+# - A converter list: an array of converter procs, converter names, and converter lists.
+#
+# ===== Converter Procs
+#
+# This converter proc, +strip_converter+, accepts a value +field+
+# and returns +field.strip+:
+# strip_converter = proc {|field| field.strip }
+# In this call to CSV.parse,
+# the keyword argument +converters: strip_converter+
+# specifies that:
+# - \Proc +strip_converter+ is to be called for each parsed field.
+# - The converter's return value is to replace the +field+ value.
+# Example:
+# string = " foo , 0 \n bar , 1 \n baz , 2 \n"
+# array = CSV.parse(string, converters: strip_converter)
+# array # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# A converter proc can receive a second argument, +field_info+,
+# that contains details about the field.
+# This modified +strip_converter+ displays its arguments:
+# strip_converter = proc do |field, field_info|
+# p [field, field_info]
+# field.strip
+# end
+# string = " foo , 0 \n bar , 1 \n baz , 2 \n"
+# array = CSV.parse(string, converters: strip_converter)
+# array # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+# Output:
+# [" foo ", #<struct CSV::FieldInfo index=0, line=1, header=nil>]
+# [" 0 ", #<struct CSV::FieldInfo index=1, line=1, header=nil>]
+# [" bar ", #<struct CSV::FieldInfo index=0, line=2, header=nil>]
+# [" 1 ", #<struct CSV::FieldInfo index=1, line=2, header=nil>]
+# [" baz ", #<struct CSV::FieldInfo index=0, line=3, header=nil>]
+# [" 2 ", #<struct CSV::FieldInfo index=1, line=3, header=nil>]
+# Each CSV::FieldInfo object shows:
+# - The 0-based field index.
+# - The 1-based line index.
+# - The field header, if any.
+#
+# ===== Stored \Converters
+#
+# A converter may be given a name and stored in a structure where
+# the parsing methods can find it by name.
+#
+# The storage structure for field converters is the \Hash CSV::Converters.
+# It has several built-in converter procs:
+# - :integer: converts each \String-embedded integer into a true \Integer.
+# - :float: converts each \String-embedded float into a true \Float.
+# - :date: converts each \String-embedded date into a true \Date.
+# - :date_time: converts each \String-embedded date-time into a true \DateTime.
+# - :time: converts each \String-embedded time into a true \Time.
+#
+# This example creates a converter proc, then stores it:
+# strip_converter = proc {|field| field.strip }
+# CSV::Converters[:strip] = strip_converter
+# Then the parsing method call can refer to the converter
+# by its name, :strip:
+# string = " foo , 0 \n bar , 1 \n baz , 2 \n"
+# array = CSV.parse(string, converters: :strip)
+# array # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# The storage structure for header converters is the \Hash CSV::HeaderConverters,
+# which works in the same way.
+# It also has built-in converter procs:
+# - :downcase: Downcases each header.
+# - :symbol: Converts each header to a \Symbol.
+#
+# There is no such storage structure for write converters.
+#
+# In order for the parsing methods to access stored converters in non-main-Ractors, the
+# storage structure must be made shareable first.
+# Therefore, Ractor.make_shareable(CSV::Converters) and
+# Ractor.make_shareable(CSV::HeaderConverters) must be called before the creation
+# of Ractors that use the converters stored in these structures. (Since making the storage
+# structures shareable involves freezing them, any custom converters that are to be used
+# must be added first.)
+#
+# ===== Converter Lists
+#
+# A _converter_ _list_ is an \Array that may include any assortment of:
+# - Converter procs.
+# - Names of stored converters.
+# - Nested converter lists.
+#
+# Examples:
+# numeric_converters = [:integer, :float]
+# date_converters = [:date, :date_time]
+# [numeric_converters, strip_converter]
+# [strip_converter, date_converters, :float]
+#
+# Like a converter proc, a converter list may be named and stored in either
+# \CSV::Converters or CSV::HeaderConverters:
+# CSV::Converters[:custom] = [strip_converter, date_converters, :float]
+# CSV::HeaderConverters[:custom] = [:downcase, :symbol]
+#
+# There are two built-in converter lists:
+# CSV::Converters[:numeric] # => [:integer, :float]
+# CSV::Converters[:all] # => [:date_time, :numeric]
+#
+# ==== Field \Converters
+#
+# With no conversion, all parsed fields in all rows become Strings:
+# string = "foo,0\nbar,1\nbaz,2\n"
+# ary = CSV.parse(string)
+# ary # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# When you specify a field converter, each parsed field is passed to the converter;
+# its return value becomes the stored value for the field.
+# A converter might, for example, convert an integer embedded in a \String
+# into a true \Integer.
+# (In fact, that's what built-in field converter +:integer+ does.)
+#
+# There are three ways to use field \converters.
+#
+# - Using option {converters}[#class-CSV-label-Option+converters] with a parsing method:
+# ary = CSV.parse(string, converters: :integer)
+# ary # => [["foo", 0], ["bar", 1], ["baz", 2]]
+# - Using option {converters}[#class-CSV-label-Option+converters] with a new \CSV instance:
+# csv = CSV.new(string, converters: :integer)
+# # Field converters in effect:
+# csv.converters # => [:integer]
+# csv.read # => [["foo", 0], ["bar", 1], ["baz", 2]]
+# - Using method #convert to add a field converter to a \CSV instance:
+# csv = CSV.new(string)
+# # Add a converter.
+# csv.convert(:integer)
+# csv.converters # => [:integer]
+# csv.read # => [["foo", 0], ["bar", 1], ["baz", 2]]
+#
+# Installing a field converter does not affect already-read rows:
+# csv = CSV.new(string)
+# csv.shift # => ["foo", "0"]
+# # Add a converter.
+# csv.convert(:integer)
+# csv.converters # => [:integer]
+# csv.read # => [["bar", 1], ["baz", 2]]
+#
+# There are additional built-in \converters, and custom \converters are also supported.
+#
+# ===== Built-In Field \Converters
+#
+# The built-in field converters are in \Hash CSV::Converters:
+# - Each key is a field converter name.
+# - Each value is one of:
+# - A \Proc field converter.
+# - An \Array of field converter names.
+#
+# Display:
+# CSV::Converters.each_pair do |name, value|
+# if value.kind_of?(Proc)
+# p [name, value.class]
+# else
+# p [name, value]
+# end
+# end
+# Output:
+# [:integer, Proc]
+# [:float, Proc]
+# [:numeric, [:integer, :float]]
+# [:date, Proc]
+# [:date_time, Proc]
+# [:time, Proc]
+# [:all, [:date_time, :numeric]]
+#
+# Each of these converters transcodes values to UTF-8 before attempting conversion.
+# If a value cannot be transcoded to UTF-8 the conversion will
+# fail and the value will remain unconverted.
+#
+# Converter +:integer+ converts each field that Integer() accepts:
+# data = '0,1,2,x'
+# # Without the converter
+# csv = CSV.parse_line(data)
+# csv # => ["0", "1", "2", "x"]
+# # With the converter
+# csv = CSV.parse_line(data, converters: :integer)
+# csv # => [0, 1, 2, "x"]
+#
+# Converter +:float+ converts each field that Float() accepts:
+# data = '1.0,3.14159,x'
+# # Without the converter
+# csv = CSV.parse_line(data)
+# csv # => ["1.0", "3.14159", "x"]
+# # With the converter
+# csv = CSV.parse_line(data, converters: :float)
+# csv # => [1.0, 3.14159, "x"]
+#
+# Converter +:numeric+ converts with both +:integer+ and +:float+.
+#
+# Converter +:date+ converts each field that Date::parse accepts:
+# data = '2001-02-03,x'
+# # Without the converter
+# csv = CSV.parse_line(data)
+# csv # => ["2001-02-03", "x"]
+# # With the converter
+# csv = CSV.parse_line(data, converters: :date)
+# csv # => [#, "x"]
+#
+# Converter +:date_time+ converts each field that DateTime::parse accepts:
+# data = '2020-05-07T14:59:00-05:00,x'
+# # Without the converter
+# csv = CSV.parse_line(data)
+# csv # => ["2020-05-07T14:59:00-05:00", "x"]
+# # With the converter
+# csv = CSV.parse_line(data, converters: :date_time)
+# csv # => [#, "x"]
+#
+# Converter +:time+ converts each field that Time::parse accepts:
+# data = '2020-05-07T14:59:00-05:00,x'
+# # Without the converter
+# csv = CSV.parse_line(data)
+# csv # => ["2020-05-07T14:59:00-05:00", "x"]
+# # With the converter
+# csv = CSV.parse_line(data, converters: :time)
+# csv # => [2020-05-07 14:59:00 -0500, "x"]
+#
+# Converter +:all+ converts with both +:date_time+ and +:numeric+.
+#
+# As seen above, method #convert adds \converters to a \CSV instance,
+# and method #converters returns an \Array of the \converters in effect:
+# csv = CSV.new('0,1,2')
+# csv.converters # => []
+# csv.convert(:integer)
+# csv.converters # => [:integer]
+# csv.convert(:date)
+# csv.converters # => [:integer, :date]
+#
+# ===== Custom Field \Converters
+#
+# You can define a custom field converter:
+# strip_converter = proc {|field| field.strip }
+# string = " foo , 0 \n bar , 1 \n baz , 2 \n"
+# array = CSV.parse(string, converters: strip_converter)
+# array # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+# You can register the converter in \Converters \Hash,
+# which allows you to refer to it by name:
+# CSV::Converters[:strip] = strip_converter
+# string = " foo , 0 \n bar , 1 \n baz , 2 \n"
+# array = CSV.parse(string, converters: :strip)
+# array # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+#
+# ==== Header \Converters
+#
+# Header converters operate only on headers (and not on other rows).
+#
+# There are three ways to use header \converters;
+# these examples use built-in header converter +:downcase+,
+# which downcases each parsed header.
+#
+# - Option +header_converters+ with a singleton parsing method:
+# string = "Name,Count\nFoo,0\nBar,1\nBaz,2"
+# tbl = CSV.parse(string, headers: true, header_converters: :downcase)
+# tbl.class # => CSV::Table
+# tbl.headers # => ["name", "count"]
+#
+# - Option +header_converters+ with a new \CSV instance:
+# csv = CSV.new(string, header_converters: :downcase)
+# # Header converters in effect:
+# csv.header_converters # => [:downcase]
+# tbl = CSV.parse(string, headers: true)
+# tbl.headers # => ["Name", "Count"]
+#
+# - Method #header_convert adds a header converter to a \CSV instance:
+# csv = CSV.new(string)
+# # Add a header converter.
+# csv.header_convert(:downcase)
+# csv.header_converters # => [:downcase]
+# tbl = CSV.parse(string, headers: true)
+# tbl.headers # => ["Name", "Count"]
+#
+# ===== Built-In Header \Converters
+#
+# The built-in header \converters are in \Hash CSV::HeaderConverters.
+# The keys there are the names of the \converters:
+# CSV::HeaderConverters.keys # => [:downcase, :symbol]
+#
+# Converter +:downcase+ converts each header by downcasing it:
+# string = "Name,Count\nFoo,0\nBar,1\nBaz,2"
+# tbl = CSV.parse(string, headers: true, header_converters: :downcase)
+# tbl.class # => CSV::Table
+# tbl.headers # => ["name", "count"]
+#
+# Converter +:symbol+ converts each header by making it into a \Symbol:
+# string = "Name,Count\nFoo,0\nBar,1\nBaz,2"
+# tbl = CSV.parse(string, headers: true, header_converters: :symbol)
+# tbl.headers # => [:name, :count]
+# Details:
+# - Strips leading and trailing whitespace.
+# - Downcases the header.
+# - Replaces embedded spaces with underscores.
+# - Removes non-word characters.
+# - Makes the string into a \Symbol.
+#
+# ===== Custom Header \Converters
+#
+# You can define a custom header converter:
+# upcase_converter = proc {|header| header.upcase }
+# string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(string, headers: true, header_converters: upcase_converter)
+# table # => #
+# table.headers # => ["NAME", "VALUE"]
+# You can register the converter in \HeaderConverters \Hash,
+# which allows you to refer to it by name:
+# CSV::HeaderConverters[:upcase] = upcase_converter
+# table = CSV.parse(string, headers: true, header_converters: :upcase)
+# table # => #
+# table.headers # => ["NAME", "VALUE"]
+#
+# ===== Write \Converters
+#
+# When you specify a write converter for generating \CSV,
+# each field to be written is passed to the converter;
+# its return value becomes the new value for the field.
+# A converter might, for example, strip whitespace from a field.
+#
+# Using no write converter (all fields unmodified):
+# output_string = CSV.generate do |csv|
+# csv << [' foo ', 0]
+# csv << [' bar ', 1]
+# csv << [' baz ', 2]
+# end
+# output_string # => " foo ,0\n bar ,1\n baz ,2\n"
+# Using option +write_converters+ with two custom write converters:
+# strip_converter = proc {|field| field.respond_to?(:strip) ? field.strip : field }
+# upcase_converter = proc {|field| field.respond_to?(:upcase) ? field.upcase : field }
+# write_converters = [strip_converter, upcase_converter]
+# output_string = CSV.generate(write_converters: write_converters) do |csv|
+# csv << [' foo ', 0]
+# csv << [' bar ', 1]
+# csv << [' baz ', 2]
+# end
+# output_string # => "FOO,0\nBAR,1\nBAZ,2\n"
+#
+# === Character Encodings (M17n or Multilingualization)
+#
+# This new CSV parser is m17n savvy. The parser works in the Encoding of the IO
+# or String object being read from or written to. Your data is never transcoded
+# (unless you ask Ruby to transcode it for you) and will literally be parsed in
+# the Encoding it is in. Thus CSV will return Arrays or Rows of Strings in the
+# Encoding of your data. This is accomplished by transcoding the parser itself
+# into your Encoding.
+#
+# Some transcoding must take place, of course, to accomplish this multiencoding
+# support. For example, :col_sep, :row_sep, and
+# :quote_char must be transcoded to match your data. Hopefully this
+# makes the entire process feel transparent, since CSV's defaults should just
+# magically work for your data. However, you can set these values manually in
+# the target Encoding to avoid the translation.
+#
+# It's also important to note that while all of CSV's core parser is now
+# Encoding agnostic, some features are not. For example, the built-in
+# converters will try to transcode data to UTF-8 before making conversions.
+# Again, you can provide custom converters that are aware of your Encodings to
+# avoid this translation. It's just too hard for me to support native
+# conversions in all of Ruby's Encodings.
+#
+# Anyway, the practical side of this is simple: make sure IO and String objects
+# passed into CSV have the proper Encoding set and everything should just work.
+# CSV methods that allow you to open IO objects (CSV::foreach(), CSV::open(),
+# CSV::read(), and CSV::readlines()) do allow you to specify the Encoding.
+#
+# One minor exception comes when generating CSV into a String with an Encoding
+# that is not ASCII compatible. There's no existing data for CSV to use to
+# prepare itself and thus you will probably need to manually specify the desired
+# Encoding for most of those cases. It will try to guess using the fields in a
+# row of output though, when using CSV::generate_line() or Array#to_csv().
+#
+# I try to point out any other Encoding issues in the documentation of methods
+# as they come up.
+#
+# This has been tested to the best of my ability with all non-"dummy" Encodings
+# Ruby ships with. However, it is brave new code and may have some bugs.
+# Please feel free to {report}[mailto:james@grayproductions.net] any issues you
+# find with it.
+#
+# source://csv//lib/csv/fields_converter.rb#3
+class CSV
+ include ::Enumerable
+ extend ::Forwardable
+
+ # :call-seq:
+ # CSV.new(string)
+ # CSV.new(io)
+ # CSV.new(string, **options)
+ # CSV.new(io, **options)
+ #
+ # Returns the new \CSV object created using +string+ or +io+
+ # and the specified +options+.
+ #
+ # - Argument +string+ should be a \String object;
+ # it will be put into a new StringIO object positioned at the beginning.
+ # :include: ../doc/csv/arguments/io.rdoc
+ # - Argument +options+: See:
+ # * {Options for Parsing}[#class-CSV-label-Options+for+Parsing]
+ # * {Options for Generating}[#class-CSV-label-Options+for+Generating]
+ # For performance reasons, the options cannot be overridden
+ # in a \CSV object, so those specified here will endure.
+ #
+ # In addition to the \CSV instance methods, several \IO methods are delegated.
+ # See {Delegated Methods}[#class-CSV-label-Delegated+Methods].
+ #
+ # ---
+ #
+ # Create a \CSV object from a \String object:
+ # csv = CSV.new('foo,0')
+ #
+ # Create a \CSV object from a \File object:
+ # File.write('t.csv', 'foo,0')
+ # csv = CSV.new(File.open('t.csv'))
+ #
+ # ---
+ #
+ # Raises an exception if the argument is +nil+:
+ # # Raises ArgumentError (Cannot parse nil as CSV):
+ # CSV.new(nil)
+ #
+ # @raise [ArgumentError]
+ # @return [CSV] a new instance of CSV
+ #
+ # source://csv//lib/csv.rb#2034
+ def initialize(data, col_sep: T.unsafe(nil), row_sep: T.unsafe(nil), quote_char: T.unsafe(nil), field_size_limit: T.unsafe(nil), max_field_size: T.unsafe(nil), converters: T.unsafe(nil), unconverted_fields: T.unsafe(nil), headers: T.unsafe(nil), return_headers: T.unsafe(nil), write_headers: T.unsafe(nil), header_converters: T.unsafe(nil), skip_blanks: T.unsafe(nil), force_quotes: T.unsafe(nil), skip_lines: T.unsafe(nil), liberal_parsing: T.unsafe(nil), internal_encoding: T.unsafe(nil), external_encoding: T.unsafe(nil), encoding: T.unsafe(nil), nil_value: T.unsafe(nil), empty_value: T.unsafe(nil), strip: T.unsafe(nil), quote_empty: T.unsafe(nil), write_converters: T.unsafe(nil), write_nil_value: T.unsafe(nil), write_empty_value: T.unsafe(nil)); end
+
+ # :call-seq:
+ # csv << row -> self
+ #
+ # Appends a row to +self+.
+ #
+ # - Argument +row+ must be an \Array object or a CSV::Row object.
+ # - The output stream must be open for writing.
+ #
+ # ---
+ #
+ # Append Arrays:
+ # CSV.generate do |csv|
+ # csv << ['foo', 0]
+ # csv << ['bar', 1]
+ # csv << ['baz', 2]
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Append CSV::Rows:
+ # headers = []
+ # CSV.generate do |csv|
+ # csv << CSV::Row.new(headers, ['foo', 0])
+ # csv << CSV::Row.new(headers, ['bar', 1])
+ # csv << CSV::Row.new(headers, ['baz', 2])
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Headers in CSV::Row objects are not appended:
+ # headers = ['Name', 'Count']
+ # CSV.generate do |csv|
+ # csv << CSV::Row.new(headers, ['foo', 0])
+ # csv << CSV::Row.new(headers, ['bar', 1])
+ # csv << CSV::Row.new(headers, ['baz', 2])
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # ---
+ #
+ # Raises an exception if +row+ is not an \Array or \CSV::Row:
+ # CSV.generate do |csv|
+ # # Raises NoMethodError (undefined method `collect' for :foo:Symbol)
+ # csv << :foo
+ # end
+ #
+ # Raises an exception if the output stream is not opened for writing:
+ # path = 't.csv'
+ # File.write(path, '')
+ # File.open(path) do |file|
+ # CSV.open(file) do |csv|
+ # # Raises IOError (not opened for writing)
+ # csv << ['foo', 0]
+ # end
+ # end
+ #
+ # source://csv//lib/csv.rb#2507
+ def <<(row); end
+
+ # :call-seq:
+ # csv << row -> self
+ #
+ # Appends a row to +self+.
+ #
+ # - Argument +row+ must be an \Array object or a CSV::Row object.
+ # - The output stream must be open for writing.
+ #
+ # ---
+ #
+ # Append Arrays:
+ # CSV.generate do |csv|
+ # csv << ['foo', 0]
+ # csv << ['bar', 1]
+ # csv << ['baz', 2]
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Append CSV::Rows:
+ # headers = []
+ # CSV.generate do |csv|
+ # csv << CSV::Row.new(headers, ['foo', 0])
+ # csv << CSV::Row.new(headers, ['bar', 1])
+ # csv << CSV::Row.new(headers, ['baz', 2])
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Headers in CSV::Row objects are not appended:
+ # headers = ['Name', 'Count']
+ # CSV.generate do |csv|
+ # csv << CSV::Row.new(headers, ['foo', 0])
+ # csv << CSV::Row.new(headers, ['bar', 1])
+ # csv << CSV::Row.new(headers, ['baz', 2])
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # ---
+ #
+ # Raises an exception if +row+ is not an \Array or \CSV::Row:
+ # CSV.generate do |csv|
+ # # Raises NoMethodError (undefined method `collect' for :foo:Symbol)
+ # csv << :foo
+ # end
+ #
+ # Raises an exception if the output stream is not opened for writing:
+ # path = 't.csv'
+ # File.write(path, '')
+ # File.open(path) do |file|
+ # CSV.open(file) do |csv|
+ # # Raises IOError (not opened for writing)
+ # csv << ['foo', 0]
+ # end
+ # end
+ #
+ # source://csv//lib/csv.rb#2507
+ def add_row(row); end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2396
+ def binmode?; end
+
+ # :call-seq:
+ # csv.col_sep -> string
+ #
+ # Returns the encoded column separator; used for parsing and writing;
+ # see {Option +col_sep+}[#class-CSV-label-Option+col_sep]:
+ # CSV.new('').col_sep # => ","
+ #
+ # source://csv//lib/csv.rb#2144
+ def col_sep; end
+
+ # :call-seq:
+ # convert(converter_name) -> array_of_procs
+ # convert {|field, field_info| ... } -> array_of_procs
+ #
+ # - With no block, installs a field converter (a \Proc).
+ # - With a block, defines and installs a custom field converter.
+ # - Returns the \Array of installed field converters.
+ #
+ # - Argument +converter_name+, if given, should be the name
+ # of an existing field converter.
+ #
+ # See {Field Converters}[#class-CSV-label-Field+Converters].
+ # ---
+ #
+ # With no block, installs a field converter:
+ # csv = CSV.new('')
+ # csv.convert(:integer)
+ # csv.convert(:float)
+ # csv.convert(:date)
+ # csv.converters # => [:integer, :float, :date]
+ #
+ # ---
+ #
+ # The block, if given, is called for each field:
+ # - Argument +field+ is the field value.
+ # - Argument +field_info+ is a CSV::FieldInfo object
+ # containing details about the field.
+ #
+ # The examples here assume the prior execution of:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ #
+ # Example giving a block:
+ # csv = CSV.open(path)
+ # csv.convert {|field, field_info| p [field, field_info]; field.upcase }
+ # csv.read # => [["FOO", "0"], ["BAR", "1"], ["BAZ", "2"]]
+ #
+ # Output:
+ # ["foo", #]
+ # ["0", #]
+ # ["bar", #]
+ # ["1", #]
+ # ["baz", #]
+ # ["2", #]
+ #
+ # The block need not return a \String object:
+ # csv = CSV.open(path)
+ # csv.convert {|field, field_info| field.to_sym }
+ # csv.read # => [[:foo, :"0"], [:bar, :"1"], [:baz, :"2"]]
+ #
+ # If +converter_name+ is given, the block is not called:
+ # csv = CSV.open(path)
+ # csv.convert(:integer) {|field, field_info| fail 'Cannot happen' }
+ # csv.read # => [["foo", 0], ["bar", 1], ["baz", 2]]
+ #
+ # ---
+ #
+ # Raises a parse-time exception if +converter_name+ is not the name of a built-in
+ # field converter:
+ # csv = CSV.open(path)
+  # csv.convert(:nosuch) # => [nil]
+ # # Raises NoMethodError (undefined method `arity' for nil:NilClass)
+ # csv.read
+ #
+ # source://csv//lib/csv.rb#2578
+ def convert(name = T.unsafe(nil), &converter); end
+
+ # :call-seq:
+ # csv.converters -> array
+ #
+ # Returns an \Array containing field converters;
+ # see {Field Converters}[#class-CSV-label-Field+Converters]:
+ # csv = CSV.new('')
+ # csv.converters # => []
+ # csv.convert(:integer)
+ # csv.converters # => [:integer]
+ # csv.convert(proc {|x| x.to_s })
+ # csv.converters
+ #
+ # Notes that you need to call
+ # +Ractor.make_shareable(CSV::Converters)+ on the main Ractor to use
+ # this method.
+ #
+ # source://csv//lib/csv.rb#2217
+ def converters; end
+
+ # :call-seq:
+ # csv.each -> enumerator
+ # csv.each {|row| ...}
+ #
+ # Calls the block with each successive row.
+ # The data source must be opened for reading.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.each do |row|
+ # p row
+ # end
+ # Output:
+ # ["foo", "0"]
+ # ["bar", "1"]
+ # ["baz", "2"]
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string, headers: true)
+ # csv.each do |row|
+ # p row
+ # end
+ # Output:
+ #
+ #
+ #
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.each do |row|
+ # p row
+ # end
+ #
+ # source://csv//lib/csv.rb#2689
+ def each(&block); end
+
+ # :call-seq:
+ # csv.encoding -> encoding
+ #
+ # Returns the encoding used for parsing and generating;
+ # see {Character Encodings (M17n or Multilingualization)}[#class-CSV-label-Character+Encodings+-28M17n+or+Multilingualization-29]:
+  # CSV.new('').encoding # => #<Encoding:UTF-8>
+ #
+ # source://csv//lib/csv.rb#2327
+ def encoding; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2432
+ def eof; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2432
+ def eof?; end
+
+ # :call-seq:
+ # csv.field_size_limit -> integer or nil
+ #
+ # Returns the limit for field size; used for parsing;
+ # see {Option +field_size_limit+}[#class-CSV-label-Option+field_size_limit]:
+ # CSV.new('').field_size_limit # => nil
+ #
+ # Deprecated since 3.2.3. Use +max_field_size+ instead.
+ #
+ # source://csv//lib/csv.rb#2176
+ def field_size_limit; end
+
+ # @raise [NotImplementedError]
+ #
+ # source://csv//lib/csv.rb#2404
+ def flock(*args); end
+
+ # :call-seq:
+ # csv.force_quotes? -> true or false
+ #
+ # Returns the value that determines whether all output fields are to be quoted;
+ # used for generating;
+ # see {Option +force_quotes+}[#class-CSV-label-Option+force_quotes]:
+ # CSV.new('').force_quotes? # => false
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2307
+ def force_quotes?; end
+
+ # :call-seq:
+ # csv.shift -> array, csv_row, or nil
+ #
+ # Returns the next row of data as:
+ # - An \Array if no headers are used.
+ # - A CSV::Row object if headers are used.
+ #
+ # The data source must be opened for reading.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.shift # => ["foo", "0"]
+ # csv.shift # => ["bar", "1"]
+ # csv.shift # => ["baz", "2"]
+ # csv.shift # => nil
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string, headers: true)
+  # csv.shift # => #<CSV::Row "Name":"foo" "Value":"0">
+  # csv.shift # => #<CSV::Row "Name":"bar" "Value":"1">
+  # csv.shift # => #<CSV::Row "Name":"baz" "Value":"2">
+ # csv.shift # => nil
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.shift
+ #
+ # source://csv//lib/csv.rb#2803
+ def gets; end
+
+ # The block need not return a \String object:
+ # csv = CSV.open(path, headers: true)
+ # csv.header_convert {|header, field_info| header.to_sym }
+ # table = csv.read
+ # table.headers # => [:Name, :Value]
+ #
+ # If +converter_name+ is given, the block is not called:
+ # csv = CSV.open(path, headers: true)
+ # csv.header_convert(:downcase) {|header, field_info| fail 'Cannot happen' }
+ # table = csv.read
+ # table.headers # => ["name", "value"]
+ # ---
+ #
+ # Raises a parse-time exception if +converter_name+ is not the name of a built-in
+ # field converter:
+ # csv = CSV.open(path, headers: true)
+ # csv.header_convert(:nosuch)
+ # # Raises NoMethodError (undefined method `arity' for nil:NilClass)
+ # csv.read
+ #
+ # source://csv//lib/csv.rb#2644
+ def header_convert(name = T.unsafe(nil), &converter); end
+
+ # :call-seq:
+ # csv.header_converters -> array
+ #
+ # Returns an \Array containing header converters; used for parsing;
+ # see {Header Converters}[#class-CSV-label-Header+Converters]:
+ # CSV.new('').header_converters # => []
+ #
+ # Notes that you need to call
+ # +Ractor.make_shareable(CSV::HeaderConverters)+ on the main Ractor
+ # to use this method.
+ #
+ # source://csv//lib/csv.rb#2283
+ def header_converters; end
+
+ # :call-seq:
+ # csv.header_row? -> true or false
+ #
+ # Returns +true+ if the next row to be read is a header row\;
+ # +false+ otherwise.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.header_row? # => false
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string, headers: true)
+ # csv.header_row? # => true
+  # csv.shift # => #<CSV::Row "Name":"foo" "Value":"0">
+ # csv.header_row? # => false
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.header_row?
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2766
+ def header_row?; end
+
+ # :call-seq:
+ # csv.headers -> object
+ #
+ # Returns the value that determines whether headers are used; used for parsing;
+ # see {Option +headers+}[#class-CSV-label-Option+headers]:
+ # CSV.new('').headers # => nil
+ #
+ # source://csv//lib/csv.rb#2241
+ def headers; end
+
+ # :call-seq:
+ # csv.inspect -> string
+ #
+ # Returns a \String showing certain properties of +self+:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string, headers: true)
+ # s = csv.inspect
+ #
+ # source://csv//lib/csv.rb#2825
+ def inspect; end
+
+ # @raise [NotImplementedError]
+ #
+ # source://csv//lib/csv.rb#2409
+ def ioctl(*args); end
+
+ # :call-seq:
+ # csv.liberal_parsing? -> true or false
+ #
+ # Returns the value that determines whether illegal input is to be handled; used for parsing;
+ # see {Option +liberal_parsing+}[#class-CSV-label-Option+liberal_parsing]:
+ # CSV.new('').liberal_parsing? # => false
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2317
+ def liberal_parsing?; end
+
+ # :call-seq:
+ # csv.line -> array
+ #
+ # Returns the line most recently read:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # CSV.open(path) do |csv|
+ # csv.each do |row|
+ # p [csv.lineno, csv.line]
+ # end
+ # end
+ # Output:
+ # [1, "foo,0\n"]
+ # [2, "bar,1\n"]
+ # [3, "baz,2\n"]
+ #
+ # source://csv//lib/csv.rb#2382
+ def line; end
+
+ # :call-seq:
+ # csv.lineno -> integer
+ #
+ # Returns the count of the rows parsed or generated.
+ #
+ # Parsing:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # CSV.open(path) do |csv|
+ # csv.each do |row|
+ # p [csv.lineno, row]
+ # end
+ # end
+ # Output:
+ # [1, ["foo", "0"]]
+ # [2, ["bar", "1"]]
+ # [3, ["baz", "2"]]
+ #
+ # Generating:
+ # CSV.generate do |csv|
+ # p csv.lineno; csv << ['foo', 0]
+ # p csv.lineno; csv << ['bar', 1]
+ # p csv.lineno; csv << ['baz', 2]
+ # end
+ # Output:
+ # 0
+ # 1
+ # 2
+ #
+ # source://csv//lib/csv.rb#2358
+ def lineno; end
+
+ # :call-seq:
+ # csv.max_field_size -> integer or nil
+ #
+ # Returns the limit for field size; used for parsing;
+ # see {Option +max_field_size+}[#class-CSV-label-Option+max_field_size]:
+ # CSV.new('').max_field_size # => nil
+ #
+ # Since 3.2.3.
+ #
+ # source://csv//lib/csv.rb#2188
+ def max_field_size; end
+
+ # source://csv//lib/csv.rb#2414
+ def path; end
+
+ # :call-seq:
+ # csv << row -> self
+ #
+ # Appends a row to +self+.
+ #
+ # - Argument +row+ must be an \Array object or a CSV::Row object.
+ # - The output stream must be open for writing.
+ #
+ # ---
+ #
+ # Append Arrays:
+ # CSV.generate do |csv|
+ # csv << ['foo', 0]
+ # csv << ['bar', 1]
+ # csv << ['baz', 2]
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Append CSV::Rows:
+ # headers = []
+ # CSV.generate do |csv|
+ # csv << CSV::Row.new(headers, ['foo', 0])
+ # csv << CSV::Row.new(headers, ['bar', 1])
+ # csv << CSV::Row.new(headers, ['baz', 2])
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Headers in CSV::Row objects are not appended:
+ # headers = ['Name', 'Count']
+ # CSV.generate do |csv|
+ # csv << CSV::Row.new(headers, ['foo', 0])
+ # csv << CSV::Row.new(headers, ['bar', 1])
+ # csv << CSV::Row.new(headers, ['baz', 2])
+ # end # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # ---
+ #
+ # Raises an exception if +row+ is not an \Array or \CSV::Row:
+ # CSV.generate do |csv|
+ # # Raises NoMethodError (undefined method `collect' for :foo:Symbol)
+ # csv << :foo
+ # end
+ #
+ # Raises an exception if the output stream is not opened for writing:
+ # path = 't.csv'
+ # File.write(path, '')
+ # File.open(path) do |file|
+ # CSV.open(file) do |csv|
+ # # Raises IOError (not opened for writing)
+ # csv << ['foo', 0]
+ # end
+ # end
+ #
+ # source://csv//lib/csv.rb#2507
+ def puts(row); end
+
+ # :call-seq:
+ # csv.quote_char -> character
+ #
+ # Returns the encoded quote character; used for parsing and writing;
+ # see {Option +quote_char+}[#class-CSV-label-Option+quote_char]:
+ # CSV.new('').quote_char # => "\""
+ #
+ # source://csv//lib/csv.rb#2164
+ def quote_char; end
+
+ # :call-seq:
+ # csv.read -> array or csv_table
+ #
+ # Forms the remaining rows from +self+ into:
+ # - A CSV::Table object, if headers are in use.
+ # - An \Array of Arrays, otherwise.
+ #
+ # The data source must be opened for reading.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # csv = CSV.open(path)
+ # csv.read # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # csv = CSV.open(path, headers: true)
+ # csv.read # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.read
+ #
+ # source://csv//lib/csv.rb#2730
+ def read; end
+
+ # :call-seq:
+ # csv.shift -> array, csv_row, or nil
+ #
+ # Returns the next row of data as:
+ # - An \Array if no headers are used.
+ # - A CSV::Row object if headers are used.
+ #
+ # The data source must be opened for reading.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.shift # => ["foo", "0"]
+ # csv.shift # => ["bar", "1"]
+ # csv.shift # => ["baz", "2"]
+ # csv.shift # => nil
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string, headers: true)
+ # csv.shift # => #<CSV::Row "Name":"foo" "Value":"0">
+ # csv.shift # => #<CSV::Row "Name":"bar" "Value":"1">
+ # csv.shift # => #<CSV::Row "Name":"baz" "Value":"2">
+ # csv.shift # => nil
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.shift
+ #
+ # source://csv//lib/csv.rb#2803
+ def readline; end
+
+ # :call-seq:
+ # csv.read -> array or csv_table
+ #
+ # Forms the remaining rows from +self+ into:
+ # - A CSV::Table object, if headers are in use.
+ # - An \Array of Arrays, otherwise.
+ #
+ # The data source must be opened for reading.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # csv = CSV.open(path)
+ # csv.read # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # csv = CSV.open(path, headers: true)
+ # csv.read # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.read
+ #
+ # source://csv//lib/csv.rb#2730
+ def readlines; end
+
+ # :call-seq:
+ # csv.return_headers? -> true or false
+ #
+ # Returns the value that determines whether headers are to be returned; used for parsing;
+ # see {Option +return_headers+}[#class-CSV-label-Option+return_headers]:
+ # CSV.new('').return_headers? # => false
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2259
+ def return_headers?; end
+
+ # Rewinds the underlying IO object and resets CSV's lineno() counter.
+ #
+ # source://csv//lib/csv.rb#2447
+ def rewind; end
+
+ # :call-seq:
+ # csv.row_sep -> string
+ #
+ # Returns the encoded row separator; used for parsing and writing;
+ # see {Option +row_sep+}[#class-CSV-label-Option+row_sep]:
+ # CSV.new('').row_sep # => "\n"
+ #
+ # source://csv//lib/csv.rb#2154
+ def row_sep; end
+
+ # :call-seq:
+ # csv.shift -> array, csv_row, or nil
+ #
+ # Returns the next row of data as:
+ # - An \Array if no headers are used.
+ # - A CSV::Row object if headers are used.
+ #
+ # The data source must be opened for reading.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.shift # => ["foo", "0"]
+ # csv.shift # => ["bar", "1"]
+ # csv.shift # => ["baz", "2"]
+ # csv.shift # => nil
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string, headers: true)
+ # csv.shift # => #<CSV::Row "Name":"foo" "Value":"0">
+ # csv.shift # => #<CSV::Row "Name":"bar" "Value":"1">
+ # csv.shift # => #<CSV::Row "Name":"baz" "Value":"2">
+ # csv.shift # => nil
+ #
+ # ---
+ #
+ # Raises an exception if the source is not opened for reading:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # csv = CSV.new(string)
+ # csv.close
+ # # Raises IOError (not opened for reading)
+ # csv.shift
+ #
+ # source://csv//lib/csv.rb#2803
+ def shift; end
+
+ # :call-seq:
+ # csv.skip_blanks? -> true or false
+ #
+ # Returns the value that determines whether blank lines are to be ignored; used for parsing;
+ # see {Option +skip_blanks+}[#class-CSV-label-Option+skip_blanks]:
+ # CSV.new('').skip_blanks? # => false
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2296
+ def skip_blanks?; end
+
+ # :call-seq:
+ # csv.skip_lines -> regexp or nil
+ #
+ # Returns the \Regexp used to identify comment lines; used for parsing;
+ # see {Option +skip_lines+}[#class-CSV-label-Option+skip_lines]:
+ # CSV.new('').skip_lines # => nil
+ #
+ # source://csv//lib/csv.rb#2198
+ def skip_lines; end
+
+ # @raise [NotImplementedError]
+ #
+ # source://csv//lib/csv.rb#2418
+ def stat(*args); end
+
+ # @raise [NotImplementedError]
+ #
+ # source://csv//lib/csv.rb#2423
+ def to_i; end
+
+ # source://csv//lib/csv.rb#2428
+ def to_io; end
+
+ # :call-seq:
+ # csv.unconverted_fields? -> object
+ #
+ # Returns the value that determines whether unconverted fields are to be
+ # available; used for parsing;
+ # see {Option +unconverted_fields+}[#class-CSV-label-Option+unconverted_fields]:
+ # CSV.new('').unconverted_fields? # => nil
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2231
+ def unconverted_fields?; end
+
+ # :call-seq:
+ # csv.write_headers? -> true or false
+ #
+ # Returns the value that determines whether headers are to be written; used for generating;
+ # see {Option +write_headers+}[#class-CSV-label-Option+write_headers]:
+ # CSV.new('').write_headers? # => nil
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv.rb#2269
+ def write_headers?; end
+
+ private
+
+ # source://csv//lib/csv.rb#2957
+ def build_fields_converter(initial_converters, options); end
+
+ # source://csv//lib/csv.rb#2939
+ def build_header_fields_converter; end
+
+ # source://csv//lib/csv.rb#2927
+ def build_parser_fields_converter; end
+
+ # source://csv//lib/csv.rb#2952
+ def build_writer_fields_converter; end
+
+ # Processes +fields+ with @converters, or @header_converters
+ # if +headers+ is passed as +true+, returning the converted field set. Any
+ # converter that changes the field into something other than a String halts
+ # the pipeline of conversion for that field. This is primarily an efficiency
+ # shortcut.
+ #
+ # source://csv//lib/csv.rb#2902
+ def convert_fields(fields, headers = T.unsafe(nil)); end
+
+ # source://csv//lib/csv.rb#2865
+ def determine_encoding(encoding, internal_encoding); end
+
+ # source://csv//lib/csv.rb#2935
+ def header_fields_converter; end
+
+ # source://csv//lib/csv.rb#2880
+ def normalize_converters(converters); end
+
+ # source://csv//lib/csv.rb#2965
+ def parser; end
+
+ # source://csv//lib/csv.rb#2974
+ def parser_enumerator; end
+
+ # source://csv//lib/csv.rb#2923
+ def parser_fields_converter; end
+
+ # source://csv//lib/csv.rb#2969
+ def parser_options; end
+
+ # Returns the encoding of the internal IO object.
+ #
+ # source://csv//lib/csv.rb#2913
+ def raw_encoding; end
+
+ # source://csv//lib/csv.rb#2978
+ def writer; end
+
+ # source://csv//lib/csv.rb#2948
+ def writer_fields_converter; end
+
+ # source://csv//lib/csv.rb#2982
+ def writer_options; end
+
+ class << self
+ # :call-seq:
+ # filter(in_string_or_io, **options) {|row| ... } -> array_of_arrays or csv_table
+ # filter(in_string_or_io, out_string_or_io, **options) {|row| ... } -> array_of_arrays or csv_table
+ # filter(**options) {|row| ... } -> array_of_arrays or csv_table
+ #
+ # - Parses \CSV from a source (\String, \IO stream, or ARGF).
+ # - Calls the given block with each parsed row:
+ # - Without headers, each row is an \Array.
+ # - With headers, each row is a CSV::Row.
+ # - Generates \CSV to an output (\String, \IO stream, or STDOUT).
+ # - Returns the parsed source:
+ # - Without headers, an \Array of \Arrays.
+ # - With headers, a CSV::Table.
+ #
+ # When +in_string_or_io+ is given, but not +out_string_or_io+,
+ # parses from the given +in_string_or_io+
+ # and generates to STDOUT.
+ #
+ # \String input without headers:
+ #
+ # in_string = "foo,0\nbar,1\nbaz,2"
+ # CSV.filter(in_string) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end # => [["FOO", 0], ["BAR", -1], ["BAZ", -2]]
+ #
+ # Output (to STDOUT):
+ #
+ # FOO,0
+ # BAR,-1
+ # BAZ,-2
+ #
+ # \String input with headers:
+ #
+ # in_string = "Name,Value\nfoo,0\nbar,1\nbaz,2"
+ # CSV.filter(in_string, headers: true) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # Output (to STDOUT):
+ #
+ # Name,Value
+ # FOO,0
+ # BAR,-1
+ # BAZ,-2
+ #
+ # \IO stream input without headers:
+ #
+ # File.write('t.csv', "foo,0\nbar,1\nbaz,2")
+ # File.open('t.csv') do |in_io|
+ # CSV.filter(in_io) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end
+ # end # => [["FOO", 0], ["BAR", -1], ["BAZ", -2]]
+ #
+ # Output (to STDOUT):
+ #
+ # FOO,0
+ # BAR,-1
+ # BAZ,-2
+ #
+ # \IO stream input with headers:
+ #
+ # File.write('t.csv', "Name,Value\nfoo,0\nbar,1\nbaz,2")
+ # File.open('t.csv') do |in_io|
+ # CSV.filter(in_io, headers: true) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end
+ # end # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # Output (to STDOUT):
+ #
+ # Name,Value
+ # FOO,0
+ # BAR,-1
+ # BAZ,-2
+ #
+ # When both +in_string_or_io+ and +out_string_or_io+ are given,
+ # parses from +in_string_or_io+ and generates to +out_string_or_io+.
+ #
+ # \String output without headers:
+ #
+ # in_string = "foo,0\nbar,1\nbaz,2"
+ # out_string = ''
+ # CSV.filter(in_string, out_string) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end # => [["FOO", 0], ["BAR", -1], ["BAZ", -2]]
+ # out_string # => "FOO,0\nBAR,-1\nBAZ,-2\n"
+ #
+ # \String output with headers:
+ #
+ # in_string = "Name,Value\nfoo,0\nbar,1\nbaz,2"
+ # out_string = ''
+ # CSV.filter(in_string, out_string, headers: true) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end # => #<CSV::Table mode:col_or_row row_count:4>
+ # out_string # => "Name,Value\nFOO,0\nBAR,-1\nBAZ,-2\n"
+ #
+ # \IO stream output without headers:
+ #
+ # in_string = "foo,0\nbar,1\nbaz,2"
+ # File.open('t.csv', 'w') do |out_io|
+ # CSV.filter(in_string, out_io) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end
+ # end # => [["FOO", 0], ["BAR", -1], ["BAZ", -2]]
+ # File.read('t.csv') # => "FOO,0\nBAR,-1\nBAZ,-2\n"
+ #
+ # \IO stream output with headers:
+ #
+ # in_string = "Name,Value\nfoo,0\nbar,1\nbaz,2"
+ # File.open('t.csv', 'w') do |out_io|
+ # CSV.filter(in_string, out_io, headers: true) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end
+ # end # => #<CSV::Table mode:col_or_row row_count:4>
+ # File.read('t.csv') # => "Name,Value\nFOO,0\nBAR,-1\nBAZ,-2\n"
+ #
+ # When neither +in_string_or_io+ nor +out_string_or_io+ given,
+ # parses from {ARGF}[rdoc-ref:ARGF]
+ # and generates to STDOUT.
+ #
+ # Without headers:
+ #
+ # # Put Ruby code into a file.
+ # ruby = <<-EOT
+ # require 'csv'
+ # CSV.filter do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end
+ # EOT
+ # File.write('t.rb', ruby)
+ # # Put some CSV into a file.
+ # File.write('t.csv', "foo,0\nbar,1\nbaz,2")
+ # # Run the Ruby code with CSV filename as argument.
+ # system(Gem.ruby, "t.rb", "t.csv")
+ #
+ # Output (to STDOUT):
+ #
+ # FOO,0
+ # BAR,-1
+ # BAZ,-2
+ #
+ # With headers:
+ #
+ # # Put Ruby code into a file.
+ # ruby = <<-EOT
+ # require 'csv'
+ # CSV.filter(headers: true) do |row|
+ # row[0].upcase!
+ # row[1] = - row[1].to_i
+ # end
+ # EOT
+ # File.write('t.rb', ruby)
+ # # Put some CSV into a file.
+ # File.write('t.csv', "Name,Value\nfoo,0\nbar,1\nbaz,2")
+ # # Run the Ruby code with CSV filename as argument.
+ # system(Gem.ruby, "t.rb", "t.csv")
+ #
+ # Output (to STDOUT):
+ #
+ # Name,Value
+ # FOO,0
+ # BAR,-1
+ # BAZ,-2
+ #
+ # Arguments:
+ #
+ # * Argument +in_string_or_io+ must be a \String or an \IO stream.
+ # * Argument +out_string_or_io+ must be a \String or an \IO stream.
+ # * Arguments **options must be keyword options.
+ #
+ # - Each option defined as an {option for parsing}[#class-CSV-label-Options+for+Parsing]
+ # is used for parsing the filter input.
+ # - Each option defined as an {option for generating}[#class-CSV-label-Options+for+Generating]
+ # is used for generating the filter output.
+ #
+ # However, there are three options that may be used for both parsing and generating:
+ # +col_sep+, +quote_char+, and +row_sep+.
+ #
+ # Therefore for method +filter+ (and method +filter+ only),
+ # there are special options that allow these parsing and generating options
+ # to be specified separately:
+ #
+ # - Options +input_col_sep+ and +output_col_sep+
+ # (and their aliases +in_col_sep+ and +out_col_sep+)
+ # specify the column separators for parsing and generating.
+ # - Options +input_quote_char+ and +output_quote_char+
+ # (and their aliases +in_quote_char+ and +out_quote_char+)
+ # specify the quote characters for parsing and generating.
+ # - Options +input_row_sep+ and +output_row_sep+
+ # (and their aliases +in_row_sep+ and +out_row_sep+)
+ # specify the row separators for parsing and generating.
+ #
+ # Example options (for column separators):
+ #
+ # CSV.filter # Default for both parsing and generating.
+ # CSV.filter(in_col_sep: ';') # ';' for parsing, default for generating.
+ # CSV.filter(out_col_sep: '|') # Default for parsing, '|' for generating.
+ # CSV.filter(in_col_sep: ';', out_col_sep: '|') # ';' for parsing, '|' for generating.
+ #
+ # Note that for a special option (e.g., +input_col_sep+)
+ # and its corresponding "regular" option (e.g., +col_sep+),
+ # the two are mutually overriding.
+ #
+ # Another example (possibly surprising):
+ #
+ # CSV.filter(in_col_sep: ';', col_sep: '|') # '|' for both parsing(!) and generating.
+ #
+ # source://csv//lib/csv.rb#1259
+ def filter(input = T.unsafe(nil), output = T.unsafe(nil), **options); end
+
+ # :call-seq:
+ # foreach(path_or_io, mode='r', **options) {|row| ... }
+ # foreach(path_or_io, mode='r', **options) -> new_enumerator
+ #
+ # Calls the block with each row read from source +path_or_io+.
+ #
+ # \Path input without headers:
+ #
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # in_path = 't.csv'
+ # File.write(in_path, string)
+ # CSV.foreach(in_path) {|row| p row }
+ #
+ # Output:
+ #
+ # ["foo", "0"]
+ # ["bar", "1"]
+ # ["baz", "2"]
+ #
+ # \Path input with headers:
+ #
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # in_path = 't.csv'
+ # File.write(in_path, string)
+ # CSV.foreach(in_path, headers: true) {|row| p row }
+ #
+ # Output:
+ #
+ # #<CSV::Row "Name":"foo" "Value":"0">
+ # #<CSV::Row "Name":"bar" "Value":"1">
+ # #<CSV::Row "Name":"baz" "Value":"2">
+ #
+ # \IO stream input without headers:
+ #
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # File.open('t.csv') do |in_io|
+ # CSV.foreach(in_io) {|row| p row }
+ # end
+ #
+ # Output:
+ #
+ # ["foo", "0"]
+ # ["bar", "1"]
+ # ["baz", "2"]
+ #
+ # \IO stream input with headers:
+ #
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # File.open('t.csv') do |in_io|
+ # CSV.foreach(in_io, headers: true) {|row| p row }
+ # end
+ #
+ # Output:
+ #
+ # #<CSV::Row "Name":"foo" "Value":"0">
+ # #<CSV::Row "Name":"bar" "Value":"1">
+ # #<CSV::Row "Name":"baz" "Value":"2">
+ #
+ # With no block given, returns an \Enumerator:
+ #
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # CSV.foreach(path) # => #<Enumerator: CSV:foreach("t.csv", "r")>
+ #
+ # Arguments:
+ # * Argument +path_or_io+ must be a file path or an \IO stream.
+ # * Argument +mode+, if given, must be a \File mode.
+ # See {Access Modes}[https://docs.ruby-lang.org/en/master/File.html#class-File-label-Access+Modes].
+ # * Arguments **options must be keyword options.
+ # See {Options for Parsing}[#class-CSV-label-Options+for+Parsing].
+ # * This method optionally accepts an additional :encoding option
+ # that you can use to specify the Encoding of the data read from +path+ or +io+.
+ # You must provide this unless your data is in the encoding
+ # given by Encoding::default_external.
+ # Parsing will use this to determine how to parse the data.
+ # You may provide a second Encoding to
+ # have the data transcoded as it is read. For example,
+ # encoding: 'UTF-32BE:UTF-8'
+ # would read +UTF-32BE+ data from the file
+ # but transcode it to +UTF-8+ before parsing.
+ #
+ # source://csv//lib/csv.rb#1389
+ def foreach(path, mode = T.unsafe(nil), **options, &block); end
+
+ # :call-seq:
+ # generate(csv_string, **options) {|csv| ... }
+ # generate(**options) {|csv| ... }
+ #
+ # * Argument +csv_string+, if given, must be a \String object;
+ # defaults to a new empty \String.
+ # * Arguments +options+, if given, should be generating options.
+ # See {Options for Generating}[#class-CSV-label-Options+for+Generating].
+ #
+ # ---
+ #
+ # Creates a new \CSV object via CSV.new(csv_string, **options);
+ # calls the block with the \CSV object, which the block may modify;
+ # returns the \String generated from the \CSV object.
+ #
+ # Note that a passed \String *is* modified by this method.
+ # Pass csv_string.dup if the \String must be preserved.
+ #
+ # This method has one additional option: :encoding,
+ # which sets the base Encoding for the output if no +str+ is specified.
+ # CSV needs this hint if you plan to output non-ASCII compatible data.
+ #
+ # ---
+ #
+ # Add lines:
+ # input_string = "foo,0\nbar,1\nbaz,2\n"
+ # output_string = CSV.generate(input_string) do |csv|
+ # csv << ['bat', 3]
+ # csv << ['bam', 4]
+ # end
+ # output_string # => "foo,0\nbar,1\nbaz,2\nbat,3\nbam,4\n"
+ # input_string # => "foo,0\nbar,1\nbaz,2\nbat,3\nbam,4\n"
+ # output_string.equal?(input_string) # => true # Same string, modified
+ #
+ # Add lines into new string, preserving old string:
+ # input_string = "foo,0\nbar,1\nbaz,2\n"
+ # output_string = CSV.generate(input_string.dup) do |csv|
+ # csv << ['bat', 3]
+ # csv << ['bam', 4]
+ # end
+ # output_string # => "foo,0\nbar,1\nbaz,2\nbat,3\nbam,4\n"
+ # input_string # => "foo,0\nbar,1\nbaz,2\n"
+ # output_string.equal?(input_string) # => false # Different strings
+ #
+ # Create lines from nothing:
+ # output_string = CSV.generate do |csv|
+ # csv << ['foo', 0]
+ # csv << ['bar', 1]
+ # csv << ['baz', 2]
+ # end
+ # output_string # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # ---
+ #
+ # Raises an exception if +csv_string+ is not a \String object:
+ # # Raises TypeError (no implicit conversion of Integer into String)
+ # CSV.generate(0)
+ #
+ # @yield [csv]
+ #
+ # source://csv//lib/csv.rb#1455
+ def generate(str = T.unsafe(nil), **options); end
+
+ # :call-seq:
+ # CSV.generate_line(ary)
+ # CSV.generate_line(ary, **options)
+ #
+ # Returns the \String created by generating \CSV from +ary+
+ # using the specified +options+.
+ #
+ # Argument +ary+ must be an \Array.
+ #
+ # Special options:
+ # * Option :row_sep defaults to "\n" on Ruby 3.0 or later
+ # and $INPUT_RECORD_SEPARATOR ($/) otherwise.:
+ # $INPUT_RECORD_SEPARATOR # => "\n"
+ # * This method accepts an additional option, :encoding, which sets the base
+ # Encoding for the output. This method will try to guess your Encoding from
+ # the first non-+nil+ field in +row+, if possible, but you may need to use
+ # this parameter as a backup plan.
+ #
+ # For other +options+,
+ # see {Options for Generating}[#class-CSV-label-Options+for+Generating].
+ #
+ # ---
+ #
+ # Returns the \String generated from an \Array:
+ # CSV.generate_line(['foo', '0']) # => "foo,0\n"
+ #
+ # ---
+ #
+ # Raises an exception if +ary+ is not an \Array:
+ # # Raises NoMethodError (undefined method `find' for :foo:Symbol)
+ # CSV.generate_line(:foo)
+ #
+ # source://csv//lib/csv.rb#1503
+ def generate_line(row, **options); end
+
+ # :call-seq:
+ # CSV.generate_lines(rows)
+ # CSV.generate_lines(rows, **options)
+ #
+ # Returns the \String created by generating \CSV from
+ # using the specified +options+.
+ #
+ # Argument +rows+ must be an \Array of row. Row is \Array of \String or \CSV::Row.
+ #
+ # Special options:
+ # * Option :row_sep defaults to "\n" on Ruby 3.0 or later
+ # and $INPUT_RECORD_SEPARATOR ($/) otherwise.:
+ # $INPUT_RECORD_SEPARATOR # => "\n"
+ # * This method accepts an additional option, :encoding, which sets the base
+ # Encoding for the output. This method will try to guess your Encoding from
+ # the first non-+nil+ field in +row+, if possible, but you may need to use
+ # this parameter as a backup plan.
+ #
+ # For other +options+,
+ # see {Options for Generating}[#class-CSV-label-Options+for+Generating].
+ #
+ # ---
+ #
+ # Returns the \String generated from an \Array of \Arrays:
+ # CSV.generate_lines([['foo', '0'], ['bar', '1'], ['baz', '2']]) # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # ---
+ #
+ # Raises an exception if +rows+ is not an \Array of \Arrays:
+ # # Raises NoMethodError (undefined method `each' for :foo:Symbol)
+ # CSV.generate_lines(:foo)
+ #
+ # source://csv//lib/csv.rb#1558
+ def generate_lines(rows, **options); end
+
+ # :call-seq:
+ # instance(string, **options)
+ # instance(io = $stdout, **options)
+ # instance(string, **options) {|csv| ... }
+ # instance(io = $stdout, **options) {|csv| ... }
+ #
+ # Creates or retrieves cached \CSV objects.
+ # For arguments and options, see CSV.new.
+ #
+ # This API is not Ractor-safe.
+ #
+ # ---
+ #
+ # With no block given, returns a \CSV object.
+ #
+ # The first call to +instance+ creates and caches a \CSV object:
+ # s0 = 's0'
+ # csv0 = CSV.instance(s0)
+ # csv0.class # => CSV
+ #
+ # Subsequent calls to +instance+ with that _same_ +string+ or +io+
+ # retrieve that same cached object:
+ # csv1 = CSV.instance(s0)
+ # csv1.class # => CSV
+ # csv1.equal?(csv0) # => true # Same CSV object
+ #
+ # A subsequent call to +instance+ with a _different_ +string+ or +io+
+ # creates and caches a _different_ \CSV object.
+ # s1 = 's1'
+ # csv2 = CSV.instance(s1)
+ # csv2.equal?(csv0) # => false # Different CSV object
+ #
+ # All the cached objects remains available:
+ # csv3 = CSV.instance(s0)
+ # csv3.equal?(csv0) # true # Same CSV object
+ # csv4 = CSV.instance(s1)
+ # csv4.equal?(csv2) # true # Same CSV object
+ #
+ # ---
+ #
+ # When a block is given, calls the block with the created or retrieved
+ # \CSV object; returns the block's return value:
+ # CSV.instance(s0) {|csv| :foo } # => :foo
+ #
+ # source://csv//lib/csv.rb#1026
+ def instance(data = T.unsafe(nil), **options); end
+
+ # :call-seq:
+ # open(path_or_io, mode = "rb", **options ) -> new_csv
+ # open(path_or_io, mode = "rb", **options ) { |csv| ... } -> object
+ #
+ # possible options elements:
+ # keyword form:
+ # :invalid => nil # raise error on invalid byte sequence (default)
+ # :invalid => :replace # replace invalid byte sequence
+ # :undef => :replace # replace undefined conversion
+ # :replace => string # replacement string ("?" or "\uFFFD" if not specified)
+ #
+ # * Argument +path_or_io+, must be a file path or an \IO stream.
+ # :include: ../doc/csv/arguments/io.rdoc
+ # * Argument +mode+, if given, must be a \File mode.
+ # See {Access Modes}[https://docs.ruby-lang.org/en/master/File.html#class-File-label-Access+Modes].
+ # * Arguments **options must be keyword options.
+ # See {Options for Generating}[#class-CSV-label-Options+for+Generating].
+ # * This method optionally accepts an additional :encoding option
+ # that you can use to specify the Encoding of the data read from +path+ or +io+.
+ # You must provide this unless your data is in the encoding
+ # given by Encoding::default_external.
+ # Parsing will use this to determine how to parse the data.
+ # You may provide a second Encoding to
+ # have the data transcoded as it is read. For example,
+ # encoding: 'UTF-32BE:UTF-8'
+ # would read +UTF-32BE+ data from the file
+ # but transcode it to +UTF-8+ before parsing.
+ #
+ # ---
+ #
+ # These examples assume prior execution of:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ #
+ # string_io = StringIO.new
+ # string_io << "foo,0\nbar,1\nbaz,2\n"
+ #
+ # ---
+ #
+ # With no block given, returns a new \CSV object.
+ #
+ # Create a \CSV object using a file path:
+ # csv = CSV.open(path)
+ #
+ # Create a \CSV object using an open \File:
+ # csv = CSV.open(File.open(path))
+ #
+ # Create a \CSV object using a \StringIO:
+ # csv = CSV.open(string_io)
+ # ---
+ #
+ # With a block given, calls the block with the created \CSV object;
+ # returns the block's return value:
+ #
+ # Using a file path:
+ # csv = CSV.open(path) {|csv| p csv}
+ # Output:
+ # #<CSV io_type:File io_path:"t.csv" encoding:UTF-8 lineno:0 col_sep:"," row_sep:"\n" quote_char:"\"">
+ # Using an open \File:
+ # csv = CSV.open(File.open(path)) {|csv| p csv}
+ # Output:
+ # #<CSV io_type:File io_path:"t.csv" encoding:UTF-8 lineno:0 col_sep:"," row_sep:"\n" quote_char:"\"">
+ # Using a \StringIO:
+ # csv = CSV.open(string_io) {|csv| p csv}
+ # Output:
+ # #<CSV io_type:StringIO encoding:UTF-8 lineno:0 col_sep:"," row_sep:"\n" quote_char:"\"">
+ # ---
+ #
+ # Raises an exception if the argument is not a \String object or \IO object:
+ # # Raises TypeError (no implicit conversion of Symbol into String)
+ # CSV.open(:foo)
+ #
+ # source://csv//lib/csv.rb#1647
+ def open(filename_or_io, mode = T.unsafe(nil), **options); end
+
+ # :call-seq:
+ # parse(string) -> array_of_arrays
+ # parse(io) -> array_of_arrays
+ # parse(string, headers: ..., **options) -> csv_table
+ # parse(io, headers: ..., **options) -> csv_table
+ # parse(string, **options) {|row| ... }
+ # parse(io, **options) {|row| ... }
+ #
+ # Parses +string+ or +io+ using the specified +options+.
+ #
+ # - Argument +string+ should be a \String object;
+ # it will be put into a new StringIO object positioned at the beginning.
+ # :include: ../doc/csv/arguments/io.rdoc
+ # - Argument +options+: see {Options for Parsing}[#class-CSV-label-Options+for+Parsing]
+ #
+ # ====== Without Option +headers+
+ #
+ # Without {option +headers+}[#class-CSV-label-Option+headers] case.
+ #
+ # These examples assume prior execution of:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ #
+ # ---
+ #
+ # With no block given, returns an \Array of Arrays formed from the source.
+ #
+ # Parse a \String:
+ # a_of_a = CSV.parse(string)
+ # a_of_a # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # Parse an open \File:
+ # a_of_a = File.open(path) do |file|
+ # CSV.parse(file)
+ # end
+ # a_of_a # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # ---
+ #
+ # With a block given, calls the block with each parsed row:
+ #
+ # Parse a \String:
+ # CSV.parse(string) {|row| p row }
+ #
+ # Output:
+ # ["foo", "0"]
+ # ["bar", "1"]
+ # ["baz", "2"]
+ #
+ # Parse an open \File:
+ # File.open(path) do |file|
+ # CSV.parse(file) {|row| p row }
+ # end
+ #
+ # Output:
+ # ["foo", "0"]
+ # ["bar", "1"]
+ # ["baz", "2"]
+ #
+ # ====== With Option +headers+
+ #
+ # With {option +headers+}[#class-CSV-label-Option+headers] case.
+ #
+ # These examples assume prior execution of:
+ # string = "Name,Count\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ #
+ # ---
+ #
+ # With no block given, returns a CSV::Table object formed from the source.
+ #
+ # Parse a \String:
+ # csv_table = CSV.parse(string, headers: ['Name', 'Count'])
+ # csv_table # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # Parse an open \File:
+ # csv_table = File.open(path) do |file|
+ # CSV.parse(file, headers: ['Name', 'Count'])
+ # end
+ # csv_table # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # ---
+ #
+ # With a block given, calls the block with each parsed row,
+ # which has been formed into a CSV::Row object:
+ #
+ # Parse a \String:
+ # CSV.parse(string, headers: ['Name', 'Count']) {|row| p row }
+ #
+ # Output:
+ # #<CSV::Row "Name":"foo" "Count":"0">
+ # #<CSV::Row "Name":"bar" "Count":"1">
+ # #<CSV::Row "Name":"baz" "Count":"2">
+ #
+ # Parse an open \File:
+ # File.open(path) do |file|
+ # CSV.parse(file, headers: ['Name', 'Count']) {|row| p row }
+ # end
+ #
+ # Output:
+ # #<CSV::Row "Name":"foo" "Count":"0">
+ # #<CSV::Row "Name":"bar" "Count":"1">
+ # #<CSV::Row "Name":"baz" "Count":"2">
+ #
+ # ---
+ #
+ # Raises an exception if the argument is not a \String object or \IO object:
+ # # Raises NoMethodError (undefined method `close' for :foo:Symbol)
+ # CSV.parse(:foo)
+ #
+ # ---
+ #
+ # Please make sure if your text contains \BOM or not. CSV.parse will not remove
+ # \BOM automatically. You might want to remove \BOM before calling CSV.parse :
+ # # remove BOM on calling File.open
+ # CSV.parse(file, headers: true) do |row|
+ # # you can get value by column name because BOM is removed
+ # p row['Name']
+ # end
+ # end
+ #
+ # Output:
+ # # "foo"
+ # # "bar"
+ # # "baz"
+ #
+ # source://csv//lib/csv.rb#1825
+ def parse(str, **options, &block); end
+
+ # :call-seq:
+ # CSV.parse_line(string) -> new_array or nil
+ # CSV.parse_line(io) -> new_array or nil
+ # CSV.parse_line(string, **options) -> new_array or nil
+ # CSV.parse_line(io, **options) -> new_array or nil
+ # CSV.parse_line(string, headers: true, **options) -> csv_row or nil
+ # CSV.parse_line(io, headers: true, **options) -> csv_row or nil
+ #
+ # Returns the data created by parsing the first line of +string+ or +io+
+ # using the specified +options+.
+ #
+ # - Argument +string+ should be a \String object;
+ # it will be put into a new StringIO object positioned at the beginning.
+ # :include: ../doc/csv/arguments/io.rdoc
+ # - Argument +options+: see {Options for Parsing}[#class-CSV-label-Options+for+Parsing]
+ #
+ # ====== Without Option +headers+
+ #
+ # Without option +headers+, returns the first row as a new \Array.
+ #
+ # These examples assume prior execution of:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ #
+ # Parse the first line from a \String object:
+ # CSV.parse_line(string) # => ["foo", "0"]
+ #
+ # Parse the first line from a File object:
+ # File.open(path) do |file|
+ # CSV.parse_line(file) # => ["foo", "0"]
+ # end # => ["foo", "0"]
+ #
+ # Returns +nil+ if the argument is an empty \String:
+ # CSV.parse_line('') # => nil
+ #
+ # ====== With Option +headers+
+ #
+ # With {option +headers+}[#class-CSV-label-Option+headers],
+ # returns the first row as a CSV::Row object.
+ #
+ # These examples assume prior execution of:
+ # string = "Name,Count\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ #
+ # Parse the first line from a \String object:
+ # CSV.parse_line(string, headers: true) # => #<CSV::Row "Name":"foo" "Count":"0">
+ #
+ # Parse the first line from a File object:
+ # File.open(path) do |file|
+ # CSV.parse_line(file, headers: true)
+ # end # => #<CSV::Row "Name":"foo" "Count":"0">
+ #
+ # ---
+ #
+ # Raises an exception if the argument is +nil+:
+ # # Raises ArgumentError (Cannot parse nil as CSV):
+ # CSV.parse_line(nil)
+ #
+ # source://csv//lib/csv.rb#1898
+ def parse_line(line, **options); end
+
+ # :call-seq:
+ # read(source, **options) -> array_of_arrays
+ # read(source, headers: true, **options) -> csv_table
+ #
+ # Opens the given +source+ with the given +options+ (see CSV.open),
+ # reads the source (see CSV#read), and returns the result,
+ # which will be either an \Array of Arrays or a CSV::Table.
+ #
+ # Without headers:
+ # string = "foo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # CSV.read(path) # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # With headers:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # CSV.read(path, headers: true) # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # source://csv//lib/csv.rb#1922
+ def read(path, **options); end
+
+ # :call-seq:
+ # CSV.readlines(source, **options)
+ #
+ # Alias for CSV.read.
+ #
+ # source://csv//lib/csv.rb#1930
+ def readlines(path, **options); end
+
+ # :call-seq:
+ # CSV.table(source, **options)
+ #
+ # Calls CSV.read with +source+, +options+, and certain default options:
+ # - +headers+: +true+
+ # - +converters+: +:numeric+
+ # - +header_converters+: +:symbol+
+ #
+ # Returns a CSV::Table object.
+ #
+ # Example:
+ # string = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # path = 't.csv'
+ # File.write(path, string)
+ # CSV.table(path) # => #<CSV::Table mode:col_or_row row_count:4>
+ #
+ # source://csv//lib/csv.rb#1949
+ def table(path, **options); end
+
+ private
+
+ # source://csv//lib/csv.rb#1990
+ def create_stringio(str, mode, opts); end
+
+ # source://csv//lib/csv.rb#1963
+ def may_enable_bom_detection_automatically(filename_or_io, mode, options, file_opts); end
+ end
+end
+
+# The encoding used by all converters.
+#
+# source://csv//lib/csv.rb#895
+CSV::ConverterEncoding = T.let(T.unsafe(nil), Encoding)
+
+# A Regexp used to find and convert some common Date formats.
+#
+# source://csv//lib/csv.rb#884
+CSV::DateMatcher = T.let(T.unsafe(nil), Regexp)
+
+# A Regexp used to find and convert some common (Date)Time formats.
+#
+# source://csv//lib/csv.rb#887
+CSV::DateTimeMatcher = T.let(T.unsafe(nil), Regexp)
+
+# Note: Don't use this class directly. This is an internal class.
+#
+# source://csv//lib/csv/fields_converter.rb#5
+class CSV::FieldsConverter
+ include ::Enumerable
+
+ # A CSV::FieldsConverter is a data structure for storing the
+ # fields converter properties to be passed as a parameter
+ # when parsing a new file (e.g. CSV::Parser.new(@io, parser_options))
+ #
+ # @return [FieldsConverter] a new instance of FieldsConverter
+ #
+ # source://csv//lib/csv/fields_converter.rb#20
+ def initialize(options = T.unsafe(nil)); end
+
+ # Registers a converter: either a built-in one referenced by +name+, or
+ # the given +converter+ block.
+ #
+ # source://csv//lib/csv/fields_converter.rb#30
+ def add_converter(name = T.unsafe(nil), &converter); end
+
+ # Applies the registered converters to +fields+, with +headers+ and
+ # +lineno+ as context. NOTE(review): exact semantics (incl. the role of
+ # +quoted_fields+) live in lib/csv/fields_converter.rb#54 — not visible
+ # from this stub.
+ #
+ # source://csv//lib/csv/fields_converter.rb#54
+ def convert(fields, headers, lineno, quoted_fields = T.unsafe(nil)); end
+
+ # Enumerates the registered converters (backs the Enumerable include).
+ #
+ # source://csv//lib/csv/fields_converter.rb#46
+ def each(&block); end
+
+ # Presumably true when no converters are registered — confirm in gem source.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/fields_converter.rb#50
+ def empty?; end
+
+ private
+
+ # source://csv//lib/csv/fields_converter.rb#92
+ def builtin_converters; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/fields_converter.rb#87
+ def need_convert?; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/fields_converter.rb#83
+ def need_static_convert?; end
+end
+
+# NOTE(review): presumably the default for the +quoted_fields+ parameter of
+# #convert — value elided in this stub; confirm in fields_converter.rb#8.
+#
+# source://csv//lib/csv/fields_converter.rb#8
+CSV::FieldsConverter::NO_QUOTED_FIELDS = T.let(T.unsafe(nil), Array)
+
+# A \Hash containing the names and \Procs for the built-in header converters.
+# See {Built-In Header Converters}[#class-CSV-label-Built-In+Header+Converters].
+#
+# This \Hash is intentionally left unfrozen, and may be extended with
+# custom header converters.
+# See {Custom Header Converters}[#class-CSV-label-Custom+Header+Converters].
+#
+# source://csv//lib/csv.rb#944
+CSV::HeaderConverters = T.let(T.unsafe(nil), Hash)
+
+# Internal helper module around the input record separator.
+#
+# source://csv//lib/csv/input_record_separator.rb#5
+module CSV::InputRecordSeparator
+ class << self
+ # Returns the record separator used when reading input.
+ # NOTE(review): the implementation is not visible from this stub — see
+ # lib/csv/input_record_separator.rb#8 for the actual value.
+ #
+ # source://csv//lib/csv/input_record_separator.rb#8
+ def value; end
+ end
+end
+
+# The error thrown when the parser encounters invalid encoding in CSV.
+#
+# source://csv//lib/csv.rb#862
+class CSV::InvalidEncodingError < ::CSV::MalformedCSVError
+ # @param encoding the offending encoding (exposed via #encoding)
+ # @param line_number the line at which the error occurred
+ # (see MalformedCSVError#line_number)
+ #
+ # @return [InvalidEncodingError] a new instance of InvalidEncodingError
+ #
+ # source://csv//lib/csv.rb#864
+ def initialize(encoding, line_number); end
+
+ # Returns the value of attribute encoding.
+ #
+ # source://csv//lib/csv.rb#863
+ def encoding; end
+end
+
+# The error thrown when the parser encounters illegal CSV formatting.
+# Carries the failing line number (#line_number / #lineno).
+#
+# source://csv//lib/csv.rb#852
+class CSV::MalformedCSVError < ::RuntimeError
+ # @param message the error description
+ # @param line_number the line at which the malformed input was found
+ #
+ # @return [MalformedCSVError] a new instance of MalformedCSVError
+ #
+ # source://csv//lib/csv.rb#855
+ def initialize(message, line_number); end
+
+ # Returns the value of attribute line_number.
+ #
+ # source://csv//lib/csv.rb#853
+ def line_number; end
+
+ # Returns the value of attribute line_number.
+ # (Alias-style accessor for the same attribute as #line_number.)
+ #
+ # source://csv//lib/csv.rb#853
+ def lineno; end
+end
+
+# Note: Don't use this class directly. This is an internal class.
+#
+# source://csv//lib/csv/parser.rb#11
+class CSV::Parser
+ # @param input the CSV source to read
+ # @param options prepared parser options
+ #
+ # @return [Parser] a new instance of Parser
+ #
+ # source://csv//lib/csv/parser.rb#348
+ def initialize(input, options); end
+
+ # source://csv//lib/csv/parser.rb#356
+ def column_separator; end
+
+ # source://csv//lib/csv/parser.rb#368
+ def field_size_limit; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#388
+ def header_row?; end
+
+ # source://csv//lib/csv/parser.rb#384
+ def headers; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#400
+ def liberal_parsing?; end
+
+ # source://csv//lib/csv/parser.rb#408
+ def line; end
+
+ # source://csv//lib/csv/parser.rb#404
+ def lineno; end
+
+ # source://csv//lib/csv/parser.rb#372
+ def max_field_size; end
+
+ # Parses the prepared input; each parsed row is yielded to +block+
+ # (cf. the private #emit_row helper below).
+ #
+ # source://csv//lib/csv/parser.rb#412
+ def parse(&block); end
+
+ # source://csv//lib/csv/parser.rb#364
+ def quote_character; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#392
+ def return_headers?; end
+
+ # source://csv//lib/csv/parser.rb#360
+ def row_separator; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#396
+ def skip_blanks?; end
+
+ # source://csv//lib/csv/parser.rb#376
+ def skip_lines; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#380
+ def unconverted_fields?; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#447
+ def use_headers?; end
+
+ private
+
+ # This method injects an instance variable unconverted_fields into
+ # +row+ and an accessor method for +row+ called unconverted_fields(). The
+ # variable is set to the contents of +fields+.
+ #
+ # source://csv//lib/csv/parser.rb#1294
+ def add_unconverted_fields(row, fields); end
+
+ # source://csv//lib/csv/parser.rb#806
+ def adjust_headers(headers, quoted_fields); end
+
+ # source://csv//lib/csv/parser.rb#881
+ def build_scanner; end
+
+ # source://csv//lib/csv/parser.rb#728
+ def detect_row_separator(sample, cr, lf); end
+
+ # @yield [row]
+ #
+ # source://csv//lib/csv/parser.rb#1265
+ def emit_row(row, quoted_fields = T.unsafe(nil), &block); end
+
+ # source://csv//lib/csv/parser.rb#1250
+ def ignore_broken_line; end
+
+ # source://csv//lib/csv/parser.rb#758
+ def last_line; end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#823
+ def may_quoted?; end
+
+ # source://csv//lib/csv/parser.rb#1205
+ def parse_column_end; end
+
+ # source://csv//lib/csv/parser.rb#1105
+ def parse_column_value; end
+
+ # source://csv//lib/csv/parser.rb#792
+ def parse_headers(row); end
+
+ # source://csv//lib/csv/parser.rb#945
+ def parse_no_quote(&block); end
+
+ # source://csv//lib/csv/parser.rb#974
+ def parse_quotable_loose(&block); end
+
+ # source://csv//lib/csv/parser.rb#1035
+ def parse_quotable_robust(&block); end
+
+ # source://csv//lib/csv/parser.rb#1163
+ def parse_quoted_column_value; end
+
+ # source://csv//lib/csv/parser.rb#1219
+ def parse_row_end; end
+
+ # source://csv//lib/csv/parser.rb#1135
+ def parse_unquoted_column_value; end
+
+ # A set of tasks to prepare the file in order to parse it
+ #
+ # source://csv//lib/csv/parser.rb#453
+ def prepare; end
+
+ # source://csv//lib/csv/parser.rb#508
+ def prepare_backslash; end
+
+ # source://csv//lib/csv/parser.rb#766
+ def prepare_header; end
+
+ # source://csv//lib/csv/parser.rb#752
+ def prepare_line; end
+
+ # source://csv//lib/csv/parser.rb#812
+ def prepare_parser; end
+
+ # source://csv//lib/csv/parser.rb#492
+ def prepare_quote_character; end
+
+ # source://csv//lib/csv/parser.rb#648
+ def prepare_quoted; end
+
+ # source://csv//lib/csv/parser.rb#580
+ def prepare_separators; end
+
+ # source://csv//lib/csv/parser.rb#523
+ def prepare_skip_lines; end
+
+ # source://csv//lib/csv/parser.rb#540
+ def prepare_strip; end
+
+ # source://csv//lib/csv/parser.rb#675
+ def prepare_unquoted; end
+
+ # source://csv//lib/csv/parser.rb#468
+ def prepare_variable; end
+
+ # source://csv//lib/csv/parser.rb#688
+ def resolve_row_separator(separator); end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#925
+ def skip_line?(line); end
+
+ # source://csv//lib/csv/parser.rb#908
+ def skip_needless_lines; end
+
+ # source://csv//lib/csv/parser.rb#1256
+ def start_row; end
+
+ # source://csv//lib/csv/parser.rb#1232
+ def strip_value(value); end
+
+ # @raise [MalformedCSVError]
+ #
+ # source://csv//lib/csv/parser.rb#937
+ def validate_field_size(field); end
+
+ # This method verifies that there are no (obvious) ambiguities with the
+ # provided +col_sep+ and +strip+ parsing options. For example, if +col_sep+
+ # and +strip+ were both equal to +\t+, then there would be no clear way to
+ # parse the input.
+ #
+ # source://csv//lib/csv/parser.rb#630
+ def validate_strip_and_col_sep_options; end
+
+ class << self
+ # Convenient method to check whether the given input reached EOF
+ # or not.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#25
+ def eof?(input); end
+ end
+end
+
+# CSV::InputsScanner receives IO inputs, encoding and the chunk_size.
+# It also controls the life cycle of the object with its methods +keep_start+,
+# +keep_end+, +keep_back+, +keep_drop+.
+#
+# CSV::InputsScanner.scan() tries to match with pattern at the current position.
+# If there's a match, the scanner advances the "scan pointer" and returns the matched string.
+# Otherwise, the scanner returns nil.
+#
+# CSV::InputsScanner.rest() returns the "rest" of the string (i.e. everything after the scan pointer).
+# If there is no more data (eos? = true), it returns "".
+#
+# source://csv//lib/csv/parser.rb#99
+class CSV::Parser::InputsScanner
+ # @return [InputsScanner] a new instance of InputsScanner
+ #
+ # source://csv//lib/csv/parser.rb#100
+ def initialize(inputs, encoding, row_separator, chunk_size: T.unsafe(nil)); end
+
+ # source://csv//lib/csv/parser.rb#270
+ def check(pattern); end
+
+ # @yield [buffer]
+ #
+ # source://csv//lib/csv/parser.rb#110
+ def each_line(row_separator); end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#183
+ def eos?; end
+
+ # source://csv//lib/csv/parser.rb#210
+ def keep_back; end
+
+ # source://csv//lib/csv/parser.rb#249
+ def keep_drop; end
+
+ # source://csv//lib/csv/parser.rb#194
+ def keep_end; end
+
+ # source://csv//lib/csv/parser.rb#187
+ def keep_start; end
+
+ # source://csv//lib/csv/parser.rb#266
+ def rest; end
+
+ # source://csv//lib/csv/parser.rb#156
+ def scan(pattern); end
+
+ # source://csv//lib/csv/parser.rb#167
+ def scan_all(pattern); end
+
+ private
+
+ # source://csv//lib/csv/parser.rb#279
+ def adjust_last_keep; end
+
+ # Loads the next chunk from the underlying inputs into the scan buffer.
+ #
+ # source://csv//lib/csv/parser.rb#307
+ def read_chunk; end
+
+ # source://csv//lib/csv/parser.rb#275
+ def trace(*args); end
+end
+
+# Raised when encoding is invalid.
+#
+# source://csv//lib/csv/parser.rb#35
+class CSV::Parser::InvalidEncoding < ::StandardError; end
+
+# source://csv//lib/csv/parser.rb#855
+CSV::Parser::SCANNER_TEST = T.let(T.unsafe(nil), FalseClass)
+
+# source://csv//lib/csv/parser.rb#577
+CSV::Parser::STRING_SCANNER_SCAN_ACCEPT_STRING = T.let(T.unsafe(nil), TrueClass)
+
+# CSV::Scanner receives a CSV output, scans it and returns the content.
+# It also controls the life cycle of the object with its methods +keep_start+,
+# +keep_end+, +keep_back+, +keep_drop+.
+#
+# Uses StringScanner (the official strscan gem). Strscan provides lexical
+# scanning operations on a String. We inherit its object and take advantage
+# on the methods. For more information, please visit:
+# https://docs.ruby-lang.org/en/master/StringScanner.html
+#
+# source://csv//lib/csv/parser.rb#52
+class CSV::Parser::Scanner < ::StringScanner
+ # @return [Scanner] a new instance of Scanner
+ #
+ # source://csv//lib/csv/parser.rb#55
+ def initialize(*args); end
+
+ # source://csv//lib/csv/parser.rb#60
+ def each_line(row_separator); end
+
+ # source://csv//lib/csv/parser.rb#78
+ def keep_back; end
+
+ # source://csv//lib/csv/parser.rb#82
+ def keep_drop; end
+
+ # source://csv//lib/csv/parser.rb#73
+ def keep_end; end
+
+ # source://csv//lib/csv/parser.rb#69
+ def keep_start; end
+
+ # NOTE(review): no source location was generated for this stub; exact
+ # semantics live in lib/csv/parser.rb (cf. InputsScanner#scan_all).
+ def scan_all(_arg0); end
+end
+
+# Raised when an unexpected case happens.
+#
+# source://csv//lib/csv/parser.rb#39
+class CSV::Parser::UnexpectedError < ::StandardError; end
+
+# A minimal IO-like wrapper around a String, exposing only #gets, #each_line
+# and #eof?. NOTE(review): the name suggests it forces the parser's generic
+# (non-StringScanner) code path — confirm in lib/csv/parser.rb#837.
+#
+# source://csv//lib/csv/parser.rb#837
+class CSV::Parser::UnoptimizedStringIO
+ # @param string the String to wrap
+ #
+ # @return [UnoptimizedStringIO] a new instance of UnoptimizedStringIO
+ #
+ # source://csv//lib/csv/parser.rb#838
+ def initialize(string); end
+
+ # source://csv//lib/csv/parser.rb#846
+ def each_line(*args, &block); end
+
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/parser.rb#850
+ def eof?; end
+
+ # source://csv//lib/csv/parser.rb#842
+ def gets(*args); end
+end
+
+# = \CSV::Row
+# A \CSV::Row instance represents a \CSV table row.
+# (see {class CSV}[../CSV.html]).
+#
+# The instance may have:
+# - Fields: each is an object, not necessarily a \String.
+# - Headers: each serves a key, and also need not be a \String.
+#
+# === Instance Methods
+#
+# \CSV::Row has three groups of instance methods:
+# - Its own internally defined instance methods.
+# - Methods included by module Enumerable.
+# - Methods delegated to class Array.:
+# * Array#empty?
+# * Array#length
+# * Array#size
+#
+# == Creating a \CSV::Row Instance
+#
+# Commonly, a new \CSV::Row instance is created by parsing \CSV source
+# that has headers:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# table.each {|row| p row }
+# Output:
+# #<CSV::Row "Name":"foo" "Value":"0">
+# #<CSV::Row "Name":"bar" "Value":"1">
+# #<CSV::Row "Name":"baz" "Value":"2">
+#
+# You can also create a row directly. See ::new.
+#
+# == Headers
+#
+# Like a \CSV::Table, a \CSV::Row has headers.
+#
+# A \CSV::Row that was created by parsing \CSV source
+# inherits its headers from the table:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# row = table.first
+# row.headers # => ["Name", "Value"]
+#
+# You can also create a new row with headers;
+# like the keys in a \Hash, the headers need not be Strings:
+# row = CSV::Row.new([:name, :value], ['foo', 0])
+# row.headers # => [:name, :value]
+#
+# The new row retains its headers even if added to a table
+# that has headers:
+# table << row # => #<CSV::Table mode:col_or_row row_count:5>
+# row.headers # => [:name, :value]
+# row[:name] # => "foo"
+# row['Name'] # => nil
+#
+#
+#
+# == Accessing Fields
+#
+# You may access a field in a \CSV::Row with either its \Integer index
+# (\Array-style) or its header (\Hash-style).
+#
+# Fetch a field using method #[]:
+# row = CSV::Row.new(['Name', 'Value'], ['foo', 0])
+# row[1] # => 0
+# row['Value'] # => 0
+#
+# Set a field using method #[]=:
+# row = CSV::Row.new(['Name', 'Value'], ['foo', 0])
+# row # => #<CSV::Row "Name":"foo" "Value":0>
+# row[0] = 'bar'
+# row['Value'] = 1
+# row # => #<CSV::Row "Name":"bar" "Value":1>
+#
+# source://csv//lib/csv/row.rb#80
+class CSV::Row
+ include ::Enumerable
+ extend ::Forwardable
+
+ # :call-seq:
+ # CSV::Row.new(headers, fields, header_row = false) -> csv_row
+ #
+ # Returns the new \CSV::Row instance constructed from
+ # arguments +headers+ and +fields+; both should be Arrays;
+ # note that the fields need not be Strings:
+ # row = CSV::Row.new(['Name', 'Value'], ['foo', 0])
+ # row # => #<CSV::Row "Name":"foo" "Value":0>
+ #
+ # If the \Array lengths are different, the shorter is +nil+-filled:
+ # row = CSV::Row.new(['Name', 'Value', 'Date', 'Size'], ['foo', 0])
+ # row # => #<CSV::Row "Name":"foo" "Value":0 "Date":nil "Size":nil>
+ #
+ # Each \CSV::Row object is either a field row or a header row;
+ # by default, a new row is a field row; for the row created above:
+ # row.field_row? # => true
+ # row.header_row? # => false
+ #
+ # If the optional argument +header_row+ is given as +true+,
+ # the created row is a header row:
+ # row = CSV::Row.new(['Name', 'Value'], ['foo', 0], header_row = true)
+ # row # => #<CSV::Row "Name":"foo" "Value":0>
+ # row.field_row? # => false
+ # row.header_row? # => true
+ #
+ # @return [Row] a new instance of Row
+ #
+ # source://csv//lib/csv/row.rb#105
+ def initialize(headers, fields, header_row = T.unsafe(nil)); end
+
+ # :call-seq:
+ # row << [header, value] -> self
+ # row << hash -> self
+ # row << value -> self
+ #
+ # Adds a field to +self+; returns +self+:
+ #
+ # If the argument is a 2-element \Array [header, value],
+ # a field is added with the given +header+ and +value+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row << ['NAME', 'Bat']
+ # row # => #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":"Baz" "NAME":"Bat">
+ #
+ # If the argument is a \Hash, each key-value pair is added
+ # as a field with header +key+ and value +value+.
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row << {NAME: 'Bat', name: 'Bam'}
+ # row # => #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":"Baz" NAME:"Bat" name:"Bam">
+ #
+ # Otherwise, the given +value+ is added as a field with no header.
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row << 'Bag'
+ # row # => #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":"Baz" nil:"Bag">
+ #
+ # source://csv//lib/csv/row.rb#389
+ def <<(arg); end
+
+ # :call-seq:
+ # row == other -> true or false
+ #
+ # Returns +true+ if +other+ is a /CSV::Row that has the same
+ # fields (headers and values) in the same order as +self+;
+ # otherwise returns +false+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # other_row = table[0]
+ # row == other_row # => true
+ # other_row = table[1]
+ # row == other_row # => false
+ #
+ # source://csv//lib/csv/row.rb#633
+ def ==(other); end
+
+ # :call-seq:
+ # field(index) -> value
+ # field(header) -> value
+ # field(header, offset) -> value
+ #
+ # Returns the field value for the given +index+ or +header+.
+ #
+ # ---
+ #
+ # Fetch field value by \Integer index:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field(0) # => "foo"
+ # row.field(1) # => "bar"
+ #
+ # Counts backward from the last column if +index+ is negative:
+ # row.field(-1) # => "0"
+ # row.field(-2) # => "foo"
+ #
+ # Returns +nil+ if +index+ is out of range:
+ # row.field(2) # => nil
+ # row.field(-3) # => nil
+ #
+ # ---
+ #
+ # Fetch field value by header (first found):
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field('Name') # => "Foo"
+ #
+ # Fetch field value by header, ignoring +offset+ leading fields:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field('Name', 2) # => "Baz"
+ #
+ # Returns +nil+ if the header does not exist.
+ #
+ # source://csv//lib/csv/row.rb#203
+ def [](header_or_index, minimum_index = T.unsafe(nil)); end
+
+ # :call-seq:
+ # row[index] = value -> value
+ # row[header, offset] = value -> value
+ # row[header] = value -> value
+ #
+ # Assigns the field value for the given +index+ or +header+;
+ # returns +value+.
+ #
+ # ---
+ #
+ # Assign field value by \Integer index:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row[0] = 'Bat'
+ # row[1] = 3
+ # row # => #<CSV::Row "Name":"Bat" "Value":3>
+ #
+ # Counts backward from the last column if +index+ is negative:
+ # row[-1] = 4
+ # row[-2] = 'Bam'
+ # row # => #<CSV::Row "Name":"Bam" "Value":4>
+ #
+ # Extends the row with nil:nil if positive +index+ is not in the row:
+ # row[4] = 5
+ # row # => #<CSV::Row "Name":"Bam" "Value":4 nil:nil nil:nil nil:5>
+ #
+ # Raises IndexError if negative +index+ is too small (too far from zero).
+ #
+ # ---
+ #
+ # Assign field value by header (first found):
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row['Name'] = 'Bat'
+ # row # => #<CSV::Row "Name":"Bat" "Name":"Bar" "Name":"Baz">
+ #
+ # Assign field value by header, ignoring +offset+ leading fields:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row['Name', 2] = 4
+ # row # => #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":4>
+ #
+ # Append new field by (new) header:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row['New'] = 6
+ # row# => #<CSV::Row "Name":"foo" "Value":"0" "New":6>
+ #
+ # source://csv//lib/csv/row.rb#339
+ def []=(*args); end
+
+ # :call-seq:
+ # row.deconstruct -> array
+ #
+ # Returns the new \Array suitable for pattern matching containing the values
+ # of the row.
+ #
+ # source://csv//lib/csv/row.rb#682
+ def deconstruct; end
+
+ # :call-seq:
+ # row.deconstruct_keys(keys) -> hash
+ #
+ # Returns the new \Hash suitable for pattern matching containing only the
+ # keys specified as an argument.
+ #
+ # source://csv//lib/csv/row.rb#667
+ def deconstruct_keys(keys); end
+
+ # :call-seq:
+ # delete(index) -> [header, value] or nil
+ # delete(header) -> [header, value] or empty_array
+ # delete(header, offset) -> [header, value] or empty_array
+ #
+ # Removes a specified field from +self+; returns the 2-element \Array
+ # [header, value] if the field exists.
+ #
+ # If an \Integer argument +index+ is given,
+ # removes and returns the field at offset +index+,
+ # or returns +nil+ if the field does not exist:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.delete(1) # => ["Name", "Bar"]
+ # row.delete(50) # => nil
+ #
+ # Otherwise, if the single argument +header+ is given,
+ # removes and returns the first-found field with the given header,
+ # of returns a new empty \Array if the field does not exist:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.delete('Name') # => ["Name", "Foo"]
+ # row.delete('NAME') # => []
+ #
+ # If argument +header+ and \Integer argument +offset+ are given,
+ # removes and returns the first-found field with the given header
+ # whose +index+ is at least as large as +offset+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.delete('Name', 1) # => ["Name", "Bar"]
+ # row.delete('NAME', 1) # => []
+ #
+ # source://csv//lib/csv/row.rb#451
+ def delete(header_or_index, minimum_index = T.unsafe(nil)); end
+
+ # :call-seq:
+ # row.delete_if {|header, value| ... } -> self
+ #
+ # Removes fields from +self+ as selected by the block; returns +self+.
+ #
+ # Removes each field for which the block returns a truthy value:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.delete_if {|header, value| value.start_with?('B') } # => true
+ # row # => #
+ # row.delete_if {|header, value| header.start_with?('B') } # => false
+ #
+ # If no block is given, returns a new Enumerator:
+ # row.delete_if # => #<Enumerator: #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":"Baz">:delete_if>
+ #
+ # source://csv//lib/csv/row.rb#476
+ def delete_if(&block); end
+
+ # :call-seq:
+ # row.dig(index_or_header, *identifiers) -> object
+ #
+ # Finds and returns the object in nested object that is specified
+ # by +index_or_header+ and +specifiers+.
+ #
+ # The nested objects may be instances of various classes.
+ # See {Dig Methods}[rdoc-ref:dig_methods.rdoc].
+ #
+ # Examples:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.dig(1) # => "0"
+ # row.dig('Value') # => "0"
+ # row.dig(5) # => nil
+ #
+ # source://csv//lib/csv/row.rb#715
+ def dig(index_or_header, *indexes); end
+
+ # :call-seq:
+ # row.each {|header, value| ... } -> self
+ #
+ # Calls the block with each header-value pair; returns +self+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.each {|header, value| p [header, value] }
+ # Output:
+ # ["Name", "Foo"]
+ # ["Name", "Bar"]
+ # ["Name", "Baz"]
+ #
+ # If no block is given, returns a new Enumerator:
+ # row.each # => #<Enumerator: #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":"Baz">:each>
+ #
+ # source://csv//lib/csv/row.rb#610
+ def each(&block); end
+
+ # :call-seq:
+ # row.each {|header, value| ... } -> self
+ #
+ # Calls the block with each header-value pair; returns +self+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.each {|header, value| p [header, value] }
+ # Output:
+ # ["Name", "Foo"]
+ # ["Name", "Bar"]
+ # ["Name", "Baz"]
+ #
+ # If no block is given, returns a new Enumerator:
+ # row.each # => #<Enumerator: #<CSV::Row "Name":"Foo" "Name":"Bar" "Name":"Baz">:each>
+ #
+ # source://csv//lib/csv/row.rb#610
+ def each_pair(&block); end
+
+ # :call-seq:
+ # fetch(header) -> value
+ # fetch(header, default) -> value
+ # fetch(header) {|row| ... } -> value
+ #
+ # Returns the field value as specified by +header+.
+ #
+ # ---
+ #
+ # With the single argument +header+, returns the field value
+ # for that header (first found):
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fetch('Name') # => "Foo"
+ #
+ # Raises exception +KeyError+ if the header does not exist.
+ #
+ # ---
+ #
+ # With arguments +header+ and +default+ given,
+ # returns the field value for the header (first found)
+ # if the header exists, otherwise returns +default+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fetch('Name', '') # => "Foo"
+ # row.fetch(:nosuch, '') # => ""
+ #
+ # ---
+ #
+ # With argument +header+ and a block given,
+ # returns the field value for the header (first found)
+ # if the header exists; otherwise calls the block
+ # and returns its return value:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fetch('Name') {|header| fail 'Cannot happen' } # => "Foo"
+ # row.fetch(:nosuch) {|header| "Header '#{header} not found'" } # => "Header 'nosuch not found'"
+ #
+ # @raise [ArgumentError]
+ #
+ # source://csv//lib/csv/row.rb#258
+ def fetch(header, *varargs); end
+
+ # :call-seq:
+ # field(index) -> value
+ # field(header) -> value
+ # field(header, offset) -> value
+ #
+ # Returns the field value for the given +index+ or +header+.
+ #
+ # ---
+ #
+ # Fetch field value by \Integer index:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field(0) # => "foo"
+ # row.field(1) # => "bar"
+ #
+ # Counts backward from the last column if +index+ is negative:
+ # row.field(-1) # => "0"
+ # row.field(-2) # => "foo"
+ #
+ # Returns +nil+ if +index+ is out of range:
+ # row.field(2) # => nil
+ # row.field(-3) # => nil
+ #
+ # ---
+ #
+ # Fetch field value by header (first found):
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field('Name') # => "Foo"
+ #
+ # Fetch field value by header, ignoring +offset+ leading fields:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field('Name', 2) # => "Baz"
+ #
+ # Returns +nil+ if the header does not exist.
+ #
+ # source://csv//lib/csv/row.rb#203
+ def field(header_or_index, minimum_index = T.unsafe(nil)); end
+
+ # :call-seq:
+ # row.field?(value) -> true or false
+ #
+ # Returns +true+ if +value+ is a field in this row, +false+ otherwise:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.field?('Bar') # => true
+ # row.field?('BAR') # => false
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#589
+ def field?(data); end
+
+ # :call-seq:
+ # row.field_row? -> true or false
+ #
+ # Returns +true+ if this is a field row, +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#148
+ def field_row?; end
+
+ # :call-seq:
+ # self.fields(*specifiers) -> array_of_fields
+ #
+ # Returns field values per the given +specifiers+, which may be any mixture of:
+ # - \Integer index.
+ # - \Range of \Integer indexes.
+ # - 2-element \Array containing a header and offset.
+ # - Header.
+ # - \Range of headers.
+ #
+ # For +specifier+ in one of the first four cases above,
+ # returns the result of self.field(specifier); see #field.
+ #
+ # Although there may be any number of +specifiers+,
+ # the examples here will illustrate one at a time.
+ #
+ # When the specifier is an \Integer +index+,
+ # returns self.field(index)L
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fields(1) # => ["Bar"]
+ #
+ # When the specifier is a \Range of \Integers +range+,
+ # returns self.field(range):
+ # row.fields(1..2) # => ["Bar", "Baz"]
+ #
+ # When the specifier is a 2-element \Array +array+,
+ # returns self.field(array)L
+ # row.fields('Name', 1) # => ["Foo", "Bar"]
+ #
+ # When the specifier is a header +header+,
+ # returns self.field(header)L
+ # row.fields('Name') # => ["Foo"]
+ #
+ # When the specifier is a \Range of headers +range+,
+ # forms a new \Range +new_range+ from the indexes of
+ # range.start and range.end,
+ # and returns self.field(new_range):
+ # source = "Name,NAME,name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fields('Name'..'NAME') # => ["Foo", "Bar"]
+ #
+ # Returns all fields if no argument given:
+ # row.fields # => ["Foo", "Bar", "Baz"]
+ #
+ # source://csv//lib/csv/row.rb#530
+ def fields(*headers_and_or_indices); end
+
+ # :call-seq:
+ # row.has_key?(header) -> true or false
+ #
+ # Returns +true+ if there is a field with the given +header+,
+ # +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#279
+ def has_key?(header); end
+
+ # :call-seq:
+ # row.has_key?(header) -> true or false
+ #
+ # Returns +true+ if there is a field with the given +header+,
+ # +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#279
+ def header?(header); end
+
+ # :call-seq:
+ # row.header_row? -> true or false
+ #
+ # Returns +true+ if this is a header row, +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#140
+ def header_row?; end
+
+ # :call-seq:
+ # row.headers -> array_of_headers
+ #
+ # Returns the headers for this row:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table.first
+ # row.headers # => ["Name", "Value"]
+ #
+ # source://csv//lib/csv/row.rb#160
+ def headers; end
+
+ # :call-seq:
+ # row.has_key?(header) -> true or false
+ #
+ # Returns +true+ if there is a field with the given +header+,
+ # +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#279
+ def include?(header); end
+
+ # :call-seq:
+ # index(header) -> index
+ # index(header, offset) -> index
+ #
+ # Returns the index for the given header, if it exists;
+ # otherwise returns +nil+.
+ #
+ # With the single argument +header+, returns the index
+ # of the first-found field with the given +header+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.index('Name') # => 0
+ # row.index('NAME') # => nil
+ #
+ # With arguments +header+ and +offset+,
+ # returns the index of the first-found field with given +header+,
+ # but ignoring the first +offset+ fields:
+ # row.index('Name', 1) # => 1
+ # row.index('Name', 3) # => nil
+ #
+ # source://csv//lib/csv/row.rb#573
+ def index(header, minimum_index = T.unsafe(nil)); end
+
+ # :call-seq:
+ # row.inspect -> string
+ #
+ # Returns an ASCII-compatible \String showing:
+ # - Class \CSV::Row.
+ # - Header-value pairs.
+ # Example:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.inspect # => "#"
+ #
+ # source://csv//lib/csv/row.rb#740
+ def inspect; end
+
+ # :call-seq:
+ # row.has_key?(header) -> true or false
+ #
+ # Returns +true+ if there is a field with the given +header+,
+ # +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#279
+ def key?(header); end
+
+ # :call-seq:
+ # row.has_key?(header) -> true or false
+ #
+ # Returns +true+ if there is a field with the given +header+,
+ # +false+ otherwise.
+ #
+ # @return [Boolean]
+ #
+ # source://csv//lib/csv/row.rb#279
+ def member?(header); end
+
+ # :call-seq:
+ # row.push(*values) -> self
+ #
+ # Appends each of the given +values+ to +self+ as a field; returns +self+:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.push('Bat', 'Bam')
+ # row # => #
+ #
+ # source://csv//lib/csv/row.rb#410
+ def push(*args); end
+
+ # :call-seq:
+ # row.to_csv -> csv_string
+ #
+ # Returns the row as a \CSV String. Headers are not included:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.to_csv # => "foo,0\n"
+ #
+ # source://csv//lib/csv/row.rb#694
+ def to_csv(**options); end
+
+ # :call-seq:
+ # row.to_h -> hash
+ #
+ # Returns the new \Hash formed by adding each header-value pair in +self+
+ # as a key-value pair in the \Hash.
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.to_h # => {"Name"=>"foo", "Value"=>"0"}
+ #
+ # Header order is preserved, but repeated headers are ignored:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.to_h # => {"Name"=>"Foo"}
+ #
+ # source://csv//lib/csv/row.rb#653
+ def to_h; end
+
+ # :call-seq:
+ # row.to_h -> hash
+ #
+ # Returns the new \Hash formed by adding each header-value pair in +self+
+ # as a key-value pair in the \Hash.
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.to_h # => {"Name"=>"foo", "Value"=>"0"}
+ #
+ # Header order is preserved, but repeated headers are ignored:
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.to_h # => {"Name"=>"Foo"}
+ #
+ # source://csv//lib/csv/row.rb#653
+ def to_hash; end
+
+ # :call-seq:
+ # row.to_csv -> csv_string
+ #
+ # Returns the row as a \CSV String. Headers are not included:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.to_csv # => "foo,0\n"
+ #
+ # source://csv//lib/csv/row.rb#694
+ def to_s(**options); end
+
+ # :call-seq:
+ # self.fields(*specifiers) -> array_of_fields
+ #
+ # Returns field values per the given +specifiers+, which may be any mixture of:
+ # - \Integer index.
+ # - \Range of \Integer indexes.
+ # - 2-element \Array containing a header and offset.
+ # - Header.
+ # - \Range of headers.
+ #
+ # For +specifier+ in one of the first four cases above,
+ # returns the result of self.field(specifier); see #field.
+ #
+ # Although there may be any number of +specifiers+,
+ # the examples here will illustrate one at a time.
+ #
+ # When the specifier is an \Integer +index+,
+ # returns self.field(index):
+ # source = "Name,Name,Name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fields(1) # => ["Bar"]
+ #
+ # When the specifier is a \Range of \Integers +range+,
+ # returns self.field(range):
+ # row.fields(1..2) # => ["Bar", "Baz"]
+ #
+ # When the specifier is a 2-element \Array +array+,
+ # returns self.field(array):
+ # row.fields('Name', 1) # => ["Foo", "Bar"]
+ #
+ # When the specifier is a header +header+,
+ # returns self.field(header):
+ # row.fields('Name') # => ["Foo"]
+ #
+ # When the specifier is a \Range of headers +range+,
+ # forms a new \Range +new_range+ from the indexes of
+ # range.start and range.end,
+ # and returns self.field(new_range):
+ # source = "Name,NAME,name\nFoo,Bar,Baz\n"
+ # table = CSV.parse(source, headers: true)
+ # row = table[0]
+ # row.fields('Name'..'NAME') # => ["Foo", "Bar"]
+ #
+ # Returns all fields if no argument given:
+ # row.fields # => ["Foo", "Bar", "Baz"]
+ #
+ # source://csv//lib/csv/row.rb#530
+ def values_at(*headers_and_or_indices); end
+
+ protected
+
+ # Internal data format used to compare equality.
+ #
+ # source://csv//lib/csv/row.rb#118
+ def row; end
+
+ private
+
+ # :call-seq:
+ # row.initialize_copy(other_row) -> self
+ #
+ # Calls superclass method.
+ #
+ # source://csv//lib/csv/row.rb#130
+ def initialize_copy(other); end
+end
+
+# source://csv//lib/csv.rb#2132
+class CSV::TSV < ::CSV
+ # @return [TSV] a new instance of TSV
+ #
+ # source://csv//lib/csv.rb#2133
+ def initialize(data, **options); end
+end
+
+# = \CSV::Table
+# A \CSV::Table instance represents \CSV data.
+# (see {class CSV}[../CSV.html]).
+#
+# The instance may have:
+# - Rows: each is a Table::Row object.
+# - Headers: names for the columns.
+#
+# === Instance Methods
+#
+# \CSV::Table has three groups of instance methods:
+# - Its own internally defined instance methods.
+# - Methods included by module Enumerable.
+# - Methods delegated to class Array.:
+# * Array#empty?
+# * Array#length
+# * Array#size
+#
+# == Creating a \CSV::Table Instance
+#
+# Commonly, a new \CSV::Table instance is created by parsing \CSV source
+# using headers:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# table.class # => CSV::Table
+#
+# You can also create an instance directly. See ::new.
+#
+# == Headers
+#
+# If a table has headers, the headers serve as labels for the columns of data.
+# Each header serves as the label for its column.
+#
+# The headers for a \CSV::Table object are stored as an \Array of Strings.
+#
+# Commonly, headers are defined in the first row of \CSV source:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# table.headers # => ["Name", "Value"]
+#
+# If no headers are defined, the \Array is empty:
+# table = CSV::Table.new([])
+# table.headers # => []
+#
+# == Access Modes
+#
+# \CSV::Table provides three modes for accessing table data:
+# - \Row mode.
+# - Column mode.
+# - Mixed mode (the default for a new table).
+#
+ # The access mode for a \CSV::Table instance affects the behavior
+# of some of its instance methods:
+# - #[]
+# - #[]=
+# - #delete
+# - #delete_if
+# - #each
+# - #values_at
+#
+# === \Row Mode
+#
+# Set a table to row mode with method #by_row!:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# table.by_row! # => #
+#
+# Specify a single row by an \Integer index:
+# # Get a row.
+# table[1] # => #
+# # Set a row, then get it.
+# table[1] = CSV::Row.new(['Name', 'Value'], ['bam', 3])
+# table[1] # => #
+#
+# Specify a sequence of rows by a \Range:
+# # Get rows.
+# table[1..2] # => [#, #]
+# # Set rows, then get them.
+# table[1..2] = [
+# CSV::Row.new(['Name', 'Value'], ['bat', 4]),
+# CSV::Row.new(['Name', 'Value'], ['bad', 5]),
+# ]
+# table[1..2] # => [["Name", #], ["Value", #]]
+#
+# === Column Mode
+#
+# Set a table to column mode with method #by_col!:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# table.by_col! # => #
+#
+# Specify a column by an \Integer index:
+# # Get a column.
+# table[0]
+# # Set a column, then get it.
+# table[0] = ['FOO', 'BAR', 'BAZ']
+# table[0] # => ["FOO", "BAR", "BAZ"]
+#
+# Specify a column by its \String header:
+# # Get a column.
+# table['Name'] # => ["FOO", "BAR", "BAZ"]
+# # Set a column, then get it.
+# table['Name'] = ['Foo', 'Bar', 'Baz']
+# table['Name'] # => ["Foo", "Bar", "Baz"]
+#
+# === Mixed Mode
+#
+# In mixed mode, you can refer to either rows or columns:
+# - An \Integer index refers to a row.
+# - A \Range index refers to multiple rows.
+# - A \String index refers to a column.
+#
+# Set a table to mixed mode with method #by_col_or_row!:
+# source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+# table = CSV.parse(source, headers: true)
+# table.by_col_or_row! # => #
+#
+# Specify a single row by an \Integer index:
+# # Get a row.
+# table[1] # => #
+# # Set a row, then get it.
+# table[1] = CSV::Row.new(['Name', 'Value'], ['bam', 3])
+# table[1] # => #
+#
+# Specify a sequence of rows by a \Range:
+# # Get rows.
+# table[1..2] # => [#, #]
+# # Set rows, then get them.
+# table[1] = CSV::Row.new(['Name', 'Value'], ['bat', 4])
+# table[2] = CSV::Row.new(['Name', 'Value'], ['bad', 5])
+# table[1..2] # => [["Name", #], ["Value", #]]
+#
+# Specify a column by its \String header:
+# # Get a column.
+# table['Name'] # => ["foo", "bat", "bad"]
+# # Set a column, then get it.
+# table['Name'] = ['Foo', 'Bar', 'Baz']
+# table['Name'] # => ["Foo", "Bar", "Baz"]
+#
+# source://csv//lib/csv/table.rb#144
+class CSV::Table
+ include ::Enumerable
+ extend ::Forwardable
+
+ # :call-seq:
+ # CSV::Table.new(array_of_rows, headers = nil) -> csv_table
+ #
+ # Returns a new \CSV::Table object.
+ #
+ # - Argument +array_of_rows+ must be an \Array of CSV::Row objects.
+ # - Argument +headers+, if given, may be an \Array of Strings.
+ #
+ # ---
+ #
+ # Create an empty \CSV::Table object:
+ # table = CSV::Table.new([])
+ # table # => #
+ #
+ # Create a non-empty \CSV::Table object:
+ # rows = [
+ # CSV::Row.new([], []),
+ # CSV::Row.new([], []),
+ # CSV::Row.new([], []),
+ # ]
+ # table = CSV::Table.new(rows)
+ # table # => #
+ #
+ # ---
+ #
+ # If argument +headers+ is an \Array of Strings,
+ # those Strings become the table's headers:
+ # table = CSV::Table.new([], headers: ['Name', 'Age'])
+ # table.headers # => ["Name", "Age"]
+ #
+ # If argument +headers+ is not given and the table has rows,
+ # the headers are taken from the first row:
+ # rows = [
+ # CSV::Row.new(['Foo', 'Bar'], []),
+ # CSV::Row.new(['foo', 'bar'], []),
+ # CSV::Row.new(['FOO', 'BAR'], []),
+ # ]
+ # table = CSV::Table.new(rows)
+ # table.headers # => ["Foo", "Bar"]
+ #
+ # If argument +headers+ is not given and the table is empty (has no rows),
+ # the headers are also empty:
+ # table = CSV::Table.new([])
+ # table.headers # => []
+ #
+ # ---
+ #
+ # Raises an exception if argument +array_of_rows+ is not an \Array object:
+ # # Raises NoMethodError (undefined method `first' for :foo:Symbol):
+ # CSV::Table.new(:foo)
+ #
+ # Raises an exception if an element of +array_of_rows+ is not a \CSV::Row object:
+ # # Raises NoMethodError (undefined method `headers' for :foo:Symbol):
+ # CSV::Table.new([:foo])
+ #
+ # @return [Table] a new instance of Table
+ #
+ # source://csv//lib/csv/table.rb#199
+ def initialize(array_of_rows, headers: T.unsafe(nil)); end
+
+ # :call-seq:
+ # table << row_or_array -> self
+ #
+ # If +row_or_array+ is a \CSV::Row object,
+ # it is appended to the table:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table << CSV::Row.new(table.headers, ['bat', 3])
+ # table[3] # => #
+ #
+ # If +row_or_array+ is an \Array, it is used to create a new
+ # \CSV::Row object which is then appended to the table:
+ # table << ['bam', 4]
+ # table[4] # => #
+ #
+ # source://csv//lib/csv/table.rb#762
+ def <<(row_or_array); end
+
+ # :call-seq:
+ # table == other_table -> true or false
+ #
+ # Returns +true+ if each row of +self+ ==
+ # the corresponding row of +other_table+, otherwise, +false+.
+ #
+ # The access mode does not affect the result.
+ #
+ # Equal tables:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # other_table = CSV.parse(source, headers: true)
+ # table == other_table # => true
+ #
+ # Different row count:
+ # other_table.delete(2)
+ # table == other_table # => false
+ #
+ # Different last row:
+ # other_table << ['bat', 3]
+ # table == other_table # => false
+ #
+ # source://csv//lib/csv/table.rb#965
+ def ==(other); end
+
+ # :call-seq:
+ # table[n] -> row or column_data
+ # table[range] -> array_of_rows or array_of_column_data
+ # table[header] -> array_of_column_data
+ #
+ # Returns data from the table; does not modify the table.
+ #
+ # ---
+ #
+ # Fetch a \Row by Its \Integer Index::
+ # - Form: table[n], +n+ an integer.
+ # - Access mode: :row or :col_or_row.
+ # - Return value: _nth_ row of the table, if that row exists;
+ # otherwise +nil+.
+ #
+ # Returns the _nth_ row of the table if that row exists:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_row! # => #
+ # table[1] # => #
+ # table.by_col_or_row! # => #
+ # table[1] # => #
+ #
+ # Counts backward from the last row if +n+ is negative:
+ # table[-1] # => #
+ #
+ # Returns +nil+ if +n+ is too large or too small:
+ # table[4] # => nil
+ # table[-4] # => nil
+ #
+ # Raises an exception if the access mode is :row
+ # and +n+ is not an \Integer:
+ # table.by_row! # => #
+ # # Raises TypeError (no implicit conversion of String into Integer):
+ # table['Name']
+ #
+ # ---
+ #
+ # Fetch a Column by Its \Integer Index::
+ # - Form: table[n], +n+ an \Integer.
+ # - Access mode: :col.
+ # - Return value: _nth_ column of the table, if that column exists;
+ # otherwise an \Array of +nil+ fields of length self.size.
+ #
+ # Returns the _nth_ column of the table if that column exists:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_col! # => #
+ # table[1] # => ["0", "1", "2"]
+ #
+ # Counts backward from the last column if +n+ is negative:
+ # table[-2] # => ["foo", "bar", "baz"]
+ #
+ # Returns an \Array of +nil+ fields if +n+ is too large or too small:
+ # table[4] # => [nil, nil, nil]
+ # table[-4] # => [nil, nil, nil]
+ #
+ # ---
+ #
+ # Fetch Rows by \Range::
+ # - Form: table[range], +range+ a \Range object.
+ # - Access mode: :row or :col_or_row.
+ # - Return value: rows from the table, beginning at row range.start,
+ # if those rows exist.
+ #
+ # Returns rows from the table, beginning at row range.first,
+ # if those rows exist:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_row! # => #
+ # rows = table[1..2] # => #
+ # rows # => [#, #]
+ # table.by_col_or_row! # => #
+ # rows = table[1..2] # => #
+ # rows # => [#, #]
+ #
+ # If there are too few rows, returns all from range.start to the end:
+ # rows = table[1..50] # => #
+ # rows # => [#, #]
+ #
+ # Special case: if range.start == table.size, returns an empty \Array:
+ # table[table.size..50] # => []
+ #
+ # If range.end is negative, calculates the ending index from the end:
+ # rows = table[0..-1]
+ # rows # => [#, #, #]
+ #
+ # If range.start is negative, calculates the starting index from the end:
+ # rows = table[-1..2]
+ # rows # => [#]
+ #
+ # If range.start is larger than table.size, returns +nil+:
+ # table[4..4] # => nil
+ #
+ # ---
+ #
+ # Fetch Columns by \Range::
+ # - Form: table[range], +range+ a \Range object.
+ # - Access mode: :col.
+ # - Return value: column data from the table, beginning at column range.start,
+ # if those columns exist.
+ #
+ # Returns column values from the table, if the column exists;
+ # the values are arranged by row:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_col!
+ # table[0..1] # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # Special case: if range.start == headers.size,
+ # returns an \Array (size: table.size) of empty \Arrays:
+ # table[table.headers.size..50] # => [[], [], []]
+ #
+ # If range.end is negative, calculates the ending index from the end:
+ # table[0..-1] # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # If range.start is negative, calculates the starting index from the end:
+ # table[-2..2] # => [["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # If range.start is larger than table.size,
+ # returns an \Array of +nil+ values:
+ # table[4..4] # => [nil, nil, nil]
+ #
+ # ---
+ #
+ # Fetch a Column by Its \String Header::
+ # - Form: table[header], +header+ a \String header.
+ # - Access mode: :col or :col_or_row
+ # - Return value: column data from the table, if that +header+ exists.
+ #
+ # Returns column values from the table, if the column exists:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_col! # => #
+ # table['Name'] # => ["foo", "bar", "baz"]
+ # table.by_col_or_row! # => #
+ # col = table['Name']
+ # col # => ["foo", "bar", "baz"]
+ #
+ # Modifying the returned column values does not modify the table:
+ # col[0] = 'bat'
+ # col # => ["bat", "bar", "baz"]
+ # table['Name'] # => ["foo", "bar", "baz"]
+ #
+ # Returns an \Array of +nil+ values if there is no such column:
+ # table['Nosuch'] # => [nil, nil, nil]
+ #
+ # source://csv//lib/csv/table.rb#514
+ def [](index_or_header); end
+
+ # :call-seq:
+ # table[n] = row -> row
+ # table[n] = field_or_array_of_fields -> field_or_array_of_fields
+ # table[header] = field_or_array_of_fields -> field_or_array_of_fields
+ #
+ # Puts data onto the table.
+ #
+ # ---
+ #
+ # Set a \Row by Its \Integer Index::
+ # - Form: table[n] = row, +n+ an \Integer,
+ # +row+ a \CSV::Row instance or an \Array of fields.
+ # - Access mode: :row or :col_or_row.
+ # - Return value: +row+.
+ #
+ # If the row exists, it is replaced:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # new_row = CSV::Row.new(['Name', 'Value'], ['bat', 3])
+ # table.by_row! # => #
+ # return_value = table[0] = new_row
+ # return_value.equal?(new_row) # => true # Returned the row
+ # table[0].to_h # => {"Name"=>"bat", "Value"=>3}
+ #
+ # With access mode :col_or_row:
+ # table.by_col_or_row! # => #
+ # table[0] = CSV::Row.new(['Name', 'Value'], ['bam', 4])
+ # table[0].to_h # => {"Name"=>"bam", "Value"=>4}
+ #
+ # With an \Array instead of a \CSV::Row, inherits headers from the table:
+ # array = ['bad', 5]
+ # return_value = table[0] = array
+ # return_value.equal?(array) # => true # Returned the array
+ # table[0].to_h # => {"Name"=>"bad", "Value"=>5}
+ #
+ # If the row does not exist, extends the table by adding rows:
+ # assigns rows with +nil+ as needed:
+ # table.size # => 3
+ # table[5] = ['bag', 6]
+ # table.size # => 6
+ # table[3] # => nil
+ # table[4]# => nil
+ # table[5].to_h # => {"Name"=>"bag", "Value"=>6}
+ #
+ # Note that the +nil+ rows are actually +nil+, not a row of +nil+ fields.
+ #
+ # ---
+ #
+ # Set a Column by Its \Integer Index::
+ # - Form: table[n] = array_of_fields, +n+ an \Integer,
+ # +array_of_fields+ an \Array of \String fields.
+ # - Access mode: :col.
+ # - Return value: +array_of_fields+.
+ #
+ # If the column exists, it is replaced:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # new_col = [3, 4, 5]
+ # table.by_col! # => #
+ # return_value = table[1] = new_col
+ # return_value.equal?(new_col) # => true # Returned the column
+ # table[1] # => [3, 4, 5]
+ # # The rows, as revised:
+ # table.by_row! # => #
+ # table[0].to_h # => {"Name"=>"foo", "Value"=>3}
+ # table[1].to_h # => {"Name"=>"bar", "Value"=>4}
+ # table[2].to_h # => {"Name"=>"baz", "Value"=>5}
+ # table.by_col! # => #
+ #
+ # If there are too few values, fills with +nil+ values:
+ # table[1] = [0]
+ # table[1] # => [0, nil, nil]
+ #
+ # If there are too many values, ignores the extra values:
+ # table[1] = [0, 1, 2, 3, 4]
+ # table[1] # => [0, 1, 2]
+ #
+ # If a single value is given, replaces all fields in the column with that value:
+ # table[1] = 'bat'
+ # table[1] # => ["bat", "bat", "bat"]
+ #
+ # ---
+ #
+ # Set a Column by Its \String Header::
+ # - Form: table[header] = field_or_array_of_fields,
+ # +header+ a \String header, +field_or_array_of_fields+ a field value
+ # or an \Array of \String fields.
+ # - Access mode: :col or :col_or_row.
+ # - Return value: +field_or_array_of_fields+.
+ #
+ # If the column exists, it is replaced:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # new_col = [3, 4, 5]
+ # table.by_col! # => #
+ # return_value = table['Value'] = new_col
+ # return_value.equal?(new_col) # => true # Returned the column
+ # table['Value'] # => [3, 4, 5]
+ # # The rows, as revised:
+ # table.by_row! # => #
+ # table[0].to_h # => {"Name"=>"foo", "Value"=>3}
+ # table[1].to_h # => {"Name"=>"bar", "Value"=>4}
+ # table[2].to_h # => {"Name"=>"baz", "Value"=>5}
+ # table.by_col! # => #
+ #
+ # If there are too few values, fills with +nil+ values:
+ # table['Value'] = [0]
+ # table['Value'] # => [0, nil, nil]
+ #
+ # If there are too many values, ignores the extra values:
+ # table['Value'] = [0, 1, 2, 3, 4]
+ # table['Value'] # => [0, 1, 2]
+ #
+ # If the column does not exist, extends the table by adding columns:
+ # table['Note'] = ['x', 'y', 'z']
+ # table['Note'] # => ["x", "y", "z"]
+ # # The rows, as revised:
+ # table.by_row!
+ # table[0].to_h # => {"Name"=>"foo", "Value"=>0, "Note"=>"x"}
+ # table[1].to_h # => {"Name"=>"bar", "Value"=>1, "Note"=>"y"}
+ # table[2].to_h # => {"Name"=>"baz", "Value"=>2, "Note"=>"z"}
+ # table.by_col!
+ #
+ # If a single value is given, replaces all fields in the column with that value:
+ # table['Value'] = 'bat'
+ # table['Value'] # => ["bat", "bat", "bat"]
+ #
+ # source://csv//lib/csv/table.rb#649
+ def []=(index_or_header, value); end
+
+ # :call-seq:
+ # table.by_col -> table_dup
+ #
+ # Returns a duplicate of +self+, in column mode
+ # (see {Column Mode}[#class-CSV::Table-label-Column+Mode]):
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.mode # => :col_or_row
+ # dup_table = table.by_col
+ # dup_table.mode # => :col
+ # dup_table.equal?(table) # => false # It's a dup
+ #
+ # This may be used to chain method calls without changing the mode
+ # (but also will affect performance and memory usage):
+ # dup_table.by_col['Name']
+ #
+ # Also note that changes to the duplicate table will not affect the original.
+ #
+ # source://csv//lib/csv/table.rb#242
+ def by_col; end
+
+ # :call-seq:
+ # table.by_col! -> self
+ #
+ # Sets the mode for +self+ to column mode
+ # (see {Column Mode}[#class-CSV::Table-label-Column+Mode]); returns +self+:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.mode # => :col_or_row
+ # table1 = table.by_col!
+ # table.mode # => :col
+ # table1.equal?(table) # => true # Returned self
+ #
+ # source://csv//lib/csv/table.rb#257
+ def by_col!; end
+
+ # :call-seq:
+ # table.by_col_or_row -> table_dup
+ #
+ # Returns a duplicate of +self+, in mixed mode
+ # (see {Mixed Mode}[#class-CSV::Table-label-Mixed+Mode]):
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true).by_col!
+ # table.mode # => :col
+ # dup_table = table.by_col_or_row
+ # dup_table.mode # => :col_or_row
+ # dup_table.equal?(table) # => false # It's a dup
+ #
+ # This may be used to chain method calls without changing the mode
+ # (but also will affect performance and memory usage):
+ # dup_table.by_col_or_row['Name']
+ #
+ # Also note that changes to the duplicate table will not affect the original.
+ #
+ # source://csv//lib/csv/table.rb#280
+ def by_col_or_row; end
+
+ # :call-seq:
+ # table.by_col_or_row! -> self
+ #
+ # Sets the mode for +self+ to mixed mode
+ # (see {Mixed Mode}[#class-CSV::Table-label-Mixed+Mode]); returns +self+:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true).by_col!
+ # table.mode # => :col
+ # table1 = table.by_col_or_row!
+ # table.mode # => :col_or_row
+ # table1.equal?(table) # => true # Returned self
+ #
+ # source://csv//lib/csv/table.rb#295
+ def by_col_or_row!; end
+
+ # :call-seq:
+ # table.by_row -> table_dup
+ #
+ # Returns a duplicate of +self+, in row mode
+ # (see {Row Mode}[#class-CSV::Table-label-Row+Mode]):
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.mode # => :col_or_row
+ # dup_table = table.by_row
+ # dup_table.mode # => :row
+ # dup_table.equal?(table) # => false # It's a dup
+ #
+ # This may be used to chain method calls without changing the mode
+ # (but also will affect performance and memory usage):
+ # dup_table.by_row[1]
+ #
+ # Also note that changes to the duplicate table will not affect the original.
+ #
+ # source://csv//lib/csv/table.rb#318
+ def by_row; end
+
+ # :call-seq:
+ # table.by_row! -> self
+ #
+ # Sets the mode for +self+ to row mode
+ # (see {Row Mode}[#class-CSV::Table-label-Row+Mode]); returns +self+:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.mode # => :col_or_row
+ # table1 = table.by_row!
+ # table.mode # => :row
+ # table1.equal?(table) # => true # Returned self
+ #
+ # source://csv//lib/csv/table.rb#333
+ def by_row!; end
+
+ # :call-seq:
+ # table.delete(*indexes) -> deleted_values
+ # table.delete(*headers) -> deleted_values
+ #
+ # If the access mode is :row or :col_or_row,
+ # and each argument is either an \Integer or a \Range,
+ # returns deleted rows.
+ # Otherwise, returns deleted columns data.
+ #
+ # In either case, the returned values are in the order
+ # specified by the arguments. Arguments may be repeated.
+ #
+ # ---
+ #
+ # Returns rows as an \Array of \CSV::Row objects.
+ #
+ # One index:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # deleted_values = table.delete(0)
+ # deleted_values # => [#]
+ #
+ # Two indexes:
+ # table = CSV.parse(source, headers: true)
+ # deleted_values = table.delete(2, 0)
+ # deleted_values # => [#, #]
+ #
+ # ---
+ #
+ # Returns columns data as column Arrays.
+ #
+ # One header:
+ # table = CSV.parse(source, headers: true)
+ # deleted_values = table.delete('Name')
+ # deleted_values # => ["foo", "bar", "baz"]
+ #
+ # Two headers:
+ # table = CSV.parse(source, headers: true)
+ # deleted_values = table.delete('Value', 'Name')
+ # deleted_values # => [["0", "1", "2"], ["foo", "bar", "baz"]]
+ #
+ # source://csv//lib/csv/table.rb#834
+ def delete(*indexes_or_headers); end
+
+ # :call-seq:
+ # table.delete_if {|row_or_column| ... } -> self
+ #
+ # Removes rows or columns for which the block returns a truthy value;
+ # returns +self+.
+ #
+ # Removes rows when the access mode is :row or :col_or_row;
+ # calls the block with each \CSV::Row object:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_row! # => #
+ # table.size # => 3
+ # table.delete_if {|row| row['Name'].start_with?('b') }
+ # table.size # => 1
+ #
+ # Removes columns when the access mode is :col;
+ # calls the block with each column as a 2-element array
+ # containing the header and an \Array of column fields:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_col! # => #
+ # table.headers.size # => 2
+ # table.delete_if {|column_data| column_data[1].include?('2') }
+ # table.headers.size # => 1
+ #
+ # Returns a new \Enumerator if no block is given:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.delete_if # => #:delete_if>
+ #
+ # source://csv//lib/csv/table.rb#887
+ def delete_if(&block); end
+
+ # Extracts the nested value specified by the sequence of +index+ or +header+ objects by calling dig at each step,
+ # returning nil if any intermediate step is nil.
+ #
+ # source://csv//lib/csv/table.rb#1021
+ def dig(index_or_header, *index_or_headers); end
+
+ # :call-seq:
+ # table.each {|row_or_column| ... } -> self
+ #
+ # Calls the block with each row or column; returns +self+.
+ #
+ # When the access mode is :row or :col_or_row,
+ # calls the block with each \CSV::Row object:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.by_row! # => #
+ # table.each {|row| p row }
+ # Output:
+ # #
+ # #
+ # #
+ #
+ # When the access mode is :col,
+ # calls the block with each column as a 2-element array
+ # containing the header and an \Array of column fields:
+ # table.by_col! # => #
+ # table.each {|column_data| p column_data }
+ # Output:
+ # ["Name", ["foo", "bar", "baz"]]
+ # ["Value", ["0", "1", "2"]]
+ #
+ # Returns a new \Enumerator if no block is given:
+ # table.each # => #:each>
+ #
+ # source://csv//lib/csv/table.rb#930
+ def each(&block); end
+
+ # :call-seq:
+ # table.headers -> array_of_headers
+ #
+ # Returns a new \Array containing the \String headers for the table.
+ #
+ # If the table is not empty, returns the headers from the first row:
+ # rows = [
+ # CSV::Row.new(['Foo', 'Bar'], []),
+ # CSV::Row.new(['FOO', 'BAR'], []),
+ # CSV::Row.new(['foo', 'bar'], []),
+ # ]
+ # table = CSV::Table.new(rows)
+ # table.headers # => ["Foo", "Bar"]
+ # table.delete(0)
+ # table.headers # => ["FOO", "BAR"]
+ # table.delete(0)
+ # table.headers # => ["foo", "bar"]
+ #
+ # If the table is empty, returns a copy of the headers in the table itself:
+ # table.delete(0)
+ # table.headers # => ["Foo", "Bar"]
+ #
+ # source://csv//lib/csv/table.rb#360
+ def headers; end
+
+ # :call-seq:
+ # table.inspect => string
+ #
+ # Returns a US-ASCII-encoded \String showing table:
+ # - Class: CSV::Table.
+ # - Access mode: :row, :col, or :col_or_row.
+ # - Size: Row count, including the header row.
+ #
+ # Example:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.inspect # => "#\nName,Value\nfoo,0\nbar,1\nbaz,2\n"
+ #
+ # source://csv//lib/csv/table.rb#1048
+ def inspect; end
+
+ # The current access mode for indexing and iteration.
+ #
+ # source://csv//lib/csv/table.rb#214
+ def mode; end
+
+ # :call-seq:
+ # table.push(*rows_or_arrays) -> self
+ #
+ # A shortcut for appending multiple rows. Equivalent to:
+ # rows.each {|row| self << row }
+ #
+ # Each argument may be either a \CSV::Row object or an \Array:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # rows = [
+ # CSV::Row.new(table.headers, ['bat', 3]),
+ # ['bam', 4]
+ # ]
+ # table.push(*rows)
+ # table[3..4] # => [#, #]
+ #
+ # source://csv//lib/csv/table.rb#788
+ def push(*rows); end
+
+ # :call-seq:
+ # table.to_a -> array_of_arrays
+ #
+ # Returns the table as an \Array of \Arrays;
+ # the headers are in the first row:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.to_a # => [["Name", "Value"], ["foo", "0"], ["bar", "1"], ["baz", "2"]]
+ #
+ # source://csv//lib/csv/table.rb#978
+ def to_a; end
+
+ # :call-seq:
+ # table.to_csv(**options) -> csv_string
+ #
+ # Returns the table as \CSV string.
+ # See {Options for Generating}[../CSV.html#class-CSV-label-Options+for+Generating].
+ #
+ # Defaults option +write_headers+ to +true+:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.to_csv # => "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ #
+ # Omits the headers if option +write_headers+ is given as +false+
+ # (see {Option +write_headers+}[../CSV.html#class-CSV-label-Option+write_headers]):
+ # table.to_csv(write_headers: false) # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Limit rows if option +limit+ is given like +2+:
+ # table.to_csv(limit: 2) # => "Name,Value\nfoo,0\nbar,1\n"
+ #
+ # source://csv//lib/csv/table.rb#1004
+ def to_csv(write_headers: T.unsafe(nil), limit: T.unsafe(nil), **options); end
+
+ # :call-seq:
+ # table.to_csv(**options) -> csv_string
+ #
+ # Returns the table as \CSV string.
+ # See {Options for Generating}[../CSV.html#class-CSV-label-Options+for+Generating].
+ #
+ # Defaults option +write_headers+ to +true+:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.to_csv # => "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ #
+ # Omits the headers if option +write_headers+ is given as +false+
+ # (see {Option +write_headers+}[../CSV.html#class-CSV-label-Option+write_headers]):
+ # table.to_csv(write_headers: false) # => "foo,0\nbar,1\nbaz,2\n"
+ #
+ # Limit rows if option +limit+ is given like +2+:
+ # table.to_csv(limit: 2) # => "Name,Value\nfoo,0\nbar,1\n"
+ #
+ # source://csv//lib/csv/table.rb#1004
+ def to_s(write_headers: T.unsafe(nil), limit: T.unsafe(nil), **options); end
+
+ # :call-seq:
+ # table.values_at(*indexes) -> array_of_rows
+ # table.values_at(*headers) -> array_of_columns_data
+ #
+ # If the access mode is :row or :col_or_row,
+ # and each argument is either an \Integer or a \Range,
+ # returns rows.
+ # Otherwise, returns columns data.
+ #
+ # In either case, the returned values are in the order
+ # specified by the arguments. Arguments may be repeated.
+ #
+ # ---
+ #
+ # Returns rows as an \Array of \CSV::Row objects.
+ #
+ # No argument:
+ # source = "Name,Value\nfoo,0\nbar,1\nbaz,2\n"
+ # table = CSV.parse(source, headers: true)
+ # table.values_at # => []
+ #
+ # One index:
+ # values = table.values_at(0)
+ # values # => [#]
+ #
+ # Two indexes:
+ # values = table.values_at(2, 0)
+ # values # => [#, #]
+ #
+ # One \Range:
+ # values = table.values_at(1..2)
+ # values # => [#, #]
+ #
+ # \Ranges and indexes:
+ # values = table.values_at(0..1, 1..2, 0, 2)
+ # pp values
+ # Output:
+ # [#,
+ # #,
+ # #,
+ # #,
+ # #,
+ # #]
+ #
+ # ---
+ #
+ # Returns columns data as row Arrays,
+ # each consisting of the specified columns data for that row:
+ # values = table.values_at('Name')
+ # values # => [["foo"], ["bar"], ["baz"]]
+ # values = table.values_at('Value', 'Name')
+ # values # => [["0", "foo"], ["1", "bar"], ["2", "baz"]]
+ #
+ # source://csv//lib/csv/table.rb#734
+ def values_at(*indices_or_headers); end
+
+ protected
+
+ # Internal data format used to compare equality.
+ #
+ # source://csv//lib/csv/table.rb#217
+ def table; end
+end
+
+# Note: Don't use this class directly. This is an internal class.
+#
+# source://csv//lib/csv/writer.rb#8
+class CSV::Writer
+ # @return [Writer] a new instance of Writer
+ #
+ # source://csv//lib/csv/writer.rb#16
+ def initialize(output, options); end
+
+ # Adds a new row
+ #
+ # source://csv//lib/csv/writer.rb#31
+ def <<(row); end
+
+ # Returns the value of attribute headers.
+ #
+ # source://csv//lib/csv/writer.rb#14
+ def headers; end
+
+ # A CSV::Writer receives an output, prepares the header, format and output.
+ # It allows us to write new rows in the object and rewind it.
+ #
+ # source://csv//lib/csv/writer.rb#13
+ def lineno; end
+
+ # Winds back to the beginning
+ #
+ # source://csv//lib/csv/writer.rb#63
+ def rewind; end
+
+ private
+
+ # source://csv//lib/csv/writer.rb#69
+ def prepare; end
+
+ # source://csv//lib/csv/writer.rb#105
+ def prepare_force_quotes_fields(force_quotes); end
+
+ # source://csv//lib/csv/writer.rb#132
+ def prepare_format; end
+
+ # source://csv//lib/csv/writer.rb#77
+ def prepare_header; end
+
+ # source://csv//lib/csv/writer.rb#162
+ def prepare_output; end
+
+ # source://csv//lib/csv/writer.rb#189
+ def quote(field, i); end
+
+ # source://csv//lib/csv/writer.rb#180
+ def quote_field(field); end
+end
+
+class Object < ::BasicObject
+ include ::Kernel
+ include ::PP::ObjectMixin
+ include ::MakeMakefile
+
+ private
+
+ # source://csv//lib/csv.rb#3011
+ def CSV(*args, **options, &block); end
+end
+
+# source://csv//lib/csv/core_ext/string.rb#1
+class String
+ include ::Comparable
+
+ # Equivalent to CSV::parse_line(self, options)
+ #
+ # "CSV,data".parse_csv
+ # #=> ["CSV", "data"]
+ #
+ # source://csv//lib/csv/core_ext/string.rb#6
+ def parse_csv(**options); end
+end
diff --git a/sorbet/rbi/gems/enumerable-statistics@2.0.8.rbi b/sorbet/rbi/gems/enumerable-statistics@2.0.8.rbi
new file mode 100644
index 00000000..fdb131c5
--- /dev/null
+++ b/sorbet/rbi/gems/enumerable-statistics@2.0.8.rbi
@@ -0,0 +1,60 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `enumerable-statistics` gem.
+# Please instead update this file by running `bin/tapioca gem enumerable-statistics`.
+
+
+class Array
+ include ::Enumerable
+ include ::EnumerableStatistics::ArrayExtension
+end
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#1
+module EnumerableStatistics; end
+
+# source://enumerable-statistics//lib/enumerable_statistics/array_ext.rb#2
+module EnumerableStatistics::ArrayExtension
+ # source://enumerable-statistics//lib/enumerable_statistics/array_ext.rb#15
+ def argmax; end
+
+ # source://enumerable-statistics//lib/enumerable_statistics/array_ext.rb#31
+ def argmin; end
+
+ def find_max; end
+ def find_min; end
+end
+
+# source://enumerable-statistics//lib/enumerable_statistics/histogram.rb#2
+class EnumerableStatistics::Histogram < ::Struct
+ # Returns the value of attribute isdensity
+ #
+ # @return [Object] the current value of isdensity
+ def density?; end
+
+ # Returns the value of attribute edges
+ #
+ # @return [Object] the current value of edges
+ def edge; end
+end
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#2
+EnumerableStatistics::VERSION = T.let(T.unsafe(nil), String)
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#4
+module EnumerableStatistics::Version; end
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#6
+EnumerableStatistics::Version::MAJOR = T.let(T.unsafe(nil), Integer)
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#6
+EnumerableStatistics::Version::MICRO = T.let(T.unsafe(nil), Integer)
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#6
+EnumerableStatistics::Version::MINOR = T.let(T.unsafe(nil), Integer)
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#7
+EnumerableStatistics::Version::STRING = T.let(T.unsafe(nil), String)
+
+# source://enumerable-statistics//lib/enumerable_statistics/version.rb#5
+EnumerableStatistics::Version::TAG = T.let(T.unsafe(nil), T.untyped)
diff --git a/sorbet/rbi/gems/lbfgsb@0.6.0.rbi b/sorbet/rbi/gems/lbfgsb@0.6.0.rbi
new file mode 100644
index 00000000..11256680
--- /dev/null
+++ b/sorbet/rbi/gems/lbfgsb@0.6.0.rbi
@@ -0,0 +1,111 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `lbfgsb` gem.
+# Please instead update this file by running `bin/tapioca gem lbfgsb`.
+
+
+# Lbfgsb.rb is a Ruby binding for L-BFGS-B with Numo::NArray.
+#
+# source://lbfgsb//lib/lbfgsb/version.rb#4
+module Lbfgsb
+ private
+
+ # source://lbfgsb//lib/lbfgsb.rb#69
+ def fnc(fnc, x, args); end
+
+ # source://lbfgsb//lib/lbfgsb.rb#82
+ def jcb(jcb, x, args); end
+
+ def min_l_bfgs_b(_arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _arg7, _arg8, _arg9, _arg10, _arg11); end
+
+ # Minimize a function using the L-BFGS-B algorithm.
+ #
+ # @param fnc [Method/Proc] Method for calculating the function to be minimized.
+ # @param x_init [Numo::DFloat] (shape: [n_elements]) Initial point.
+ # @param jcb [Method/Proc/Boolean] Method for calculating the gradient vector.
+ # If true is given, fnc is assumed to return the function value and gardient vector as [f, g] array.
+ # @param args [Object] Arguments pass to the 'fnc' and 'jcb'.
+ # @param bounds [Numo::DFloat/Nil] (shape: [n_elements, 2])
+ # \[lower, upper\] bounds for each element x. If nil is given, x is unbounded.
+ # @param factr [Float] The iteration will be stop when
+ #
+ # (f^k - f^\{k+1\})/max{|f^k|,|f^\{k+1\}|,1} <= factr * Lbfgsb::DBL_EPSILON
+ #
+ # Typical values for factr: 1e12 for low accuracy; 1e7 for moderate accuracy; 1e1 for extremely high accuracy.
+ # @param pgtol [Float] The iteration will be stop when
+ #
+ # max{|pg_i| i = 1, ..., n} <= pgtol
+ #
+ # where pg_i is the ith component of the projected gradient.
+ # @param maxcor [Integer] The maximum number of variable metric corrections used to define the limited memory matrix.
+ # @param maxiter [Integer] The maximum number of iterations.
+ # @param verbose [Integer/Nil] If negative value or nil is given, no display output is generated.
+ # @return [Hash] Optimization results; { x:, n_fev:, n_jev:, n_iter:, fnc:, jcb:, task:, success: }
+ # - x [Numo::DFloat] Updated vector by optimization.
+ # - n_fev [Interger] Number of calls of the objective function.
+ # - n_jev [Integer] Number of calls of the jacobian.
+ # - n_iter [Integer] Number of iterations.
+ # - fnc [Float] Value of the objective function.
+ # - jcb [Numo::Narray] Values of the jacobian
+ # - task [String] Description of the cause of the termination.
+ # - success [Boolean] Whether or not the optimization exited successfully.
+ #
+ # source://lbfgsb//lib/lbfgsb.rb#43
+ def minimize(fnc:, x_init:, jcb:, args: T.unsafe(nil), bounds: T.unsafe(nil), factr: T.unsafe(nil), pgtol: T.unsafe(nil), maxcor: T.unsafe(nil), maxiter: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ class << self
+ # Minimize a function using the L-BFGS-B algorithm.
+ #
+ # @param fnc [Method/Proc] Method for calculating the function to be minimized.
+ # @param x_init [Numo::DFloat] (shape: [n_elements]) Initial point.
+ # @param jcb [Method/Proc/Boolean] Method for calculating the gradient vector.
+ # If true is given, fnc is assumed to return the function value and gardient vector as [f, g] array.
+ # @param args [Object] Arguments pass to the 'fnc' and 'jcb'.
+ # @param bounds [Numo::DFloat/Nil] (shape: [n_elements, 2])
+ # \[lower, upper\] bounds for each element x. If nil is given, x is unbounded.
+ # @param factr [Float] The iteration will be stop when
+ #
+ # (f^k - f^\{k+1\})/max{|f^k|,|f^\{k+1\}|,1} <= factr * Lbfgsb::DBL_EPSILON
+ #
+ # Typical values for factr: 1e12 for low accuracy; 1e7 for moderate accuracy; 1e1 for extremely high accuracy.
+ # @param pgtol [Float] The iteration will be stop when
+ #
+ # max{|pg_i| i = 1, ..., n} <= pgtol
+ #
+ # where pg_i is the ith component of the projected gradient.
+ # @param maxcor [Integer] The maximum number of variable metric corrections used to define the limited memory matrix.
+ # @param maxiter [Integer] The maximum number of iterations.
+ # @param verbose [Integer/Nil] If negative value or nil is given, no display output is generated.
+ # @return [Hash] Optimization results; { x:, n_fev:, n_jev:, n_iter:, fnc:, jcb:, task:, success: }
+ # - x [Numo::DFloat] Updated vector by optimization.
+ # - n_fev [Interger] Number of calls of the objective function.
+ # - n_jev [Integer] Number of calls of the jacobian.
+ # - n_iter [Integer] Number of iterations.
+ # - fnc [Float] Value of the objective function.
+ # - jcb [Numo::Narray] Values of the jacobian
+ # - task [String] Description of the cause of the termination.
+ # - success [Boolean] Whether or not the optimization exited successfully.
+ #
+ # source://lbfgsb//lib/lbfgsb.rb#43
+ def minimize(fnc:, x_init:, jcb:, args: T.unsafe(nil), bounds: T.unsafe(nil), factr: T.unsafe(nil), pgtol: T.unsafe(nil), maxcor: T.unsafe(nil), maxiter: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ private
+
+ # source://lbfgsb//lib/lbfgsb.rb#69
+ def fnc(fnc, x, args); end
+
+ # source://lbfgsb//lib/lbfgsb.rb#82
+ def jcb(jcb, x, args); end
+
+ def min_l_bfgs_b(_arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _arg7, _arg8, _arg9, _arg10, _arg11); end
+ end
+end
+
+Lbfgsb::DBL_EPSILON = T.let(T.unsafe(nil), Float)
+Lbfgsb::SZ_F77_INTEGER = T.let(T.unsafe(nil), Integer)
+
+# The version of Lbfgsb.rb you are using.
+#
+# source://lbfgsb//lib/lbfgsb/version.rb#6
+Lbfgsb::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/mmh3@1.2.0.rbi b/sorbet/rbi/gems/mmh3@1.2.0.rbi
new file mode 100644
index 00000000..5e57318b
--- /dev/null
+++ b/sorbet/rbi/gems/mmh3@1.2.0.rbi
@@ -0,0 +1,132 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `mmh3` gem.
+# Please instead update this file by running `bin/tapioca gem mmh3`.
+
+
+# source://mmh3//lib/mmh3/version.rb#3
+module Mmh3
+ private
+
+ # source://mmh3//lib/mmh3.rb#312
+ def block32(kb, bstart, offset); end
+
+ # source://mmh3//lib/mmh3.rb#321
+ def block64(kb, bstart, offset); end
+
+ # source://mmh3//lib/mmh3.rb#348
+ def fmix32(h); end
+
+ # source://mmh3//lib/mmh3.rb#356
+ def fmix64(h); end
+
+ # Generate a 128-bit hash value.
+ #
+ # @example
+ # require 'mmh3'
+ #
+ # puts Mmh3.hash128('Hello, world') # => 87198040132278428547135563345531192982
+ # @param key [String] Key for hash value.
+ # @param seed [Integer] Seed for hash value.
+ # @param x64arch [Boolean] Flag indicating whether to generate hash value for x64 architecture.
+ # @return [Integer] Returns hash value.
+ #
+ # source://mmh3//lib/mmh3.rb#70
+ def hash128(key, seed: T.unsafe(nil), x64arch: T.unsafe(nil)); end
+
+ # private
+ #
+ # source://mmh3//lib/mmh3.rb#78
+ def hash128_x64(key, seed = T.unsafe(nil)); end
+
+ # source://mmh3//lib/mmh3.rb#166
+ def hash128_x86(key, seed = T.unsafe(nil)); end
+
+ # Generate a 32-bit hash value.
+ #
+ # @example
+ # require 'mmh3'
+ #
+ # puts Mmh3.hash32('Hello, world') # => 1785891924
+ # @param key [String] Key for hash value.
+ # @param seed [Integer] Seed for hash value.
+ # @return [Integer] Returns hash value.
+ #
+ # source://mmh3//lib/mmh3.rb#24
+ def hash32(key, seed: T.unsafe(nil)); end
+
+ # source://mmh3//lib/mmh3.rb#334
+ def rotl32(x, r); end
+
+ # source://mmh3//lib/mmh3.rb#338
+ def rotl64(x, r); end
+
+ # source://mmh3//lib/mmh3.rb#342
+ def scramble32(k); end
+
+ class << self
+ # Generate a 128-bit hash value.
+ #
+ # @example
+ # require 'mmh3'
+ #
+ # puts Mmh3.hash128('Hello, world') # => 87198040132278428547135563345531192982
+ # @param key [String] Key for hash value.
+ # @param seed [Integer] Seed for hash value.
+ # @param x64arch [Boolean] Flag indicating whether to generate hash value for x64 architecture.
+ # @return [Integer] Returns hash value.
+ #
+ # source://mmh3//lib/mmh3.rb#70
+ def hash128(key, seed: T.unsafe(nil), x64arch: T.unsafe(nil)); end
+
+ # Generate a 32-bit hash value.
+ #
+ # @example
+ # require 'mmh3'
+ #
+ # puts Mmh3.hash32('Hello, world') # => 1785891924
+ # @param key [String] Key for hash value.
+ # @param seed [Integer] Seed for hash value.
+ # @return [Integer] Returns hash value.
+ #
+ # source://mmh3//lib/mmh3.rb#24
+ def hash32(key, seed: T.unsafe(nil)); end
+
+ private
+
+ # source://mmh3//lib/mmh3.rb#312
+ def block32(kb, bstart, offset); end
+
+ # source://mmh3//lib/mmh3.rb#321
+ def block64(kb, bstart, offset); end
+
+ # source://mmh3//lib/mmh3.rb#348
+ def fmix32(h); end
+
+ # source://mmh3//lib/mmh3.rb#356
+ def fmix64(h); end
+
+ # private
+ #
+ # source://mmh3//lib/mmh3.rb#78
+ def hash128_x64(key, seed = T.unsafe(nil)); end
+
+ # source://mmh3//lib/mmh3.rb#166
+ def hash128_x86(key, seed = T.unsafe(nil)); end
+
+ # source://mmh3//lib/mmh3.rb#334
+ def rotl32(x, r); end
+
+ # source://mmh3//lib/mmh3.rb#338
+ def rotl64(x, r); end
+
+ # source://mmh3//lib/mmh3.rb#342
+ def scramble32(k); end
+ end
+end
+
+# Version number of Mmh3 you are using.
+#
+# source://mmh3//lib/mmh3/version.rb#5
+Mmh3::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-clustering@1.0.0.rbi b/sorbet/rbi/gems/rumale-clustering@1.0.0.rbi
new file mode 100644
index 00000000..5ec1a491
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-clustering@1.0.0.rbi
@@ -0,0 +1,982 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-clustering` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-clustering`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-clustering//lib/rumale/clustering/dbscan.rb#8
+module Rumale; end
+
+# This module consists of classes that implement cluster analysis methods.
+#
+# source://rumale-clustering//lib/rumale/clustering/dbscan.rb#9
+module Rumale::Clustering; end
+
+# DBSCAN is a class that implements DBSCAN cluster analysis.
+#
+# *Reference*
+# - Ester, M., Kriegel, H-P., Sander, J., and Xu, X., "A density-based algorithm for discovering clusters in large spatial databases with noise," Proc. KDD' 96, pp. 266--231, 1996.
+#
+# @example
+# require 'rumale/clustering/dbscan'
+#
+# analyzer = Rumale::Clustering::DBSCAN.new(eps: 0.5, min_samples: 5)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/dbscan.rb#20
+class Rumale::Clustering::DBSCAN < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with DBSCAN method.
+ #
+ # @param eps [Float] The radius of neighborhood.
+ # @param min_samples [Integer] The number of neighbor samples to be used for the criterion whether a point is a core point.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @return [DBSCAN] a new instance of DBSCAN
+ #
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#38
+ def initialize(eps: T.unsafe(nil), min_samples: T.unsafe(nil), metric: T.unsafe(nil)); end
+
+ # Return the core sample indices.
+ #
+ # @return [Numo::Int32] (shape: [n_core_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#25
+ def core_sample_ids; end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#53
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for cluster analysis.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_samples, n_samples]).
+ # @raise [ArgumentError]
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#66
+ def fit_predict(x); end
+
+ # Return the cluster labels. The negative cluster label indicates that the point is noise.
+ #
+ # @return [Numo::Int32] (shape: [n_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#29
+ def labels; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#95
+ def calc_pairwise_metrics(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#76
+ def check_invalid_array_shape(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#99
+ def expand_cluster(metric_mat, query_id, cluster_id); end
+
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#80
+ def partial_fit(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/dbscan.rb#121
+ def region_query(metric_arr); end
+end
+
+# GaussianMixture is a class that implements cluster analysis with gaussian mixture model.
+#
+# @example
+# require 'rumale/clustering/gaussian_mixture'
+#
+# analyzer = Rumale::Clustering::GaussianMixture.new(n_clusters: 10, max_iter: 50)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# # If Numo::Linalg is installed, you can specify 'full' for the tyep of covariance option.
+# require 'numo/linalg/autoloader'
+# require 'rumale/clustering/gaussian_mixture'
+#
+# analyzer = Rumale::Clustering::GaussianMixture.new(n_clusters: 10, max_iter: 50, covariance_type: 'full')
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#26
+class Rumale::Clustering::GaussianMixture < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with gaussian mixture model.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param init [String] The initialization method for centroids ('random' or 'k-means++').
+ # @param covariance_type [String] The type of covariance parameter to be used ('diag' or 'full').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param reg_covar [Float] The non-negative regularization to the diagonal of covariance.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [GaussianMixture] a new instance of GaussianMixture
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#54
+ def initialize(n_clusters: T.unsafe(nil), init: T.unsafe(nil), covariance_type: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), reg_covar: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the diagonal elements of covariance matrix of each cluster.
+ #
+ # @return [Numo::DFloat] (shape: [n_clusters, n_features] if 'diag', [n_clusters, n_features, n_features] if 'full')
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#43
+ def covariances; end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#73
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#109
+ def fit_predict(x); end
+
+ # Return the mean of each cluster.
+ #
+ # @return [Numo::DFloat] (shape: [n_clusters, n_features])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#39
+ def means; end
+
+ # Return the number of iterations to covergence.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#31
+ def n_iter; end
+
+ # Predict cluster labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the cluster label.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#97
+ def predict(x); end
+
+ # Return the weight of each cluster.
+ #
+ # @return [Numo::DFloat] (shape: [n_clusters])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#35
+ def weights; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#118
+ def assign_cluster(memberships); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#151
+ def calc_covariances(x, means, memberships, reg_cover, covar_type); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#159
+ def calc_diag_covariances(x, means, reg_cover, memberships); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#168
+ def calc_full_covariances(x, means, reg_cover, memberships); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#192
+ def calc_inv_covariance(covar, covar_type); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#200
+ def calc_inv_sqrt_det_covariance(covar, covar_type); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#147
+ def calc_means(x, memberships); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#131
+ def calc_memberships(x, weights, means, covars, covar_type); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#181
+ def calc_unnormalized_membership(centered, weight, covar, covar_type); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#143
+ def calc_weights(n_samples, memberships); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#208
+ def check_enable_linalg(method_name); end
+
+ # source://rumale-clustering//lib/rumale/clustering/gaussian_mixture.rb#123
+ def init_memberships(x); end
+end
+
+# HDBSCAN is a class that implements HDBSCAN cluster analysis.
+#
+# *Reference*
+# - Campello, R J. G. B., Moulavi, D., Zimek, A., and Sander, J., "Hierarchical Density Estimates for Data Clustering, Visualization, and Outlier Detection," TKDD, Vol. 10 (1), pp. 5:1--5:51, 2015.
+# - Campello, R J. G. B., Moulavi, D., and Sander, J., "Density-Based Clustering Based on Hierarchical Density Estimates," Proc. PAKDD'13, pp. 160--172, 2013.
+# - Lelis, L., and Sander, J., "Semi-Supervised Density-Based Clustering," Proc. ICDM'09, pp. 842--847, 2009.
+#
+# @example
+# require 'rumale/clustering/hdbscan'
+#
+# analyzer = Rumale::Clustering::HDBSCAN.new(min_samples: 5)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#23
+class Rumale::Clustering::HDBSCAN < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with HDBSCAN algorithm.
+ #
+ # @param min_samples [Integer] The number of neighbor samples to be used for the criterion whether a point is a core point.
+ # @param min_cluster_size [Integer/Nil] The minimum size of cluster. If nil is given, it is set equal to min_samples.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @return [HDBSCAN] a new instance of HDBSCAN
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#37
+ def initialize(min_samples: T.unsafe(nil), min_cluster_size: T.unsafe(nil), metric: T.unsafe(nil)); end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#52
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for cluster analysis.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_samples, n_samples]).
+ # @raise [ArgumentError]
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#65
+ def fit_predict(x); end
+
+ # Return the cluster labels. The negative cluster label indicates that the point is noise.
+ #
+ # @return [Numo::Int32] (shape: [n_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#28
+ def labels; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#144
+ def breadth_first_search_hierarchy(hierarchy, root); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#242
+ def breadth_first_search_tree(tree, root); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#75
+ def check_invalid_array_shape(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#213
+ def cluster_stability(tree); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#158
+ def condense_tree(hierarchy, min_cluster_size); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#252
+ def flatten(tree, stabilities); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#139
+ def mutual_reachability_distances(distance_mat, min_samples); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#131
+ def partial_fit(distance_mat); end
+end
+
+# source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#111
+class Rumale::Clustering::HDBSCAN::Node
+ # @return [Node] a new instance of Node
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#116
+ def initialize(x:, y:, weight:, n_elements: T.unsafe(nil)); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#124
+ def ==(other); end
+
+ # Returns the value of attribute n_elements.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#113
+ def n_elements; end
+
+ # Returns the value of attribute weight.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#113
+ def weight; end
+
+ # Returns the value of attribute x.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#113
+ def x; end
+
+ # Returns the value of attribute y.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#113
+ def y; end
+end
+
+# source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#80
+class Rumale::Clustering::HDBSCAN::UnionFind
+ # @return [UnionFind] a new instance of UnionFind
+ #
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#81
+ def initialize(n); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#104
+ def find(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/hdbscan.rb#87
+ def union(x, y); end
+end
+
+# KMeans is a class that implements K-Means cluster analysis.
+# The current implementation uses the Euclidean distance for analyzing the clusters.
+#
+# *Reference*
+# - Arthur, D., and Vassilvitskii, S., "k-means++: the advantages of careful seeding," Proc. SODA'07, pp. 1027--1035, 2007.
+#
+# @example
+# require 'rumale/clustering/k_means'
+#
+# analyzer = Rumale::Clustering::KMeans.new(n_clusters: 10, max_iter: 50)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/k_means.rb#21
+class Rumale::Clustering::KMeans < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with K-Means method.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param init [String] The initialization method for centroids ('random' or 'k-means++').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [KMeans] a new instance of KMeans
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#39
+ def initialize(n_clusters: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the centroids.
+ #
+ # @return [Numo::DFloat] (shape: [n_clusters, n_features])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#26
+ def cluster_centers; end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#56
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#87
+ def fit_predict(x); end
+
+ # Predict cluster labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the cluster label.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#77
+ def predict(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#30
+ def rng; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#95
+ def assign_cluster(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/k_means.rb#100
+ def init_cluster_centers(x); end
+end
+
+# KMedoids is a class that implements K-Medoids cluster analysis.
+#
+# *Reference*
+# - Arthur, D., and Vassilvitskii, S., "k-means++: the advantages of careful seeding," Proc. SODA'07, pp. 1027--1035, 2007.
+#
+# @example
+# require 'rumale/clustering/k_medoids'
+#
+# analyzer = Rumale::Clustering::KMedoids.new(n_clusters: 10, max_iter: 50)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#19
+class Rumale::Clustering::KMedoids < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with K-Medoids method.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @param init [String] The initialization method for centroids ('random' or 'k-means++').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [KMedoids] a new instance of KMedoids
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#40
+ def initialize(n_clusters: T.unsafe(nil), metric: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#59
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_samples, n_samples]).
+ # @raise [ArgumentError]
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#103
+ def fit_predict(x); end
+
+ # Return the indices of medoids.
+ #
+ # @return [Numo::Int32] (shape: [n_clusters])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#24
+ def medoid_ids; end
+
+ # Predict cluster labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the cluster label.
+ # If the metric is 'precomputed', x must be distances between samples and medoids (shape: [n_samples, n_clusters]).
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#87
+ def predict(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#28
+ def rng; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#121
+ def assign_cluster(distances_to_medoids); end
+
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#117
+ def check_invalid_array_shape(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/k_medoids.rb#125
+ def init_cluster_centers(distance_mat); end
+end
+
+# MeanShift is a class that implements mean-shift clustering with flat kernel.
+#
+# *Reference*
+# - Carreira-Perpinan, M A., "A review of mean-shift algorithms for clustering," arXiv:1503.00687v1.
+# - Sheikh, Y A., Khan, E A., and Kanade, T., "Mode-seeking by Medoidshifts," Proc. ICCV'07, pp. 1--8, 2007.
+# - Vedaldi, A., and Soatto, S., "Quick Shift and Kernel Methods for Mode Seeking," Proc. ECCV'08, pp. 705--718, 2008.
+#
+# @example
+# require 'rumale/clustering/mean_shift'
+#
+# analyzer = Rumale::Clustering::MeanShift.new(bandwidth: 1.5)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#22
+class Rumale::Clustering::MeanShift < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with mean-shift algorithm.
+ #
+ # @param bandwidth [Float] The bandwidth parameter of flat kernel.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion
+ # @return [MeanShift] a new instance of MeanShift
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#34
+ def initialize(bandwidth: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil)); end
+
+ # Return the centroids.
+ #
+ # @return [Numo::DFloat] (shape: [n_clusters, n_features])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#27
+ def cluster_centers; end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#48
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#82
+ def fit_predict(x); end
+
+ # Predict cluster labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the cluster label.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#72
+ def predict(x); end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#90
+ def assign_cluster(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/mean_shift.rb#96
+ def connect_components(z); end
+end
+
+# MiniBatchKMeans is a class that implements K-Means cluster analysis
+# with mini-batch stochastic gradient descent (SGD).
+#
+# *Reference*
+# - Sculley, D., "Web-scale k-means clustering," Proc. WWW'10, pp. 1177--1178, 2010.
+#
+# @example
+# require 'rumale/clustering/mini_batch_k_means'
+#
+# analyzer = Rumale::Clustering::MiniBatchKMeans.new(n_clusters: 10, max_iter: 50, batch_size: 50, random_seed: 1)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#21
+class Rumale::Clustering::MiniBatchKMeans < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with K-Means method with mini-batch SGD.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param init [String] The initialization method for centroids ('random' or 'k-means++').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param batch_size [Integer] The size of the mini batches.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [MiniBatchKMeans] a new instance of MiniBatchKMeans
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#40
+ def initialize(n_clusters: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the centroids.
+ #
+ # @return [Numo::DFloat] (shape: [n_clusters, n_features])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#26
+ def cluster_centers; end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#58
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#106
+ def fit_predict(x); end
+
+ # Predict cluster labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the cluster label.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#96
+ def predict(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#30
+ def rng; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#114
+ def assign_cluster(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/mini_batch_k_means.rb#119
+ def init_cluster_centers(x, sub_rng); end
+end
+
+# PowerIteration is a class that implements power iteration clustering.
+#
+# *Reference*
+# - Lin, F., and Cohen, W W., "Power Iteration Clustering," Proc. ICML'10, pp. 655--662, 2010.
+#
+# @example
+# require 'rumale/clustering/power_iteration'
+#
+# analyzer = Rumale::Clustering::PowerIteration.new(n_clusters: 10, gamma: 8.0, max_iter: 1000)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#21
+class Rumale::Clustering::PowerIteration < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with power iteration clustering.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param affinity [String] The representation of affinity matrix ('rbf' or 'precomputed').
+ # @param gamma [Float] The parameter of rbf kernel, if nil it is 1 / n_features.
+ # If affinity = 'precomputed', this parameter is ignored.
+ # @param init [String] The initialization method for centroids of K-Means clustering ('random' or 'k-means++').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param eps [Float] A small value close to zero to avoid zero division error.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [PowerIteration] a new instance of PowerIteration
+ #
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#47
+ def initialize(n_clusters: T.unsafe(nil), affinity: T.unsafe(nil), gamma: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), eps: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the data in embedded space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#26
+ def embedding; end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#68
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # If the affinity is 'precomputed', x must be a square affinity matrix (shape: [n_samples, n_samples]).
+ # @raise [ArgumentError]
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#81
+ def fit_predict(x); end
+
+ # Return the cluster labels.
+ #
+ # @return [Numo::Int32] (shape: [n_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#30
+ def labels; end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#34
+ def n_iter; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#92
+ def check_invalid_array_shape(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#96
+ def embedded_space(affinity_mat, max_iter, tol); end
+
+ # source://rumale-clustering//lib/rumale/clustering/power_iteration.rb#120
+ def line_kmeans_clustering(vec); end
+end
+
+# SNN is a class that implements Shared Nearest Neighbor cluster analysis.
+# The SNN method is a variation of DBSCAN that uses similarity based on k-nearest neighbors as a metric.
+#
+# *Reference*
+# - Ertoz, L., Steinbach, M., and Kumar, V., "Finding Clusters of Different Sizes, Shapes, and Densities in Noisy, High Dimensional Data," Proc. SDM'03, pp. 47--58, 2003.
+# - Houle, M E., Kriegel, H-P., Kroger, P., Schubert, E., and Zimek, A., "Can Shared-Neighbor Distances Defeat the Curse of Dimensionality?," Proc. SSDBM'10, pp. 482--500, 2010.
+#
+# @example
+# require 'rumale/clustering/snn'
+#
+#   analyzer = Rumale::Clustering::SNN.new(n_neighbors: 10, eps: 5, min_samples: 5)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/snn.rb#20
+class Rumale::Clustering::SNN < ::Rumale::Clustering::DBSCAN
+  # Create a new cluster analyzer with Shared Nearest Neighbor method.
+ #
+ # @param n_neighbors [Integer] The number of neighbors to be used for finding k-nearest neighbors.
+ # @param eps [Integer] The threshold value for finding connected components based on similarity.
+ # @param min_samples [Integer] The number of neighbor samples to be used for the criterion whether a point is a core point.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @return [SNN] a new instance of SNN
+ #
+ # source://rumale-clustering//lib/rumale/clustering/snn.rb#29
+ def initialize(n_neighbors: T.unsafe(nil), eps: T.unsafe(nil), min_samples: T.unsafe(nil), metric: T.unsafe(nil)); end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-clustering//lib/rumale/clustering/snn.rb#44
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for cluster analysis.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_samples, n_samples]).
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/snn.rb#53
+ def fit_predict(x); end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/snn.rb#59
+ def calc_pairwise_metrics(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/snn.rb#70
+ def region_query(similarity_arr); end
+end
+
+# SingleLinkage is a class that implements hierarchical cluster analysis with single linkage method.
+# This class is used internally for HDBSCAN.
+#
+# *Reference*
+# - Mullner, D., "Modern hierarchical, agglomerative clustering algorithms," arXiv:1109.2378, 2011.
+#
+# @example
+# require 'rumale/clustering/single_linkage'
+#
+# analyzer = Rumale::Clustering::SingleLinkage.new(n_clusters: 2)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#21
+class Rumale::Clustering::SingleLinkage < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with single linkage algorithm.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @return [SingleLinkage] a new instance of SingleLinkage
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#38
+ def initialize(n_clusters: T.unsafe(nil), metric: T.unsafe(nil)); end
+
+ # Analysis clusters with given training data.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#52
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for cluster analysis.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_samples, n_samples]).
+ # @raise [ArgumentError]
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#65
+ def fit_predict(x); end
+
+ # Return the hierarchical structure.
+ #
+ # @return [Array] (shape: [n_samples - 1])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#30
+ def hierarchy; end
+
+ # Return the cluster labels.
+ #
+ # @return [Numo::Int32] (shape: [n_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#26
+ def labels; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#75
+ def check_invalid_array_shape(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#168
+ def descedent_ids(hierarchy_, start_node); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#189
+ def flatten(hierarchy_, n_clusters); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#136
+ def minimum_spanning_tree(complete_graph); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#130
+ def partial_fit(distance_mat); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#154
+ def single_linkage_hierarchy(mst); end
+end
+
+# source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#110
+class Rumale::Clustering::SingleLinkage::Node
+ # @return [Node] a new instance of Node
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#115
+ def initialize(x:, y:, weight:, n_elements: T.unsafe(nil)); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#123
+ def ==(other); end
+
+ # Returns the value of attribute n_elements.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#112
+ def n_elements; end
+
+ # Returns the value of attribute weight.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#112
+ def weight; end
+
+ # Returns the value of attribute x.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#112
+ def x; end
+
+ # Returns the value of attribute y.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#112
+ def y; end
+end
+
+# source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#80
+class Rumale::Clustering::SingleLinkage::UnionFind
+ # @return [UnionFind] a new instance of UnionFind
+ #
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#81
+ def initialize(n); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#98
+ def find(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/single_linkage.rb#88
+ def union(x, y); end
+end
+
+# SpectralClustering is a class that implements the normalized spectral clustering.
+#
+# *Reference*
+# - Ng, A Y., Jordan, M I., and Weiss, Y., "On Spectral Clustering: Analysis and an algorithm," Proc. NIPS'01, pp. 849--856, 2001.
+# - von Luxburg, U., "A tutorial on spectral clustering," Statistics and Computing, Vol. 17 (4), pp. 395--416, 2007.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/clustering/spectral_clustering'
+#
+# analyzer = Rumale::Clustering::SpectralClustering.new(n_clusters: 10, gamma: 8.0)
+# cluster_labels = analyzer.fit_predict(samples)
+#
+# source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#24
+class Rumale::Clustering::SpectralClustering < ::Rumale::Base::Estimator
+ include ::Rumale::Base::ClusterAnalyzer
+
+ # Create a new cluster analyzer with normalized spectral clustering.
+ #
+ # @param n_clusters [Integer] The number of clusters.
+ # @param affinity [String] The representation of affinity matrix ('rbf' or 'precomputed').
+ # If affinity = 'rbf', the class performs the normalized spectral clustering with the fully connected graph weighted by rbf kernel.
+ # @param gamma [Float] The parameter of rbf kernel, if nil it is 1 / n_features.
+ # If affinity = 'precomputed', this parameter is ignored.
+ # @param init [String] The initialization method for centroids of K-Means clustering ('random' or 'k-means++').
+ # @param max_iter [Integer] The maximum number of iterations for K-Means clustering.
+ # @param tol [Float] The tolerance of termination criterion for K-Means clustering.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [SpectralClustering] a new instance of SpectralClustering
+ #
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#46
+ def initialize(n_clusters: T.unsafe(nil), affinity: T.unsafe(nil), gamma: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the data in embedded space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_clusters])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#29
+ def embedding; end
+
+ # Analysis clusters with given training data.
+ # To execute this method, Numo::Linalg must be loaded.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#66
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Analysis clusters and assign samples to clusters.
+ # To execute this method, Numo::Linalg must be loaded.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for cluster analysis.
+ # If the metric is 'precomputed', x must be a square affinity matrix (shape: [n_samples, n_samples]).
+ # @raise [ArgumentError]
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted cluster label per sample.
+ #
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#82
+ def fit_predict(x); end
+
+ # Return the cluster labels.
+ #
+ # @return [Numo::Int32] (shape: [n_samples])
+ #
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#33
+ def labels; end
+
+ private
+
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#98
+ def check_invalid_array_shape(x); end
+
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#102
+ def embedded_space(affinity_mat, n_clusters); end
+
+ # source://rumale-clustering//lib/rumale/clustering/spectral_clustering.rb#112
+ def kmeans_clustering(x); end
+end
+
+# source://rumale-clustering//lib/rumale/clustering/version.rb#8
+Rumale::Clustering::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-core@1.0.0.rbi b/sorbet/rbi/gems/rumale-core@1.0.0.rbi
new file mode 100644
index 00000000..27bf501f
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-core@1.0.0.rbi
@@ -0,0 +1,575 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-core` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-core`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-core//lib/rumale/core/version.rb#4
+module Rumale; end
+
+# This module consists of basic mix-in classes.
+#
+# source://rumale-core//lib/rumale/base/estimator.rb#7
+module Rumale::Base; end
+
+# Module for all classifiers in Rumale.
+#
+# source://rumale-core//lib/rumale/base/classifier.rb#10
+module Rumale::Base::Classifier
+ # An abstract method for fitting a model.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/classifier.rb#12
+ def fit; end
+
+ # An abstract method for predicting labels.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/classifier.rb#17
+ def predict; end
+
+ # Calculate the mean accuracy of the given testing data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) Testing data.
+ # @param y [Numo::Int32] (shape: [n_samples]) True labels for testing data.
+ # @return [Float] Mean accuracy
+ #
+ # source://rumale-core//lib/rumale/base/classifier.rb#26
+ def score(x, y); end
+end
+
+# Module for all clustering algorithms in Rumale.
+#
+# source://rumale-core//lib/rumale/base/cluster_analyzer.rb#8
+module Rumale::Base::ClusterAnalyzer
+ # An abstract method for analyzing clusters and predicting cluster indices.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/cluster_analyzer.rb#10
+ def fit_predict; end
+
+ # Calculate purity of clustering result.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) Testing data.
+ # @param y [Numo::Int32] (shape: [n_samples]) True labels for testing data.
+ # @return [Float] Purity
+ #
+ # source://rumale-core//lib/rumale/base/cluster_analyzer.rb#19
+ def score(x, y); end
+end
+
+# Base class for all estimators in Rumale.
+#
+# source://rumale-core//lib/rumale/base/estimator.rb#9
+class Rumale::Base::Estimator
+ # Return parameters about an estimator.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-core//lib/rumale/base/estimator.rb#12
+ def params; end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-core//lib/rumale/base/estimator.rb#16
+ def enable_linalg?(warning: T.unsafe(nil)); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-core//lib/rumale/base/estimator.rb#34
+ def enable_parallel?(warning: T.unsafe(nil)); end
+
+ # source://rumale-core//lib/rumale/base/estimator.rb#47
+ def n_processes; end
+
+ # source://rumale-core//lib/rumale/base/estimator.rb#53
+ def parallel_map(n_outputs, &block); end
+end
+
+# Module for all evaluation measures in Rumale.
+#
+# source://rumale-core//lib/rumale/base/evaluator.rb#8
+module Rumale::Base::Evaluator
+ # An abstract method for evaluation of model.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/evaluator.rb#10
+ def score; end
+end
+
+# Module for all regressors in Rumale.
+#
+# source://rumale-core//lib/rumale/base/regressor.rb#8
+module Rumale::Base::Regressor
+ # An abstract method for fitting a model.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/regressor.rb#10
+ def fit; end
+
+ # An abstract method for predicting labels.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/regressor.rb#15
+ def predict; end
+
+ # Calculate the coefficient of determination for the given testing data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) Testing data.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) Target values for testing data.
+ # @return [Float] Coefficient of determination
+ #
+ # source://rumale-core//lib/rumale/base/regressor.rb#24
+ def score(x, y); end
+end
+
+# Module for all validation methods in Rumale.
+#
+# source://rumale-core//lib/rumale/base/splitter.rb#8
+module Rumale::Base::Splitter
+ # Return the number of splits.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-core//lib/rumale/base/splitter.rb#11
+ def n_splits; end
+
+ # An abstract method for splitting dataset.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/splitter.rb#14
+ def split; end
+end
+
+# Module for all transformers in Rumale.
+#
+# source://rumale-core//lib/rumale/base/transformer.rb#8
+module Rumale::Base::Transformer
+ # An abstract method for fitting a model.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/transformer.rb#10
+ def fit; end
+
+ # An abstract method for fitting a model and transforming given data.
+ #
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-core//lib/rumale/base/transformer.rb#15
+ def fit_transform; end
+end
+
+# source://rumale-core//lib/rumale/core/version.rb#6
+module Rumale::Core; end
+
+# source://rumale-core//lib/rumale/core/version.rb#8
+Rumale::Core::VERSION = T.let(T.unsafe(nil), String)
+
+# Module for loading and saving a dataset file.
+#
+# source://rumale-core//lib/rumale/dataset.rb#9
+module Rumale::Dataset
+ class << self
+ # Dump the dataset with the libsvm file format.
+ #
+ # @param data [Numo::NArray] (shape: [n_samples, n_features]) matrix consisting of feature vectors.
+ # @param labels [Numo::NArray] (shape: [n_samples]) matrix consisting of labels or target values.
+ # @param filename [String] A path to the output libsvm file.
+ # @param zero_based [Boolean] Whether the column index starts from 0 (true) or 1 (false).
+ #
+ # source://rumale-core//lib/rumale/dataset.rb#43
+ def dump_libsvm_file(data, labels, filename, zero_based: T.unsafe(nil)); end
+
+ # Load a dataset with the libsvm file format into Numo::NArray.
+ #
+ # @param filename [String] A path to a dataset file.
+ # @param n_features [Integer/Nil] The number of features of data to load.
+ # If nil is given, it will be detected automatically from given file.
+ # @param zero_based [Boolean] Whether the column index starts from 0 (true) or 1 (false).
+ # @param dtype [Numo::NArray] Data type of Numo::NArray for features to be loaded.
+ # @return [Array] Returns array containing the (n_samples x n_features) matrix for feature vectors
+ # and (n_samples) vector for labels or target values.
+ #
+ # source://rumale-core//lib/rumale/dataset.rb#22
+ def load_libsvm_file(filename, n_features: T.unsafe(nil), zero_based: T.unsafe(nil), dtype: T.unsafe(nil)); end
+
+ # Generate Gaussian blobs.
+ #
+ # @param n_samples [Integer] The total number of samples.
+ # @param n_features [Integer] The number of features.
+ # If "centers" parameter is given as a Numo::DFloat array, this parameter is ignored.
+ # @param centers [Integer/Numo::DFloat/Nil] The number of cluster centroids or the fixed cluster centroids.
+ # If nil is given, the number of cluster centroids is set to 3.
+ # @param cluster_std [Float] The standard deviation of the clusters.
+ # @param center_box [Array] The bounding box for each cluster centroids.
+ # If "centers" parameter is given as a Numo::DFloat array, this parameter is ignored.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ #
+ # source://rumale-core//lib/rumale/dataset.rb#134
+ def make_blobs(n_samples = T.unsafe(nil), n_features = T.unsafe(nil), centers: T.unsafe(nil), cluster_std: T.unsafe(nil), center_box: T.unsafe(nil), shuffle: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Generate a two-dimensional data set consisting of an inner circle and an outer circle.
+ #
+ # @param n_samples [Integer] The number of samples.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset
+ # @param noise [Float] The standard deviaion of gaussian noise added to the data.
+ # If nil is given, no noise is added.
+ # @param factor [Float] The scale factor between inner and outer circles. The interval of factor is (0, 1).
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ #
+ # source://rumale-core//lib/rumale/dataset.rb#65
+ def make_circles(n_samples, shuffle: T.unsafe(nil), noise: T.unsafe(nil), factor: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Generate a two-dimensional data set consisting of two half circles shifted.
+ #
+ # @param n_samples [Integer] The number of samples.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset
+ # @param noise [Float] The standard deviaion of gaussian noise added to the data.
+ # If nil is given, no noise is added.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ #
+ # source://rumale-core//lib/rumale/dataset.rb#97
+ def make_moons(n_samples, shuffle: T.unsafe(nil), noise: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ private
+
+ # source://rumale-core//lib/rumale/dataset.rb#196
+ def convert_to_matrix(data, n_features, dtype); end
+
+ # source://rumale-core//lib/rumale/dataset.rb#206
+ def detect_dtype(data); end
+
+ # source://rumale-core//lib/rumale/dataset.rb#224
+ def dump_label(label, label_type_str); end
+
+ # source://rumale-core//lib/rumale/dataset.rb#215
+ def dump_libsvm_line(label, ftvec, label_type, value_type, zero_based); end
+
+ # source://rumale-core//lib/rumale/dataset.rb#191
+ def parse_label(label); end
+
+ # source://rumale-core//lib/rumale/dataset.rb#176
+ def parse_libsvm_line(line, zero_based); end
+ end
+end
+
+# Module for calculating pairwise distances, similarities, and kernels.
+#
+# source://rumale-core//lib/rumale/pairwise_metric.rb#7
+module Rumale::PairwiseMetric
+ private
+
+ # Calculate the pairwise cosine distances between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#74
+ def cosine_distance(x, y = T.unsafe(nil)); end
+
+ # Calculate the pairwise cosine simlarities between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#55
+ def cosine_similarity(x, y = T.unsafe(nil)); end
+
+ # Calculate the pairwise euclidean distances between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#15
+ def euclidean_distance(x, y = T.unsafe(nil)); end
+
+ # Calculate the linear kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#97
+ def linear_kernel(x, y = T.unsafe(nil)); end
+
+ # Calculate the pairwise manhattan distances between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#24
+ def manhattan_distance(x, y = T.unsafe(nil)); end
+
+ # Calculate the polynomial kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @param degree [Integer] The parameter of polynomial kernel.
+ # @param gamma [Float] The parameter of polynomial kernel, if nil it is 1 / n_features.
+ # @param coef [Integer] The parameter of polynomial kernel.
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#110
+ def polynomial_kernel(x, y = T.unsafe(nil), degree = T.unsafe(nil), gamma = T.unsafe(nil), coef = T.unsafe(nil)); end
+
+ # Calculate the rbf kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @param gamma [Float] The parameter of rbf kernel, if nil it is 1 / n_features.
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#86
+ def rbf_kernel(x, y = T.unsafe(nil), gamma = T.unsafe(nil)); end
+
+ # Calculate the sigmoid kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @param gamma [Float] The parameter of polynomial kernel, if nil it is 1 / n_features.
+ # @param coef [Integer] The parameter of polynomial kernel.
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#123
+ def sigmoid_kernel(x, y = T.unsafe(nil), gamma = T.unsafe(nil), coef = T.unsafe(nil)); end
+
+ # Calculate the pairwise squared errors between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#40
+ def squared_error(x, y = T.unsafe(nil)); end
+
+ class << self
+ # Calculate the pairwise cosine distances between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#74
+ def cosine_distance(x, y = T.unsafe(nil)); end
+
+ # Calculate the pairwise cosine simlarities between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#55
+ def cosine_similarity(x, y = T.unsafe(nil)); end
+
+ # Calculate the pairwise euclidean distances between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#15
+ def euclidean_distance(x, y = T.unsafe(nil)); end
+
+ # Calculate the linear kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#97
+ def linear_kernel(x, y = T.unsafe(nil)); end
+
+ # Calculate the pairwise manhattan distances between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#24
+ def manhattan_distance(x, y = T.unsafe(nil)); end
+
+ # Calculate the polynomial kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @param degree [Integer] The parameter of polynomial kernel.
+ # @param gamma [Float] The parameter of polynomial kernel, if nil it is 1 / n_features.
+ # @param coef [Integer] The parameter of polynomial kernel.
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#110
+ def polynomial_kernel(x, y = T.unsafe(nil), degree = T.unsafe(nil), gamma = T.unsafe(nil), coef = T.unsafe(nil)); end
+
+ # Calculate the rbf kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @param gamma [Float] The parameter of rbf kernel, if nil it is 1 / n_features.
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#86
+ def rbf_kernel(x, y = T.unsafe(nil), gamma = T.unsafe(nil)); end
+
+ # Calculate the sigmoid kernel between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @param gamma [Float] The parameter of polynomial kernel, if nil it is 1 / n_features.
+ # @param coef [Integer] The parameter of polynomial kernel.
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#123
+ def sigmoid_kernel(x, y = T.unsafe(nil), gamma = T.unsafe(nil), coef = T.unsafe(nil)); end
+
+ # Calculate the pairwise squared errors between x and y.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
+ # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
+ # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
+ #
+ # source://rumale-core//lib/rumale/pairwise_metric.rb#40
+ def squared_error(x, y = T.unsafe(nil)); end
+ end
+end
+
+# Module for calculating posterior class probabilities with SVM outputs.
+# This module is used for internal processes.
+#
+# *Reference*
+# - Platt, J C., "Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods," Adv. Large Margin Classifiers, pp. 61--74, 2000.
+# - Lin, H-T., Lin, C-J., and Weng, R C., "A Note on Platt's Probabilistic Outputs for Support Vector Machines," J. Machine Learning, Vol. 63 (3), pp. 267--276, 2007.
+#
+# @example
+# estimator = Rumale::LinearModel::SVC.new
+# estimator.fit(x, bin_y)
+# df = estimator.decision_function(x)
+# params = Rumale::ProbabilisticOutput.fit_sigmoid(df, bin_y)
+# probs = 1 / (Numo::NMath.exp(params[0] * df + params[1]) + 1)
+#
+# source://rumale-core//lib/rumale/probabilistic_output.rb#19
+module Rumale::ProbabilisticOutput
+ class << self
+ # Fit the probabilistic model for binary SVM outputs.
+ #
+ # @param df [Numo::DFloat] (shape: [n_samples]) The outputs of decision function to be used for fitting the model.
+ # @param bin_y [Numo::Int32] (shape: [n_samples]) The binary labels to be used for fitting the model.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param min_step [Float] The minimum step of Newton's method.
+ # @param sigma [Float] The parameter to avoid hessian matrix from becoming singular matrix.
+ # @return [Numo::DFloat] (shape: 2) The parameters of the model.
+ #
+ # source://rumale-core//lib/rumale/probabilistic_output.rb#29
+ def fit_sigmoid(df, bin_y, max_iter = T.unsafe(nil), min_step = T.unsafe(nil), sigma = T.unsafe(nil)); end
+
+ private
+
+ # source://rumale-core//lib/rumale/probabilistic_output.rb#109
+ def directions(grad_vec, hess_mat); end
+
+ # source://rumale-core//lib/rumale/probabilistic_output.rb#76
+ def error_function(target_probs, df, alpha, beta); end
+
+ # source://rumale-core//lib/rumale/probabilistic_output.rb#96
+ def gradient(target_probs, probs, df); end
+
+ # source://rumale-core//lib/rumale/probabilistic_output.rb#101
+ def hessian_matrix(probs, df, sigma); end
+
+ # source://rumale-core//lib/rumale/probabilistic_output.rb#86
+ def predicted_probs(df, alpha, beta); end
+ end
+end
+
+# source://rumale-core//lib/rumale/utils.rb#7
+module Rumale::Utils
+ private
+
+ # source://rumale-core//lib/rumale/utils.rb#45
+ def binarize_labels(labels); end
+
+ # source://rumale-core//lib/rumale/utils.rb#11
+ def choice_ids(size, probs, rng = T.unsafe(nil)); end
+
+ # source://rumale-core//lib/rumale/utils.rb#56
+ def normalize(x, norm); end
+
+ # source://rumale-core//lib/rumale/utils.rb#37
+ def rand_normal(shape, rng = T.unsafe(nil), mu = T.unsafe(nil), sigma = T.unsafe(nil)); end
+
+ # source://rumale-core//lib/rumale/utils.rb#26
+ def rand_uniform(shape, rng = T.unsafe(nil)); end
+
+ class << self
+ # source://rumale-core//lib/rumale/utils.rb#45
+ def binarize_labels(labels); end
+
+ # source://rumale-core//lib/rumale/utils.rb#11
+ def choice_ids(size, probs, rng = T.unsafe(nil)); end
+
+ # source://rumale-core//lib/rumale/utils.rb#56
+ def normalize(x, norm); end
+
+ # source://rumale-core//lib/rumale/utils.rb#37
+ def rand_normal(shape, rng = T.unsafe(nil), mu = T.unsafe(nil), sigma = T.unsafe(nil)); end
+
+ # source://rumale-core//lib/rumale/utils.rb#26
+ def rand_uniform(shape, rng = T.unsafe(nil)); end
+ end
+end
+
+# source://rumale-core//lib/rumale/validation.rb#5
+module Rumale::Validation
+ private
+
+ # source://rumale-core//lib/rumale/validation.rb#17
+ def check_convert_label_array(y); end
+
+ # source://rumale-core//lib/rumale/validation.rb#9
+ def check_convert_sample_array(x); end
+
+ # source://rumale-core//lib/rumale/validation.rb#25
+ def check_convert_target_value_array(y); end
+
+ # source://rumale-core//lib/rumale/validation.rb#33
+ def check_sample_size(x, y); end
+
+ class << self
+ # @raise [ArgumentError]
+ #
+ # source://rumale-core//lib/rumale/validation.rb#17
+ def check_convert_label_array(y); end
+
+ # @raise [ArgumentError]
+ #
+ # source://rumale-core//lib/rumale/validation.rb#9
+ def check_convert_sample_array(x); end
+
+ # @raise [ArgumentError]
+ #
+ # source://rumale-core//lib/rumale/validation.rb#25
+ def check_convert_target_value_array(y); end
+
+ # @raise [ArgumentError]
+ #
+ # source://rumale-core//lib/rumale/validation.rb#33
+ def check_sample_size(x, y); end
+ end
+end
diff --git a/sorbet/rbi/gems/rumale-decomposition@1.0.0.rbi b/sorbet/rbi/gems/rumale-decomposition@1.0.0.rbi
new file mode 100644
index 00000000..260a5761
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-decomposition@1.0.0.rbi
@@ -0,0 +1,494 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-decomposition` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-decomposition`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#7
+module Rumale; end
+
+# Module for matrix decomposition algorithms.
+#
+# source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#8
+module Rumale::Decomposition; end
+
+# FactorAnalysis is a class that implements fator analysis with EM algorithm.
+#
+# *Reference*
+# - Barber, D., "Bayesian Reasoning and Machine Learning," Cambridge University Press, 2012.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/decomposition/factor_analysis'
+#
+# decomposer = Rumale::Decomposition::FactorAnalysis.new(n_components: 2)
+# representaion = decomposer.fit_transform(samples)
+#
+# source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#20
+class Rumale::Decomposition::FactorAnalysis < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with factor analysis.
+ #
+ # @param n_components [Integer] The number of components (dimensionality of latent space).
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float/Nil] The tolerance of termination criterion for EM algorithm.
+ # If nil is given, iterate EM steps up to the maximum number of iterations.
+ # @return [FactorAnalysis] a new instance of FactorAnalysis
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#49
+ def initialize(n_components: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil)); end
+
+ # Returns the components with maximum variance.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#33
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#63
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#107
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Returns the log likelihood at each iteration.
+ #
+ # @return [Numo::DFloat] (shape: [n_iter])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#37
+ def loglike; end
+
+ # Returns the mean vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#25
+ def mean; end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#41
+ def n_iter; end
+
+ # Returns the estimated noise variance for each feature.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#29
+ def noise_variance; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#118
+ def transform(x); end
+
+ private
+
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#131
+ def log_likelihood(cov_mat, factors, noise_vars); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/factor_analysis.rb#137
+ def truncate_svd(x, k); end
+end
+
+# FastICA is a class that implments Fast Independent Component Analaysis.
+#
+# *Reference*
+# - Hyvarinen, A., "Fast and Robust Fixed-Point Algorithms for Independent Component Analysis," IEEE Trans. Neural Networks, Vol. 10 (3), pp. 626--634, 1999.
+# - Hyvarinen, A., and Oja, E., "Independent Component Analysis: Algorithms and Applications," Neural Networks, Vol. 13 (4-5), pp. 411--430, 2000.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/decomposition/fast_ica'
+#
+# transformer = Rumale::Decomposition::FastICA.new(n_components: 2, random_seed: 1)
+# source_data = transformer.fit_transform(observed_data)
+#
+# source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#22
+class Rumale::Decomposition::FastICA < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with FastICA.
+ #
+ # @param n_components [Integer] The number of independent components.
+ # @param whiten [Boolean] The flag indicating whether to perform whitening.
+ # @param fun [String] The type of contrast function ('logcosh', 'exp', or 'cube').
+ # @param alpha [Float] The parameter of contrast function for 'logcosh' and 'exp'.
+ # If fun = 'cube', this parameter is ignored.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [FastICA] a new instance of FastICA
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#51
+ def initialize(n_components: T.unsafe(nil), whiten: T.unsafe(nil), fun: T.unsafe(nil), alpha: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the unmixing matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#27
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ # @return [FastICA] The learned transformer itself.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#70
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#91
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Inverse transform the given transformed data with the learned model.
+ #
+ # @param z [Numo::DFloat] (shape: [n_samples, n_components]) The source data reconstructed to the mixed data.
+ # @return [Numo::DFloat] (shape: [n_samples, n_featuress]) The mixed data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#113
+ def inverse_transform(z); end
+
+ # Returns the mixing matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_features, n_components])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#31
+ def mixing; end
+
+ # Returns the number of iterations when converged.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#35
+ def n_iter; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#39
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#102
+ def transform(x); end
+
+ private
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#148
+ def decorrelation(w); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#179
+ def grad_cube(x); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#171
+ def grad_exp(x, alpha); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#165
+ def grad_logcosh(x, alpha); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#154
+ def gradient(x, func); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#133
+ def ica(x, fun, max_iter, tol, sub_rng); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/fast_ica.rb#124
+ def whitening(x, n_components); end
+end
+
+# NMF is a class that implements Non-negative Matrix Factorization.
+#
+# *Reference*
+# - Xu, W., Liu, X., and Gong, Y., "Document Clustering Based On Non-negative Matrix Factorization," Proc. SIGIR' 03 , pp. 267--273, 2003.
+#
+# @example
+# require 'rumale/decomposition/nmf'
+#
+# decomposer = Rumale::Decomposition::NMF.new(n_components: 2)
+# representaion = decomposer.fit_transform(samples)
+#
+# source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#20
+class Rumale::Decomposition::NMF < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with NMF.
+ #
+ # @param n_components [Integer] The number of components.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param eps [Float] A small value close to zero to avoid zero division error.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [NMF] a new instance of NMF
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#38
+ def initialize(n_components: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), eps: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the factorization matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#25
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#55
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#67
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Inverse transform the given transformed data with the learned model.
+ #
+ # @param z [Numo::DFloat] (shape: [n_samples, n_components]) The data to be restored into original space with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_featuress]) The restored data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#87
+ def inverse_transform(z); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#29
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#77
+ def transform(x); end
+
+ private
+
+ # source://rumale-decomposition//lib/rumale/decomposition/nmf.rb#95
+ def partial_fit(x, update_comps: T.unsafe(nil)); end
+end
+
+# PCA is a class that implements Principal Component Analysis.
+#
+# *Reference*
+# - Sharma, A., and Paliwal, K K., "Fast principal component analysis using fixed-point algorithm," Pattern Recognition Letters, 28, pp. 1151--1155, 2007.
+#
+# @example
+# require 'rumale/decomposition/pca'
+#
+# decomposer = Rumale::Decomposition::PCA.new(n_components: 2, solver: 'fpt')
+# representaion = decomposer.fit_transform(samples)
+#
+# # If Numo::Linalg is installed, you can specify 'evd' for the solver option.
+# require 'numo/linalg/autoloader'
+# require 'rumale/decomposition/pca'
+#
+# decomposer = Rumale::Decomposition::PCA.new(n_components: 2, solver: 'evd')
+# representaion = decomposer.fit_transform(samples)
+#
+# # If Numo::Linalg is loaded and the solver option is not given,
+# # the solver option is choosen 'evd' automatically.
+# decomposer = Rumale::Decomposition::PCA.new(n_components: 2)
+# representaion = decomposer.fit_transform(samples)
+#
+# source://rumale-decomposition//lib/rumale/decomposition/pca.rb#33
+class Rumale::Decomposition::PCA < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with PCA.
+ #
+ # @param n_components [Integer] The number of principal components.
+ # @param solver [String] The algorithm for the optimization ('auto', 'fpt' or 'evd').
+ # 'auto' chooses the 'evd' solver if Numo::Linalg is loaded. Otherwise, it chooses the 'fpt' solver.
+ # 'fpt' uses the fixed-point algorithm.
+ # 'evd' performs eigen value decomposition of the covariance matrix of samples.
+ # @param max_iter [Integer] The maximum number of iterations. If solver = 'evd', this parameter is ignored.
+ # @param tol [Float] The tolerance of termination criterion. If solver = 'evd', this parameter is ignored.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [PCA] a new instance of PCA
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#58
+ def initialize(n_components: T.unsafe(nil), solver: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the principal components.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#38
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#76
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#112
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Inverse transform the given transformed data with the learned model.
+ #
+ # @param z [Numo::DFloat] (shape: [n_samples, n_components]) The data to be restored into original space with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_featuress]) The restored data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#132
+ def inverse_transform(z); end
+
+ # Returns the mean vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#42
+ def mean; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#46
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#122
+ def transform(x); end
+
+ private
+
+ # source://rumale-decomposition//lib/rumale/decomposition/pca.rb#141
+ def orthogonalize(pcvec); end
+end
+
+# SparsePCA is a class that implements Sparse Principal Component Analysis.
+#
+# *Reference*
+# - Macky, L., "Deflation Methods for Sparse PCA," Advances in NIPS'08, pp. 1017--1024, 2008.
+# - Hein, M. and Bühler, T., "An Inverse Power Method for Nonlinear Eigenproblems with Applications in 1-Spectral Clustering and Sparse PCA," Advances in NIPS'10, pp. 847--855, 2010.
+#
+# @example
+# require 'numo/tiny_linalg'
+# Numo::Linalg = Numo::TinyLinalg
+#
+# require 'rumale/decomposition/sparse_pca'
+#
+# decomposer = Rumale::Decomposition::SparsePCA.new(n_components: 2, reg_param: 0.1)
+# representaion = decomposer.fit_transform(samples)
+# sparse_components = decomposer.components
+#
+# source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#25
+class Rumale::Decomposition::SparsePCA < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Sparse PCA.
+ #
+ # @param n_components [Integer] The number of principal components.
+ # @param reg_param [Float] The regularization parameter (interval: [0, 1]).
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [SparsePCA] a new instance of SparsePCA
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#47
+ def initialize(n_components: T.unsafe(nil), reg_param: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the principal components.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#30
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#67
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#90
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Returns the mean vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#34
+ def mean; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#38
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#100
+ def transform(x); end
+
+ private
+
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#142
+ def coeff_numerator(f); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#153
+ def norm(v, ord); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#108
+ def partial_fit(x); end
+
+ # source://rumale-decomposition//lib/rumale/decomposition/sparse_pca.rb#146
+ def sign(v); end
+end
+
+# source://rumale-decomposition//lib/rumale/decomposition/version.rb#8
+Rumale::Decomposition::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-ensemble@1.0.0.rbi b/sorbet/rbi/gems/rumale-ensemble@1.0.0.rbi
new file mode 100644
index 00000000..fb0a28f9
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-ensemble@1.0.0.rbi
@@ -0,0 +1,1453 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-ensemble` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-ensemble`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-ensemble//lib/rumale/ensemble/version.rb#4
+module Rumale; end
+
+# This module consists of the classes that implement ensemble-based methods.
+#
+# source://rumale-ensemble//lib/rumale/ensemble/version.rb#6
+module Rumale::Ensemble; end
+
+# AdaBoostClassifier is a class that implements AdaBoost (SAMME.R) for classification.
+# This class uses decision tree for a weak learner.
+#
+# *Reference*
+# - Zhu, J., Rosset, S., Zou, H., and Hastie, T., "Multi-class AdaBoost," Technical Report No. 430, Department of Statistics, University of Michigan, 2005.
+#
+# @example
+# require 'rumale/ensemble/ada_boost_classifier'
+#
+# estimator =
+# Rumale::Ensemble::AdaBoostClassifier.new(
+# n_estimators: 10, criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#26
+class Rumale::Ensemble::AdaBoostClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with AdaBoost.
+ #
+ # @param n_estimators [Integer] The number of decision trees for constructing AdaBoost classifier.
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [AdaBoostClassifier] a new instance of AdaBoostClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#58
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#35
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#136
+ def decision_function(x); end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#31
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#39
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [AdaBoostClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#79
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#153
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#165
+ def predict_proba(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_classifier.rb#43
+ def rng; end
+end
+
+# AdaBoostRegressor is a class that implements AdaBoost for regression.
+# This class uses decision tree for a weak learner.
+#
+# *Reference*
+# - Shrestha, D. L., and Solomatine, D. P., "Experiments with AdaBoost.RT, an Improved Boosting Scheme for Regression," Neural Computation 18 (7), pp. 1678--1710, 2006.
+#
+# @example
+# require 'rumale/ensemble/ada_boost_regressor'
+#
+# estimator =
+# Rumale::Ensemble::AdaBoostRegressor.new(
+# n_estimators: 10, criterion: 'mse', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#26
+class Rumale::Ensemble::AdaBoostRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with random forest.
+ #
+ # @param n_estimators [Integer] The number of decision trees for constructing AdaBoost regressor.
+ # @param threshold [Float] The threshold for delimiting correct and incorrect predictions. That is constrained to [0, 1]
+ # @param exponent [Float] The exponent for the weight of each weak learner.
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'mse' and 'mae'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [AdaBoostRegressor] a new instance of AdaBoostRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#60
+ def initialize(n_estimators: T.unsafe(nil), threshold: T.unsafe(nil), exponent: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the weight for each weak learner.
+ #
+ # @return [Numo::DFloat] (size: n_estimates)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#35
+ def estimator_weights; end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#31
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#39
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples]) The target values to be used for fitting the model.
+ # @return [AdaBoostRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#83
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted value per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#154
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/ada_boost_regressor.rb#43
+ def rng; end
+end
+
+# ExtraTreesClassifier is a class that implements extremely randomized trees for classification.
+# The algorithm of extremely randomized trees is similar to random forest.
+# The features of the algorithm of extremely randomized trees are
+# not to apply the bagging procedure and to randomly select the threshold for splitting feature space.
+#
+# *Reference*
+# - Geurts, P., Ernst, D., and Wehenkel, L., "Extremely randomized trees," Machine Learning, vol. 63 (1), pp. 3--42, 2006.
+#
+# @example
+# require 'rumale/ensemble/extra_trees_classifier'
+#
+# estimator =
+# Rumale::Ensemble::ExtraTreesClassifier.new(
+# n_estimators: 10, criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#26
+class Rumale::Ensemble::ExtraTreesClassifier < ::Rumale::Ensemble::RandomForestClassifier
+ # Create a new classifier with extremely randomized trees.
+ #
+ # @param n_estimators [Integer] The number of trees for constructing extremely randomized trees.
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, extra tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on extra tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers 'Math.sqrt(n_features)' features.
+ # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
+ # If nil is given, the method does not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [ExtraTreesClassifier] a new instance of ExtraTreesClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#60
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#123
+ def apply(x); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#33
+ def classes; end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#29
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#37
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [ExtraTreesClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#71
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#103
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#113
+ def predict_proba(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#41
+ def rng; end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_classifier.rb#131
+ def plant_tree(rnd_seed); end
+end
+
+# ExtraTreesRegressor is a class that implements extremely randomized trees for regression
+# The algorithm of extremely randomized trees is similar to random forest.
+# The features of the algorithm of extremely randomized trees are
+# not to apply the bagging procedure and to randomly select the threshold for splitting feature space.
+#
+# *Reference*
+# - Geurts, P., Ernst, D., and Wehenkel, L., "Extremely randomized trees," Machine Learning, vol. 63 (1), pp. 3--42, 2006.
+#
+# @example
+# require 'rumale/ensemble/extra_trees_regressor'
+#
+# estimator =
+# Rumale::Ensemble::ExtraTreesRegressor.new(
+# n_estimators: 10, criterion: 'mse', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#26
+class Rumale::Ensemble::ExtraTreesRegressor < ::Rumale::Ensemble::RandomForestRegressor
+ # Create a new regressor with extremely randomized trees.
+ #
+ # @param n_estimators [Integer] The number of trees for constructing extremely randomized trees.
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'mse' and 'mae'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, extra tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on extra tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers 'Math.sqrt(n_features)' features.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [ExtraTreesRegressor] a new instance of ExtraTreesRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#56
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to assign each leaf.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#108
+ def apply(x); end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#29
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#33
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [ExtraTreesRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#67
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted value per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#98
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#37
+ def rng; end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/extra_trees_regressor.rb#116
+ def plant_tree(rnd_seed); end
+end
+
+# GradientBoostingClassifier is a class that implements gradient tree boosting for classification.
+# The class use negative binomial log-likelihood for the loss function.
+# For multiclass classification problem, it uses one-vs-the-rest strategy.
+#
+# *Reference*
+# - Friedman, J H., "Greedy Function Approximation: A Gradient Boosting Machine," Annals of Statistics, 29 (5), pp. 1189--1232, 2001.
+# - Friedman, J H., "Stochastic Gradient Boosting," Computational Statistics and Data Analysis, 38 (4), pp. 367--378, 2002.
+# - Chen, T., and Guestrin, C., "XGBoost: A Scalable Tree Boosting System," Proc. KDD'16, pp. 785--794, 2016.
+#
+# @example
+# require 'rumale/ensemble/gradient_boosting_classifier'
+#
+# estimator =
+# Rumale::Ensemble::GradientBoostingClassifier.new(
+# n_estimators: 100, learning_rate: 0.3, reg_lambda: 0.001, random_seed: 1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#29
+class Rumale::Ensemble::GradientBoostingClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with gradient tree boosting.
+ #
+ # @param n_estimators [Integer] The number of trees for constructing classifier.
+ # @param learning_rate [Float] The boosting learning rate
+ # @param reg_lambda [Float] The L2 regularization term on weight.
+ # @param subsample [Float] The subsampling ratio of the training samples.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [GradientBoostingClassifier] a new instance of GradientBoostingClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#68
+ def initialize(n_estimators: T.unsafe(nil), learning_rate: T.unsafe(nil), reg_lambda: T.unsafe(nil), subsample: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators, n_classes]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#172
+ def apply(x); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#38
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#127
+ def decision_function(x); end
+
+ # Return the set of estimators.
+ #
+ # @return [Array] or [Array>]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#34
+ def estimators; end
+
+ # Return the importance for each feature.
+ # The feature importances are calculated based on the numbers of times the feature is used for splitting.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#43
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [GradientBoostingClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#92
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#142
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#154
+ def predict_proba(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#47
+ def rng; end
+
+ private
+
+ # for debug
+ #
+ # def loss(y_true, y_pred)
+ # # y_true in {-1, 1}
+ # Numo::NMath.log(1.0 + Numo::NMath.exp(-2.0 * y_true * y_pred)).mean
+ # end
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#220
+ def gradient(y_true, y_pred); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#225
+ def hessian(y_true, y_pred); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#239
+ def multiclass_base_predictions(y); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#257
+ def multiclass_estimators(x, y); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#272
+ def multiclass_feature_importances; end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#281
+ def multiclass_scores(x); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#186
+ def partial_fit(x, y, init_pred); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_classifier.rb#230
+ def plant_tree(sub_rng); end
+end
+
+# GradientBoostingRegressor is a class that implements gradient tree boosting for regression.
+# The class use L2 loss for the loss function.
+#
+# *Reference*
+# - Friedman, J H. "Greedy Function Approximation: A Gradient Boosting Machine," Annals of Statistics, 29 (5), pp. 1189--1232, 2001.
+# - Friedman, J H. "Stochastic Gradient Boosting," Computational Statistics and Data Analysis, 38 (4), pp. 367--378, 2002.
+# - Chen, T., and Guestrin, C., "XGBoost: A Scalable Tree Boosting System," Proc. KDD'16, pp. 785--794, 2016.
+#
+# @example
+# require 'rumale/ensemble/gradient_boosting_regressor'
+#
+# estimator =
+# Rumale::Ensemble::GradientBoostingRegressor.new(
+# n_estimators: 100, learning_rate: 0.3, reg_lambda: 0.001, random_seed: 1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#28
+class Rumale::Ensemble::GradientBoostingRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with gradient tree boosting.
+ #
+ # @param n_estimators [Integer] The number of trees for constructing regressor.
+ # @param learning_rate [Float] The boosting learning rate
+ # @param reg_lambda [Float] The L2 regularization term on weight.
+ # @param subsample [Float] The subsampling ratio of the training samples.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [GradientBoostingRegressor] a new instance of GradientBoostingRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#63
+ def initialize(n_estimators: T.unsafe(nil), learning_rate: T.unsafe(nil), reg_lambda: T.unsafe(nil), subsample: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#128
+ def apply(x); end
+
+ # Return the set of estimators.
+ #
+ # @return [Array] or [Array>]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#33
+ def estimators; end
+
+ # Return the importance for each feature.
+ # The feature importances are calculated based on the numbers of times the feature is used for splitting.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#38
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples]) The target values to be used for fitting the model.
+ # @return [GradientBoostingRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#87
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples]) Predicted values per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#113
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#42
+ def rng; end
+
+ private
+
+ # for debug
+ #
+ # def loss(y_true, y_pred)
+ # ((y_true - y_pred)**2).mean
+ # end
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#173
+ def gradient(y_true, y_pred); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#177
+ def hessian(n_samples); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#190
+ def multivar_estimators(x, y); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#199
+ def multivar_feature_importances; end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#208
+ def multivar_predict(x); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#140
+ def partial_fit(x, y, init_pred); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/gradient_boosting_regressor.rb#181
+ def plant_tree(sub_rng); end
+end
+
+# RandomForestClassifier is a class that implements random forest for classification.
+#
+# @example
+# require 'rumale/ensemble/random_forest_classifier'
+#
+# estimator =
+# Rumale::Ensemble::RandomForestClassifier.new(
+# n_estimators: 10, criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#23
+class Rumale::Ensemble::RandomForestClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with random forest.
+ #
+ # @param n_estimators [Integer] The number of decision trees for constructing random forest.
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers 'Math.sqrt(n_features)' features.
+ # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
+ # If nil is given, the method does not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding spliting point.
+ # @return [RandomForestClassifier] a new instance of RandomForestClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#59
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#154
+ def apply(x); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#32
+ def classes; end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#28
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#36
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [RandomForestClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#81
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#120
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probailities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#139
+ def predict_proba(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#40
+ def rng; end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#162
+ def plant_tree(rnd_seed); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_classifier.rb#170
+ def predict_proba_tree(tree, x); end
+end
+
+# RandomForestRegressor is a class that implements random forest for regression
+#
+# @example
+# require 'rumale/ensemble/random_forest_regressor'
+#
+# estimator =
+# Rumale::Ensemble::RandomForestRegressor.new(
+# n_estimators: 10, criterion: 'mse', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, traininig_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#22
+class Rumale::Ensemble::RandomForestRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with random forest.
+ #
+ # @param n_estimators [Integer] The numeber of decision trees for contructing random forest.
+ # @param criterion [String] The function to evalue spliting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers 'Math.sqrt(n_features)' features.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding spliting point.
+ # @return [RandomForestRegressor] a new instance of RandomForestRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#54
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to assign each leaf.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#129
+ def apply(x); end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#27
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#31
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [RandomForestRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#76
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted value per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#115
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#35
+ def rng; end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/random_forest_regressor.rb#137
+ def plant_tree(rnd_seed); end
+end
+
+# StackingClassifier is a class that implements classifier with stacking method.
+#
+# *Reference*
+# - Zhou, Z-H., "Ensemble Methods - Foundations and Algorithms," CRC Press Taylor and Francis Group, Chapman and Hall/CRC, 2012.
+#
+# @example
+# require 'rumale/ensemble/stacking_classifier'
+#
+# estimators = {
+# lgr: Rumale::LinearModel::LogisticRegression.new(reg_param: 1e-2),
+# mlp: Rumale::NeuralNetwork::MLPClassifier.new(hidden_units: [256], random_seed: 1),
+# rnd: Rumale::Ensemble::RandomForestClassifier.new(random_seed: 1)
+# }
+# meta_estimator = Rumale::LinearModel::LogisticRegression.new
+# classifier = Rumale::Ensemble::StackingClassifier.new(
+# estimators: estimators, meta_estimator: meta_estimator, random_seed: 1
+# )
+# classifier.fit(training_samples, training_labels)
+# results = classifier.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#31
+class Rumale::Ensemble::StackingClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with stacking method.
+ #
+ # @param estimators [Hash] The base classifiers for extracting meta features.
+ # @param meta_estimator [Classifier/Nil] The meta classifier that predicts class label.
+ # If nil is given, LogisticRegression is used.
+ # @param n_splits [Integer] The number of folds for cross validation with stratified k-fold on meta feature extraction in training phase.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset on cross validation.
+ # @param stack_method [String] The method name of base classifier for using meta feature extraction.
+ # If 'auto' is given, it searches the callable method in the order 'predict_proba', 'decision_function', and 'predict'
+ # on each classifier.
+ # @param passthrough [Boolean] The flag indicating whether to concatenate the original features and meta features when training the meta classifier.
+ # @param random_seed [Integer/Nil] The seed value using to initialize the random generator on cross validation.
+ # @return [StackingClassifier] a new instance of StackingClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#62
+ def initialize(estimators:, meta_estimator: T.unsafe(nil), n_splits: T.unsafe(nil), shuffle: T.unsafe(nil), stack_method: T.unsafe(nil), passthrough: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#44
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) The confidence score per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#134
+ def decision_function(x); end
+
+ # Return the base classifiers.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#36
+ def estimators; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [StackingClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#81
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The meta features for training data.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#189
+ def fit_transform(x, y); end
+
+ # Return the meta classifier.
+ #
+ # @return [Classifier]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#40
+ def meta_estimator; end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) The predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#145
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) The predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#156
+ def predict_proba(x); end
+
+ # Return the method used by each base classifier.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#48
+ def stack_method; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The meta features for samples.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#167
+ def transform(x); end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#215
+ def detect_output_size(n_features); end
+
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#203
+ def detect_stack_method; end
+end
+
+# source://rumale-ensemble//lib/rumale/ensemble/stacking_classifier.rb#199
+Rumale::Ensemble::StackingClassifier::STACK_METHODS = T.let(T.unsafe(nil), Array)
+
+# StackingRegressor is a class that implements regressor with stacking method.
+#
+# *Reference*
+# - Zhou, Z-H., "Ensemble Methods - Foundations and Algorithms," CRC Press Taylor and Francis Group, Chapman and Hall/CRC, 2012.
+#
+# @example
+# require 'rumale/ensemble/stacking_regressor'
+#
+# estimators = {
+# las: Rumale::LinearModel::Lasso.new(reg_param: 1e-2, random_seed: 1),
+# mlp: Rumale::NeuralNetwork::MLPRegressor.new(hidden_units: [256], random_seed: 1),
+# rnd: Rumale::Ensemble::RandomForestRegressor.new(random_seed: 1)
+# }
+# meta_estimator = Rumale::LinearModel::Ridge.new
+# regressor = Rumale::Ensemble::StackingRegressor.new(
+# estimators: estimators, meta_estimator: meta_estimator, random_seed: 1
+# )
+# regressor.fit(training_samples, training_values)
+# results = regressor.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#30
+class Rumale::Ensemble::StackingRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with stacking method.
+ #
+ # @param estimators [Hash] The base regressors for extracting meta features.
+ # @param meta_estimator [Regressor/Nil] The meta regressor that predicts values.
+ # If nil is given, Ridge is used.
+ # @param n_splits [Integer] The number of folds for cross validation with k-fold on meta feature extraction in training phase.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset on cross validation.
+ # @param passthrough [Boolean] The flag indicating whether to concatenate the original features and meta features when training the meta regressor.
+ # @param random_seed [Integer/Nil] The seed value using to initialize the random generator on cross validation.
+ # @return [StackingRegressor] a new instance of StackingRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#50
+ def initialize(estimators:, meta_estimator: T.unsafe(nil), n_splits: T.unsafe(nil), shuffle: T.unsafe(nil), passthrough: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the base regressors.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#35
+ def estimators; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target variables to be used for fitting the model.
+ # @return [StackingRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#67
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target variables to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The meta features for training data.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#149
+ def fit_transform(x, y); end
+
+ # Return the meta regressor.
+ #
+ # @return [Regressor]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#39
+ def meta_estimator; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) The predicted values per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#116
+ def predict(x); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The meta features for samples.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#127
+ def transform(x); end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/stacking_regressor.rb#159
+ def detect_output_size(n_features); end
+end
+
+# source://rumale-ensemble//lib/rumale/ensemble/version.rb#8
+Rumale::Ensemble::VERSION = T.let(T.unsafe(nil), String)
+
+# VRTreesClassifier is a class that implements variable-random (VR) trees for classification.
+#
+# *Reference*
+# - Liu, F. T., Ting, K. M., Yu, Y., and Zhou, Z. H., "Spectrum of Variable-Random Trees," Journal of Artificial Intelligence Research, vol. 32, pp. 355--384, 2008.
+#
+# @example
+# require 'rumale/ensemble/vr_trees_classifier'
+#
+# estimator =
+# Rumale::Ensemble::VRTreesClassifier.new(
+# n_estimators: 10, criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, traininig_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#23
+class Rumale::Ensemble::VRTreesClassifier < ::Rumale::Ensemble::RandomForestClassifier
+ # Create a new classifier with variable-random trees.
+ #
+ # @param n_estimators [Integer] The numeber of trees for contructing variable-random trees.
+ # @param criterion [String] The function to evalue spliting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, variable-random tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on variable-random tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers 'n_features' features.
+ # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
+ # If nil is given, the method does not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding spliting point.
+ # @return [VRTreesClassifier] a new instance of VRTreesClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#57
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#122
+ def apply(x); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#30
+ def classes; end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#26
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#34
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [VRTreesClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#68
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#102
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probailities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#112
+ def predict_proba(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#38
+ def rng; end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_classifier.rb#130
+ def plant_tree(alpha, rnd_seed); end
+end
+
+# VRTreesRegressor is a class that implements variable-random (VR) trees for regression
+#
+# *Reference*
+# - Liu, F. T., Ting, K. M., Yu, Y., and Zhou, Z. H., "Spectrum of Variable-Random Trees," Journal of Artificial Intelligence Research, vol. 32, pp. 355--384, 2008.
+#
+# @example
+# require 'rumale/ensemble/vr_trees_regressor'
+#
+# estimator =
+# Rumale::Ensemble::VRTreesRegressor.new(
+# n_estimators: 10, criterion: 'mse', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, traininig_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#23
+class Rumale::Ensemble::VRTreesRegressor < ::Rumale::Ensemble::RandomForestRegressor
+ # Create a new regressor with variable-random trees.
+ #
+ # @param n_estimators [Integer] The numeber of trees for contructing variable-random trees.
+ # @param criterion [String] The function to evalue spliting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, variable-random tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on variable-random tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers 'n_features' features.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding spliting point.
+ # @return [VRTreesRegressor] a new instance of VRTreesRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#53
+ def initialize(n_estimators: T.unsafe(nil), criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to assign each leaf.
+ # @return [Numo::Int32] (shape: [n_samples, n_estimators]) Leaf index for sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#107
+ def apply(x); end
+
+ # Return the set of estimators.
+ #
+ # @return [Array]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#26
+ def estimators; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#30
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [VRTreesRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#64
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted value per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#97
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#34
+ def rng; end
+
+ private
+
+ # source://rumale-ensemble//lib/rumale/ensemble/vr_trees_regressor.rb#115
+ def plant_tree(alpha, rnd_seed); end
+end
+
+# source://rumale-ensemble//lib/rumale/ensemble/value.rb#6
+module Rumale::Ensemble::Value; end
+
+# source://rumale-ensemble//lib/rumale/ensemble/value.rb#8
+Rumale::Ensemble::Value::N_BITS = T.let(T.unsafe(nil), Integer)
+
+# source://rumale-ensemble//lib/rumale/ensemble/value.rb#10
+Rumale::Ensemble::Value::SEED_BASE = T.let(T.unsafe(nil), Integer)
+
+# VotingClassifier is a class that implements classifier with voting ensemble method.
+#
+# *Reference*
+# - Zhou, Z-H., "Ensemble Methods - Foundations and Algorithms," CRC Press Taylor and Francis Group, Chapman and Hall/CRC, 2012.
+#
+# @example
+# require 'rumale/ensemble/voting_classifier'
+#
+# estimators = {
+# lgr: Rumale::LinearModel::LogisticRegression.new(reg_param: 1e-2),
+# mlp: Rumale::NeuralNetwork::MLPClassifier.new(hidden_units: [256], random_seed: 1),
+# rnd: Rumale::Ensemble::RandomForestClassifier.new(random_seed: 1)
+# }
+# weights = { lgr: 0.2, mlp: 0.3, rnd: 0.5 }
+#
+# classifier = Rumale::Ensemble::VotingClassifier.new(estimators: estimators, weights: weights, voting: 'soft')
+# classifier.fit(x_train, y_train)
+# results = classifier.predict(x_test)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#28
+class Rumale::Ensemble::VotingClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new ensembled classifier with voting rule.
+ #
+ # @param estimators [Hash] The sub-classifiers to vote.
+ # @param weights [Hash] The weight value for each classifier.
+ # @param voting [String] The voting rule for the predicted results of each classifier.
+ # If 'hard' is given, the ensembled classifier predicts the class label by majority vote.
+ # If 'soft' is given, the ensembled classifier uses the weighted average of predicted probabilities for the prediction.
+ # @return [VotingClassifier] a new instance of VotingClassifier
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#46
+ def initialize(estimators:, weights: T.unsafe(nil), voting: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#37
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) The confidence score per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#77
+ def decision_function(x); end
+
+ # Return the sub-classifiers that voted.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#33
+ def estimators; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [VotingClassifier] The learned classifier itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#60
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) The predicted class label per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#95
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#109
+ def predict_proba(x); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_classifier.rb#124
+ def soft_voting?; end
+end
+
+# VotingRegressor is a class that implements regressor with voting ensemble method.
+#
+# *Reference*
+# - Zhou, Z-H., "Ensemble Methods - Foundations and Algorithms," CRC Press Taylor and Francis Group, Chapman and Hall/CRC, 2012.
+#
+# @example
+# require 'rumale/ensemble/voting_regressor'
+#
+# estimators = {
+# rdg: Rumale::LinearModel::Ridge.new(reg_param: 0.1),
+# mlp: Rumale::NeuralNetwork::MLPRegressor.new(hidden_units: [256], random_seed: 1),
+# rnd: Rumale::Ensemble::RandomForestRegressor.new(random_seed: 1)
+# }
+# weights = { rdg: 0.2, mlp: 0.3, rnd: 0.5 }
+#
+# regressor = Rumale::Ensemble::VotingRegressor.new(estimators: estimators, weights: weights, voting: 'soft')
+# regressor.fit(x_train, y_train)
+# results = regressor.predict(x_test)
+#
+# source://rumale-ensemble//lib/rumale/ensemble/voting_regressor.rb#27
+class Rumale::Ensemble::VotingRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new ensembled regressor with voting rule.
+ #
+ # @param estimators [Hash] The sub-regressors to vote.
+ # @param weights [Hash] The weight value for each regressor.
+ # @return [VotingRegressor] a new instance of VotingRegressor
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_regressor.rb#38
+ def initialize(estimators:, weights: T.unsafe(nil)); end
+
+ # Return the sub-regressors that voted.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_regressor.rb#32
+ def estimators; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [VotingRegressor] The learned regressor itself.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_regressor.rb#51
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted value per sample.
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_regressor.rb#66
+ def predict(x); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-ensemble//lib/rumale/ensemble/voting_regressor.rb#79
+ def single_target?; end
+end
diff --git a/sorbet/rbi/gems/rumale-evaluation_measure@1.0.0.rbi b/sorbet/rbi/gems/rumale-evaluation_measure@1.0.0.rbi
new file mode 100644
index 00000000..c1154da9
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-evaluation_measure@1.0.0.rbi
@@ -0,0 +1,750 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-evaluation_measure` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-evaluation_measure`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/version.rb#4
+module Rumale; end
+
+# This module consists of the classes for model evaluation.
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/version.rb#6
+module Rumale::EvaluationMeasure
+ private
+
+ # Output a summary of classification performance for each class.
+ #
+ # @example
+ # y_true = Numo::Int32[0, 1, 1, 2, 2, 2, 0]
+ # y_pred = Numo::Int32[1, 1, 1, 0, 0, 2, 0]
+ # puts Rumale::EvaluationMeasure.classification_report(y_true, y_pred)
+ #
+ # # precision recall f1-score support
+ # #
+ # # 0 0.33 0.50 0.40 2
+ # # 1 0.67 1.00 0.80 2
+ # # 2 1.00 0.33 0.50 3
+ # #
+ # # accuracy 0.57 7
+ # # macro avg 0.67 0.61 0.57 7
+ # # weighted avg 0.71 0.57 0.56 7
+ # @param y_true [Numo::Int32] (shape: [n_samples]) The ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) The predicted labels.
+ # @param target_name [Nil/Array] The label names.
+ # @param output_hash [Boolean] The flag indicating whether to output with Ruby Hash.
+ # @return [String/Hash] The summary of classification performance.
+ # If output_hash is true, it returns the summary with Ruby Hash.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/function.rb#69
+ def classification_report(y_true, y_pred, target_name: T.unsafe(nil), output_hash: T.unsafe(nil)); end
+
+ # Calculate confusion matrix for evaluating classification performance.
+ #
+ # @example
+ # require 'rumale/evaluation_measure/function'
+ #
+ # y_true = Numo::Int32[2, 0, 2, 2, 0, 1]
+ # y_pred = Numo::Int32[0, 0, 2, 2, 0, 2]
+ # p Rumale::EvaluationMeasure.confusion_matrix(y_true, y_pred)
+ #
+ # # Numo::Int32#shape=[3,3]
+ # # [[2, 0, 0],
+ # # [0, 0, 1],
+ # # [1, 0, 2]]
+ # @param y_true [Numo::Int32] (shape: [n_samples]) The ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) The predicted labels.
+ # @return [Numo::Int32] (shape: [n_classes, n_classes]) The confusion matrix.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/function.rb#30
+ def confusion_matrix(y_true, y_pred); end
+
+ class << self
+ # Output a summary of classification performance for each class.
+ #
+ # @example
+ # y_true = Numo::Int32[0, 1, 1, 2, 2, 2, 0]
+ # y_pred = Numo::Int32[1, 1, 1, 0, 0, 2, 0]
+ # puts Rumale::EvaluationMeasure.classification_report(y_true, y_pred)
+ #
+ # # precision recall f1-score support
+ # #
+ # # 0 0.33 0.50 0.40 2
+ # # 1 0.67 1.00 0.80 2
+ # # 2 1.00 0.33 0.50 3
+ # #
+ # # accuracy 0.57 7
+ # # macro avg 0.67 0.61 0.57 7
+ # # weighted avg 0.71 0.57 0.56 7
+ # @param y_true [Numo::Int32] (shape: [n_samples]) The ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) The predicted labels.
+ # @param target_name [Nil/Array] The label names.
+ # @param output_hash [Boolean] The flag indicating whether to output with Ruby Hash.
+ # @return [String/Hash] The summary of classification performance.
+ # If output_hash is true, it returns the summary with Ruby Hash.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/function.rb#69
+ def classification_report(y_true, y_pred, target_name: T.unsafe(nil), output_hash: T.unsafe(nil)); end
+
+ # Calculate confusion matrix for evaluating classification performance.
+ #
+ # @example
+ # require 'rumale/evaluation_measure/function'
+ #
+ # y_true = Numo::Int32[2, 0, 2, 2, 0, 1]
+ # y_pred = Numo::Int32[0, 0, 2, 2, 0, 2]
+ # p Rumale::EvaluationMeasure.confusion_matrix(y_true, y_pred)
+ #
+ # # Numo::Int32#shape=[3,3]
+ # # [[2, 0, 0],
+ # # [0, 0, 1],
+ # # [1, 0, 2]]
+ # @param y_true [Numo::Int32] (shape: [n_samples]) The ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) The predicted labels.
+ # @return [Numo::Int32] (shape: [n_classes, n_classes]) The confusion matrix.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/function.rb#30
+ def confusion_matrix(y_true, y_pred); end
+ end
+end
+
+# Accuracy is a class that calculates the accuracy of classifier from the predicted labels.
+#
+# @example
+# require 'rumale/evaluation_measure/accuracy'
+#
+# evaluator = Rumale::EvaluationMeasure::Accuracy.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/accuracy.rb#14
+class Rumale::EvaluationMeasure::Accuracy
+ include ::Rumale::Base::Evaluator
+
+ # Calculate mean accuracy.
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted labels.
+ # @return [Float] Mean accuracy
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/accuracy.rb#22
+ def score(y_true, y_pred); end
+end
+
+# AdjustedRandScore is a class that calculates the adjusted rand index.
+#
+# *Reference*
+# - Vinh, N X., Epps, J., and Bailey, J., "Information Theoretic Measures for Clusterings Comparison: Variants, Properties, Normalization and Correction for Chance", J. Machine Learnig Research, Vol. 11, pp.2837--2854, 2010.
+#
+# @example
+# require 'rumale/evaluation_measure/adjusted_rand_score'
+#
+# evaluator = Rumale::EvaluationMeasure::AdjustedRandScore.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/adjusted_rand_score.rb#17
+class Rumale::EvaluationMeasure::AdjustedRandScore
+ include ::Rumale::Base::Evaluator
+
+ # Calculate adjusted rand index.
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted cluster labels.
+ # @return [Float] Adjusted rand index.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/adjusted_rand_score.rb#25
+ def score(y_true, y_pred); end
+
+ private
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/adjusted_rand_score.rb#68
+ def comb_two(k); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/adjusted_rand_score.rb#46
+ def contingency_table(y_true, y_pred); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/adjusted_rand_score.rb#62
+ def special_cases?(n_samples, n_classes, n_clusters); end
+end
+
+# CalinskiHarabaszScore is a class that calculates the Calinski and Harabasz score.
+#
+# *Reference*
+# - Calinski, T., and Harabsz, J., "A dendrite method for cluster analysis," Communication in Statistics, Vol. 3 (1), pp. 1--27, 1972.
+#
+# @example
+# require 'rumale/evaluation_measure/calinski_harabasz_score'
+#
+# evaluator = Rumale::EvaluationMeasure::CalinskiHarabaszScore.new
+# puts evaluator.score(x, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/calinski_harabasz_score.rb#17
+class Rumale::EvaluationMeasure::CalinskiHarabaszScore
+ include ::Rumale::Base::Evaluator
+
+ # Calculates the Calinski and Harabasz score.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for calculating score.
+ # @param y [Numo::Int32] (shape: [n_samples]) The predicted labels for each sample.
+ # @return [Float] The Calinski and Harabasz score.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/calinski_harabasz_score.rb#25
+ def score(x, y); end
+end
+
+# DaviesBouldinScore is a class that calculates the Davies-Bouldin score.
+#
+# *Reference*
+# - Davies, D L., and Bouldin, D W., "A Cluster Separation Measure," IEEE Trans. Pattern Analysis and Machine Intelligence, Vol. PAMI-1, No. 2, pp. 224--227, 1979.
+#
+# @example
+# require 'rumale/evaluation_measure/davies_bouldin_score'
+#
+# evaluator = Rumale::EvaluationMeasure::DaviesBouldinScore.new
+# puts evaluator.score(x, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/davies_bouldin_score.rb#18
+class Rumale::EvaluationMeasure::DaviesBouldinScore
+ # Calculates the Davies-Bouldin score.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for calculating score.
+ # @param y [Numo::Int32] (shape: [n_samples]) The predicted labels for each sample.
+ # @return [Float] The Davies-Bouldin score.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/davies_bouldin_score.rb#24
+ def score(x, y); end
+end
+
+# ExplainedVarianceScore is a class that calculates the explained variance score.
+#
+# @example
+# require 'rumale/evaluation_measure/explained_variance_score'
+#
+# evaluator = Rumale::EvaluationMeasure::ExplainedVarianceScore.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/explained_variance_score.rb#14
+class Rumale::EvaluationMeasure::ExplainedVarianceScore
+ include ::Rumale::Base::Evaluator
+
+ # Calculate explained variance score.
+ #
+ # @param y_true [Numo::DFloat] (shape: [n_samples, n_outputs]) Ground truth target values.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples, n_outputs]) Estimated target values.
+ # @return [Float] Explained variance score.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/explained_variance_score.rb#22
+ def score(y_true, y_pred); end
+end
+
+# FScore is a class that calculates the F1-score of the predicted labels.
+#
+# @example
+# require 'rumale/evaluation_measure/f_score'
+#
+# evaluator = Rumale::EvaluationMeasure::FScore.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/f_score.rb#16
+class Rumale::EvaluationMeasure::FScore
+ include ::Rumale::Base::Evaluator
+ include ::Rumale::EvaluationMeasure::PrecisionRecall
+
+ # Create a new evaluation measure calculater for F1-score.
+ #
+ # @param average [String] The average type ('binary', 'micro', 'macro')
+ # @return [FScore] a new instance of FScore
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/f_score.rb#27
+ def initialize(average: T.unsafe(nil)); end
+
+ # Return the average type for calculation of F1-score.
+ #
+ # @return [String] ('binary', 'micro', 'macro')
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/f_score.rb#22
+ def average; end
+
+ # Calculate average F1-score
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted labels.
+ # @return [Float] Average F1-score
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/f_score.rb#36
+ def score(y_true, y_pred); end
+end
+
+# LogLoss is a class that calculates the logarithmic loss of predicted class probability.
+#
+# @example
+# require 'rumale/evaluation_measure/log_loss'
+#
+# evaluator = Rumale::EvaluationMeasure::LogLoss.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/log_loss.rb#14
+class Rumale::EvaluationMeasure::LogLoss
+ include ::Rumale::Base::Evaluator
+
+ # Calculate mean logarithmic loss.
+ # If both y_true and y_pred are array (both shapes are [n_samples]), this method calculates
+ # mean logarithmic loss for binary classification.
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted class probability.
+ # @param eps [Float] A small value close to zero to avoid outputting infinity in logarithmic calcuation.
+ # @return [Float] mean logarithmic loss
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/log_loss.rb#25
+ def score(y_true, y_pred, eps = T.unsafe(nil)); end
+
+ private
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/log_loss.rb#43
+ def binarize(y); end
+end
+
+# MeanAbsoluteError is a class that calculates the mean absolute error.
+#
+# @example
+# require 'rumale/evaluation_measure/mean_absolute_error'
+#
+# evaluator = Rumale::EvaluationMeasure::MeanAbsoluteError.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mean_absolute_error.rb#14
+class Rumale::EvaluationMeasure::MeanAbsoluteError
+ include ::Rumale::Base::Evaluator
+
+ # Calculate mean absolute error.
+ #
+ # @param y_true [Numo::DFloat] (shape: [n_samples, n_outputs]) Ground truth target values.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples, n_outputs]) Estimated target values.
+ # @return [Float] Mean absolute error
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mean_absolute_error.rb#22
+ def score(y_true, y_pred); end
+end
+
+# MeanSquaredError is a class that calculates the mean squared error.
+#
+# @example
+# require 'rumale/evaluation_measure/mean_squared_error'
+#
+# evaluator = Rumale::EvaluationMeasure::MeanSquaredError.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mean_squared_error.rb#14
+class Rumale::EvaluationMeasure::MeanSquaredError
+ include ::Rumale::Base::Evaluator
+
+ # Calculate mean squared error.
+ #
+ # @param y_true [Numo::DFloat] (shape: [n_samples, n_outputs]) Ground truth target values.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples, n_outputs]) Estimated target values.
+ # @return [Float] Mean squared error
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mean_squared_error.rb#22
+ def score(y_true, y_pred); end
+end
+
+# MeanSquaredLogError is a class that calculates the mean squared logarithmic error.
+#
+# @example
+# require 'rumale/evaluation_measure/mean_squared_log_error'
+#
+# evaluator = Rumale::EvaluationMeasure::MeanSquaredLogError.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mean_squared_log_error.rb#14
+class Rumale::EvaluationMeasure::MeanSquaredLogError
+ include ::Rumale::Base::Evaluator
+
+ # Calculate mean squared logarithmic error.
+ #
+ # @param y_true [Numo::DFloat] (shape: [n_samples, n_outputs]) Ground truth target values.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples, n_outputs]) Estimated target values.
+ # @return [Float] Mean squared logarithmic error.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mean_squared_log_error.rb#22
+ def score(y_true, y_pred); end
+end
+
+# MedianAbsoluteError is a class that calculates the median absolute error.
+#
+# @example
+# require 'rumale/evaluation_measure/median_absolute_error'
+#
+# evaluator = Rumale::EvaluationMeasure::MedianAbsoluteError.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/median_absolute_error.rb#14
+class Rumale::EvaluationMeasure::MedianAbsoluteError
+ include ::Rumale::Base::Evaluator
+
+ # Calculate median absolute error.
+ #
+ # @param y_true [Numo::DFloat] (shape: [n_samples]) Ground truth target values.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples]) Estimated target values.
+ # @return [Float] Median absolute error.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/median_absolute_error.rb#22
+ def score(y_true, y_pred); end
+end
+
+# MutualInformation is a class that calculates the mutual information.
+#
+# *Reference*
+# - Vinh, N X., Epps, J., and Bailey, J., "Information Theoretic Measures for Clusterings Comparison: Variants, Properties, Normalization and Correction for Chance," J. Machine Learning Research, vol. 11, pp. 2837--1854, 2010.
+#
+# @example
+# require 'rumale/evaluation_measure/mutual_information'
+#
+# evaluator = Rumale::EvaluationMeasure::MutualInformation.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mutual_information.rb#17
+class Rumale::EvaluationMeasure::MutualInformation
+ include ::Rumale::Base::Evaluator
+
+ # Calculate mutual information
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted cluster labels.
+ # @return [Float] Mutual information.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/mutual_information.rb#25
+ def score(y_true, y_pred); end
+end
+
+# NormalizedMutualInformation is a class that calculates the normalized mutual information.
+#
+# *Reference*
+# - Manning, C D., Raghavan, P., and Schutze, H., "Introduction to Information Retrieval," Cambridge University Press., 2008.
+# - Vinh, N X., Epps, J., and Bailey, J., "Information Theoretic Measures for Clusterings Comparison: Variants, Properties, Normalization and Correction for Chance," J. Machine Learning Research, vol. 11, pp. 2837--1854, 2010.
+#
+# @example
+# require 'rumale/evaluation_measure/normalized_mutual_information'
+#
+# evaluator = Rumale::EvaluationMeasure::NormalizedMutualInformation.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/normalized_mutual_information.rb#19
+class Rumale::EvaluationMeasure::NormalizedMutualInformation
+ include ::Rumale::Base::Evaluator
+
+ # Calculate noramlzied mutual information
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted cluster labels.
+ # @return [Float] Normalized mutual information
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/normalized_mutual_information.rb#27
+ def score(y_true, y_pred); end
+
+ private
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/normalized_mutual_information.rb#42
+ def entropy(y); end
+end
+
+# Precision is a class that calculates the preicision of the predicted labels.
+#
+# @example
+# require 'rumale/evaluation_measure/precision'
+#
+# evaluator = Rumale::EvaluationMeasure::Precision.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision.rb#16
+class Rumale::EvaluationMeasure::Precision
+ include ::Rumale::Base::Evaluator
+ include ::Rumale::EvaluationMeasure::PrecisionRecall
+
+ # Create a new evaluation measure calculater for precision score.
+ #
+ # @param average [String] The average type ('binary', 'micro', 'macro')
+ # @return [Precision] a new instance of Precision
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision.rb#27
+ def initialize(average: T.unsafe(nil)); end
+
+ # Return the average type for calculation of precision.
+ #
+ # @return [String] ('binary', 'micro', 'macro')
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision.rb#22
+ def average; end
+
+ # Calculate average precision.
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted labels.
+ # @return [Float] Average precision
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision.rb#36
+ def score(y_true, y_pred); end
+end
+
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#11
+module Rumale::EvaluationMeasure::PrecisionRecall
+ private
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#39
+ def f_score_each_class(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#93
+ def macro_average_f_score(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#83
+ def macro_average_precision(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#88
+ def macro_average_recall(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#76
+ def micro_average_f_score(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#48
+ def micro_average_precision(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#62
+ def micro_average_recall(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#15
+ def precision_each_class(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#27
+ def recall_each_class(y_true, y_pred); end
+
+ class << self
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#39
+ def f_score_each_class(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#93
+ def macro_average_f_score(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#83
+ def macro_average_precision(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#88
+ def macro_average_recall(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#76
+ def micro_average_f_score(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#48
+ def micro_average_precision(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#62
+ def micro_average_recall(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#15
+ def precision_each_class(y_true, y_pred); end
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/precision_recall.rb#27
+ def recall_each_class(y_true, y_pred); end
+ end
+end
+
+# Purity is a class that calculates the purity of cluatering results.
+#
+# *Reference*
+# - Manning, C D., Raghavan, P., and Schutze, H., "Introduction to Information Retrieval," Cambridge University Press., 2008.
+#
+# @example
+# require 'rumale/evaluation_measure/purity'
+#
+# evaluator = Rumale::EvaluationMeasure::Purity.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/purity.rb#17
+class Rumale::EvaluationMeasure::Purity
+ include ::Rumale::Base::Evaluator
+
+ # Calculate purity
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted cluster labels.
+ # @return [Float] Purity
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/purity.rb#25
+ def score(y_true, y_pred); end
+end
+
+# R2Score is a class that calculates the coefficient of determination for the predicted values.
+#
+# @example
+# require 'rumale/evaluation_measure/r2_score'
+#
+# evaluator = Rumale::EvaluationMeasure::R2Score.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/r2_score.rb#14
+class Rumale::EvaluationMeasure::R2Score
+ include ::Rumale::Base::Evaluator
+
+ # Create a new evaluation measure calculater for coefficient of determination.
+ #
+ # @return [R2Score] a new instance of R2Score
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/r2_score.rb#18
+ def initialize; end
+
+ # Calculate the coefficient of determination.
+ #
+ # @param y_true [Numo::DFloat] (shape: [n_samples, n_outputs]) Ground truth target values.
+ # @param y_pred [Numo::DFloat] (shape: [n_samples, n_outputs]) Estimated taget values.
+ # @return [Float] Coefficient of determination
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/r2_score.rb#25
+ def score(y_true, y_pred); end
+end
+
+# ROCAUC is a class that calculate area under the receiver operation characteristic curve from predicted scores.
+#
+# @example
+# require 'rumale/preprocessing'
+# require 'rumale/linear_model'
+# require 'rumale/evaluation_measure/roc_auc'
+#
+# # Encode labels to integer array.
+# labels = %w[A B B C A A C C C A]
+# label_encoder = Rumale::Preprocessing::LabelEncoder.new
+# y = label_encoder.fit_transform(labels)
+# # Fit classifier.
+# classifier = Rumale::LinearModel::LogisticRegression.new
+# classifier.fit(x, y)
+# # Predict class probabilities.
+# y_score = classifier.predict_proba(x)
+# # Encode labels to one-hot vectors.
+# one_hot_encoder = Rumale::Preprocessing::OneHotEncoder.new
+# y_onehot = one_hot_encoder.fit_transform(y)
+# # Calculate ROC AUC.
+# evaluator = Rumale::EvaluationMeasure::ROCAUC.new
+# puts evaluator.score(y_onehot, y_score)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/roc_auc.rb#29
+class Rumale::EvaluationMeasure::ROCAUC
+ include ::Rumale::Base::Evaluator
+
+ # Calculate area under the curve using the trapezoidal rule.
+ #
+ # @param x [Numo::Int32/Numo::DFloat] (shape: [n_elements])
+ # x coordinates. These are expected to monotonously increase or decrease.
+ # @param y [Numo::Int32/Numo::DFloat] (shape: [n_elements]) y coordinates.
+ # @raise [ArgumentError]
+ # @return [Float] area under the curve.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/roc_auc.rb#99
+ def auc(x, y); end
+
+ # Calculate receiver operation characteristic curve.
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth binary labels.
+ # @param y_score [Numo::DFloat] (shape: [n_samples]) Predicted class probabilities or confidence scores.
+ # @param pos_label [Integer] Label to be a positive label when binarizing the given labels.
+ # If nil is given, the method considers the maximum value of the label as a positive label.
+ # @return [Array] fpr (Numo::DFloat): false positive rates. tpr (Numo::DFloat): true positive rates.
+ # thresholds (Numo::DFloat): thresholds on the decision function used to calculate fpr and tpr.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/roc_auc.rb#62
+ def roc_curve(y_true, y_score, pos_label = T.unsafe(nil)); end
+
+ # Calculate area under the receiver operation characteristic curve (ROC AUC).
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples] or [n_samples, n_classes])
+ # Ground truth binary labels or one-hot encoded multi-labels.
+ # @param y_score [Numo::DFloat] (shape: [n_samples] or [n_samples, n_classes])
+ # Predicted class probabilities or confidence scores.
+ # @return [Float] (macro-averaged) ROC AUC.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/roc_auc.rb#39
+ def score(y_true, y_score); end
+
+ private
+
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/roc_auc.rb#108
+ def binary_roc_curve(y_true, y_score, pos_label = T.unsafe(nil)); end
+end
+
+# Recall is a class that calculates the recall of the predicted labels.
+#
+# @example
+# require 'rumale/evaluation_measure/recall'
+#
+# evaluator = Rumale::EvaluationMeasure::Recall.new
+# puts evaluator.score(ground_truth, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/recall.rb#16
+class Rumale::EvaluationMeasure::Recall
+ include ::Rumale::Base::Evaluator
+ include ::Rumale::EvaluationMeasure::PrecisionRecall
+
+ # Create a new evaluation measure calculater for recall score.
+ #
+ # @param average [String] The average type ('binary', 'micro', 'macro')
+ # @return [Recall] a new instance of Recall
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/recall.rb#27
+ def initialize(average: T.unsafe(nil)); end
+
+ # Return the average type for calculation of recall.
+ #
+ # @return [String] ('binary', 'micro', 'macro')
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/recall.rb#22
+ def average; end
+
+ # Calculate average recall
+ #
+ # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+ # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted labels.
+ # @return [Float] Average recall
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/recall.rb#36
+ def score(y_true, y_pred); end
+end
+
+# SilhouetteScore is a class that calculates the Silhouette Coefficient.
+#
+# *Reference*
+# - Rousseuw, P J., "Silhouettes: A graphical aid to the interpretation and validation of cluster analysis," Journal of Computational and Applied Mathematics, Vol. 20, pp. 53--65, 1987.
+#
+# @example
+# require 'rumale/evaluation_measure/silhouette_score'
+#
+# evaluator = Rumale::EvaluationMeasure::SilhouetteScore.new
+# puts evaluator.score(x, predicted)
+#
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/silhouette_score.rb#18
+class Rumale::EvaluationMeasure::SilhouetteScore
+ include ::Rumale::Base::Evaluator
+
+ # Create a new evaluator that calculates the silhouette coefficient.
+ #
+ # @param metric [String] The metric to calculate the sihouette coefficient.
+ # If metric is 'euclidean', Euclidean distance is used for dissimilarity between sample points.
+ # If metric is 'precomputed', the score method expects to be given a distance matrix.
+ # @return [SilhouetteScore] a new instance of SilhouetteScore
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/silhouette_score.rb#26
+ def initialize(metric: T.unsafe(nil)); end
+
+ # Calculates the silhouette coefficient.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be used for calculating score.
+ # @param y [Numo::Int32] (shape: [n_samples]) The predicted labels for each sample.
+ # @return [Float] The mean of silhouette coefficient.
+ #
+ # source://rumale-evaluation_measure//lib/rumale/evaluation_measure/silhouette_score.rb#35
+ def score(x, y); end
+end
+
+# source://rumale-evaluation_measure//lib/rumale/evaluation_measure/version.rb#8
+Rumale::EvaluationMeasure::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-feature_extraction@1.0.0.rbi b/sorbet/rbi/gems/rumale-feature_extraction@1.0.0.rbi
new file mode 100644
index 00000000..ecd4ddf1
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-feature_extraction@1.0.0.rbi
@@ -0,0 +1,267 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-feature_extraction` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-feature_extraction`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#8
+module Rumale; end
+
+# This module consists of the classes that extract features from raw data.
+#
+# source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#9
+module Rumale::FeatureExtraction; end
+
+# Encode array of feature-value hash to vectors with feature hashing (hashing trick).
+# This encoder turns array of mappings (Array) with pairs of feature names and values into Numo::NArray.
+# This encoder employs signed 32-bit Murmurhash3 as the hash function.
+#
+# @example
+# require 'rumale/feature_extraction/feature_hasher'
+#
+# encoder = Rumale::FeatureExtraction::FeatureHasher.new(n_features: 10)
+# x = encoder.transform([
+# { dog: 1, cat: 2, elephant: 4 },
+# { dog: 2, run: 5 }
+# ])
+#
+# # > pp x
+# # Numo::DFloat#shape=[2,10]
+# # [[0, 0, -4, -1, 0, 0, 0, 0, 0, 2],
+# # [0, 0, 0, -2, -5, 0, 0, 0, 0, 0]]
+#
+# source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#27
+class Rumale::FeatureExtraction::FeatureHasher < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new encoder for converting array of hash consisting of feature names and values to vectors
+ # with feature hashing algorithm.
+ #
+ # @param n_features [Integer] The number of features of encoded samples.
+ # @param alternate_sign [Boolean] The flag indicating whether to reflect the sign of the hash value to the feature value.
+ # @return [FeatureHasher] a new instance of FeatureHasher
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#35
+ def initialize(n_features: T.unsafe(nil), alternate_sign: T.unsafe(nil)); end
+
+ # This method does not do anything. The encoder does not require training.
+ #
+ # @overload fit
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#48
+ def fit(_x = T.unsafe(nil), _y = T.unsafe(nil)); end
+
+ # Encode given the array of feature-value hash.
+ # This method has the same output as the transform method
+ # because the encoder does not require training.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#59
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Encode given the array of feature-value hash.
+ #
+ # @param x [Array] (shape: [n_samples]) The array of hash consisting of feature names and values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_features]) The encoded sample array.
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#67
+ def transform(x); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#95
+ def alternate_sign?; end
+
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/feature_hasher.rb#91
+ def n_features; end
+end
+
+# Encode array of feature-value hash to vectors.
+# This encoder turns array of mappings (Array) with pairs of feature names and values into Numo::NArray.
+#
+# @example
+# require 'rumale/feature_extraction/hash_vectorizer'
+#
+# encoder = Rumale::FeatureExtraction::HashVectorizer.new
+# x = encoder.fit_transform([
+# { foo: 1, bar: 2 },
+# { foo: 3, baz: 1 }
+# ])
+#
+# # > pp x
+# # Numo::DFloat#shape=[2,3]
+# # [[2, 0, 1],
+# # [0, 1, 3]]
+#
+# x = encoder.fit_transform([
+# { city: 'Dubai', temperature: 33 },
+# { city: 'London', temperature: 12 },
+# { city: 'San Francisco', temperature: 18 }
+# ])
+#
+# # > pp x
+# # Numo::DFloat#shape=[3,4]
+# # [[1, 0, 0, 33],
+# # [0, 1, 0, 12],
+# # [0, 0, 1, 18]]
+# # > pp encoder.inverse_transform(x)
+# # [{:city=>"Dubai", :temperature=>33.0},
+# # {:city=>"London", :temperature=>12.0},
+# # {:city=>"San Francisco", :temperature=>18.0}]
+#
+# source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#40
+class Rumale::FeatureExtraction::HashVectorizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new encoder for converting array of hash consisting of feature names and values to vectors.
+ #
+ # @param separator [String] The separator string used for constructing new feature names for categorical feature.
+ # @param sort [Boolean] The flag indicating whether to sort feature names.
+ # @return [HashVectorizer] a new instance of HashVectorizer
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#55
+ def initialize(separator: T.unsafe(nil), sort: T.unsafe(nil)); end
+
+ # Return the list of feature names.
+ #
+ # @return [Array] (size: [n_features])
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#45
+ def feature_names; end
+
+ # Fit the encoder with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#68
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the encoder with given training data, then return encoded data.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#95
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Decode sample matrix to the array of feature-value hash.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The encoded sample array.
+ # @return [Array] The array of hash consisting of feature names and values.
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#126
+ def inverse_transform(x); end
+
+ # Encode given the array of feature-value hash.
+ #
+ # @param x [Array] (shape: [n_samples]) The array of hash consisting of feature names and values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_features]) The encoded sample array.
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#103
+ def transform(x); end
+
+ # Return the hash consisting of pairs of feature names and indices.
+ #
+ # @return [Hash] (size: [n_features])
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#49
+ def vocabulary; end
+
+ private
+
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#143
+ def feature_key_val(fname, fval); end
+
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#148
+ def separator; end
+
+ # @return [Boolean]
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/hash_vectorizer.rb#152
+ def sort_feature?; end
+end
+
+# Transform sample matrix with term frequency (tf) to a normalized tf-idf (inverse document frequency) representation.
+#
+# *Reference*
+# - Manning, C D., Raghavan, P., and Schutze, H., "Introduction to Information Retrieval," Cambridge University Press., 2008.
+#
+# @example
+# require 'rumale/feature_extraction/hash_vectorizer'
+# require 'rumale/feature_extraction/tfidf_transformer'
+#
+# encoder = Rumale::FeatureExtraction::HashVectorizer.new
+# x = encoder.fit_transform([
+# { foo: 1, bar: 2 },
+# { foo: 3, baz: 1 }
+# ])
+#
+# # > pp x
+# # Numo::DFloat#shape=[2,3]
+# # [[2, 0, 1],
+# # [0, 1, 3]]
+#
+# transformer = Rumale::FeatureExtraction::TfidfTransformer.new
+# x_tfidf = transformer.fit_transform(x)
+#
+# # > pp x_tfidf
+# # Numo::DFloat#shape=[2,3]
+# # [[0.959056, 0, 0.283217],
+# # [0, 0.491506, 0.870874]]
+#
+# source://rumale-feature_extraction//lib/rumale/feature_extraction/tfidf_transformer.rb#36
+class Rumale::FeatureExtraction::TfidfTransformer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer for converting tf vectors to tf-idf vectors.
+ #
+ # @param norm [String] The normalization method to be used ('l1', 'l2' and 'none').
+ # @param use_idf [Boolean] The flag indicating whether to use inverse document frequency weighting.
+ # @param smooth_idf [Boolean] The flag indicating whether to apply idf smoothing by log((n_samples + 1) / (df + 1)) + 1.
+ # @param sublinear_tf [Boolean] The flag indicating whether to perform sublinear tf scaling by 1 + log(tf).
+ # @return [TfidfTransformer] a new instance of TfidfTransformer
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/tfidf_transformer.rb#49
+ def initialize(norm: T.unsafe(nil), use_idf: T.unsafe(nil), smooth_idf: T.unsafe(nil), sublinear_tf: T.unsafe(nil)); end
+
+ # Calculate the inverse document frequency for weighting.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate the idf values.
+ # @return [TfidfTransformer]
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/tfidf_transformer.rb#65
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate the idf values, and then transform samples to the tf-idf representation.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate idf and be transformed to tf-idf representation.
+ # @return [Numo::DFloat] The transformed samples.
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/tfidf_transformer.rb#87
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consists of inverse document frequency.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/tfidf_transformer.rb#41
+ def idf; end
+
+ # Perform transforming the given samples to the tf-idf representation.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be transformed.
+ # @return [Numo::DFloat] The transformed samples.
+ #
+ # source://rumale-feature_extraction//lib/rumale/feature_extraction/tfidf_transformer.rb#95
+ def transform(x); end
+end
+
+# source://rumale-feature_extraction//lib/rumale/feature_extraction/version.rb#8
+Rumale::FeatureExtraction::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-kernel_approximation@1.0.0.rbi b/sorbet/rbi/gems/rumale-kernel_approximation@1.0.0.rbi
new file mode 100644
index 00000000..a32e2d23
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-kernel_approximation@1.0.0.rbi
@@ -0,0 +1,174 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-kernel_approximation` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-kernel_approximation`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#8
+module Rumale; end
+
+# Module for kernel approximation algorithms.
+#
+# source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#9
+module Rumale::KernelApproximation; end
+
+# Nystroem is a class that implements feature mapping with Nystroem method.
+#
+# *Reference*
+# - Yang, T., Li, Y., Mahdavi, M., Jin, R., and Zhou, Z-H., "Nystrom Method vs Random Fourier Features: A Theoretical and Empirical Comparison," Advances in NIPS'12, Vol. 1, pp. 476--484, 2012.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/kernel_approximation/nystroem'
+#
+# transformer = Rumale::KernelApproximation::Nystroem.new(kernel: 'rbf', gamma: 1, n_components: 128, random_seed: 1)
+# new_training_samples = transformer.fit_transform(training_samples)
+# new_testing_samples = transformer.transform(testing_samples)
+#
+# source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#22
+class Rumale::KernelApproximation::Nystroem < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer for mapping to kernel feature space with Nystrom method.
+ #
+ # @param kernel [String] The type of kernel function ('rbf', 'linear', 'poly', and 'sigmoid')
+ # @param gamma [Float] The gamma parameter in rbf/poly/sigmoid kernel function.
+ # @param degree [Integer] The degree parameter in polynomial kernel function.
+ # @param coef [Float] The coefficient in poly/sigmoid kernel function.
+ # @param n_components [Integer] The number of dimensions of the kernel feature space.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [Nystroem] a new instance of Nystroem
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#49
+ def initialize(kernel: T.unsafe(nil), gamma: T.unsafe(nil), degree: T.unsafe(nil), coef: T.unsafe(nil), n_components: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the indices sampled training data.
+ #
+ # @return [Numo::Int32] (shape: [n_components])
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#31
+ def component_indices; end
+
+ # Returns the randomly sampled training data for feature mapping.
+ #
+ # @return [Numo::DFloat] (shape: n_components, n_features])
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#27
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#67
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#95
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Returns the normalizing factors.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_components])
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#35
+ def normalizer; end
+
+ # Return the random generator for transformation.
+ #
+ # @return [Random]
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#39
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#105
+ def transform(x); end
+
+ private
+
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/nystroem.rb#114
+ def kernel_mat(x, y = T.unsafe(nil)); end
+end
+
+# Class for RBF kernel feature mapping.
+#
+# *Reference*:
+# - Rahimi, A., and Recht, B., "Random Features for Large-Scale Kernel Machines," Proc. NIPS'07, pp.1177--1184, 2007.
+#
+# @example
+# require 'rumale/kernel_approximation/rbf'
+#
+# transformer = Rumale::KernelApproximation::RBF.new(gamma: 1.0, n_components: 128, random_seed: 1)
+# new_training_samples = transformer.fit_transform(training_samples)
+# new_testing_samples = transformer.transform(testing_samples)
+#
+# source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#21
+class Rumale::KernelApproximation::RBF < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer for mapping to RBF kernel feature space.
+ #
+ # @param gamma [Float] The parameter of RBF kernel: exp(-gamma * x^2).
+ # @param n_components [Integer] The number of dimensions of the RBF kernel feature space.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [RBF] a new instance of RBF
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#41
+ def initialize(gamma: T.unsafe(nil), n_components: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#57
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#76
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the random matrix for transformation.
+ #
+ # @return [Numo::DFloat] (shape: [n_features, n_components])
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#26
+ def random_mat; end
+
+ # Return the random vector for transformation.
+ #
+ # @return [Numo::DFloat] (shape: [n_components])
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#30
+ def random_vec; end
+
+ # Return the random generator for transformation.
+ #
+ # @return [Random]
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#34
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @overload transform
+ #
+ # source://rumale-kernel_approximation//lib/rumale/kernel_approximation/rbf.rb#87
+ def transform(x); end
+end
+
+# source://rumale-kernel_approximation//lib/rumale/kernel_approximation/version.rb#8
+Rumale::KernelApproximation::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-kernel_machine@1.0.0.rbi b/sorbet/rbi/gems/rumale-kernel_machine@1.0.0.rbi
new file mode 100644
index 00000000..f4158cb4
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-kernel_machine@1.0.0.rbi
@@ -0,0 +1,388 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-kernel_machine` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-kernel_machine`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#7
+module Rumale; end
+
+# This module consists of the classes that implement kernel method-based estimator.
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#8
+module Rumale::KernelMachine; end
+
+# KernelFDA is a class that implements Kernel Fisher Discriminant Analysis.
+#
+# *Reference*
+# - Baudat, G., and Anouar, F., "Generalized Discriminant Analysis using a Kernel Approach," Neural Computation, vol. 12, pp. 2385--2404, 2000.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/pairwise_metric'
+# require 'rumale/kernel_machine/kernel_fda'
+#
+# kernel_mat_train = Rumale::PairwiseMetric::rbf_kernel(x_train)
+# kfda = Rumale::KernelMachine::KernelFDA.new
+# mapped_training_samples = kfda.fit_transform(kernel_mat_train, y)
+#
+# kernel_mat_test = Rumale::PairwiseMetric::rbf_kernel(x_test, x_train)
+# mapped_test_samples = kfda.transform(kernel_mat_test)
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#25
+class Rumale::KernelMachine::KernelFDA < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Kernel FDA.
+ #
+ # @param n_components [Integer] The number of components.
+ # @param reg_param [Float] The regularization parameter.
+ # @return [KernelFDA] a new instance of KernelFDA
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#36
+ def initialize(n_components: T.unsafe(nil), reg_param: T.unsafe(nil)); end
+
+ # Returns the eigenvectors for embedding.
+ #
+ # @return [Numo::DFloat] (shape: [n_training_samples, n_components])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#30
+ def alphas; end
+
+ # Fit the model with given training data.
+ # To execute this method, Numo::Linalg must be loaded.
+ #
+ # @param x [Numo::DFloat] (shape: [n_training_samples, n_training_samples])
+ # The kernel matrix of the training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @raise [ArgumentError]
+ # @return [KernelFDA] The learned transformer itself.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#51
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ # To execute this method, Numo::Linalg must be loaded.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_samples])
+ # The kernel matrix of the training data to be used for fitting the model and transformed.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#98
+ def fit_transform(x, y); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to be transformed.
+ # @return [Numo::DFloat] (shape: [n_testing_samples, n_components]) The transformed data.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_fda.rb#111
+ def transform(x); end
+end
+
+# KernelPCA is a class that implements Kernel Principal Component Analysis.
+#
+# *Reference*
+# - Scholkopf, B., Smola, A., and Muller, K-R., "Nonlinear Component Analysis as a Kernel Eigenvalue Problem," Neural Computation, Vol. 10 (5), pp. 1299--1319, 1998.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/pairwise_metric'
+# require 'rumale/kernel_machine/kernel_pca'
+#
+# kernel_mat_train = Rumale::PairwiseMetric::rbf_kernel(training_samples)
+# kpca = Rumale::KernelMachine::KernelPCA.new(n_components: 2)
+# mapped_training_samples = kpca.fit_transform(kernel_mat_train)
+#
+# kernel_mat_test = Rumale::PairwiseMetric::rbf_kernel(test_samples, training_samples)
+# mapped_test_samples = kpca.transform(kernel_mat_test)
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#25
+class Rumale::KernelMachine::KernelPCA < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Kernel PCA.
+ #
+ # @param n_components [Integer] The number of components.
+ # @return [KernelPCA] a new instance of KernelPCA
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#39
+ def initialize(n_components: T.unsafe(nil)); end
+
+ # Returns the eigenvectors of the centered kernel matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_training_samples, n_components])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#34
+ def alphas; end
+
+ # Fit the model with given training data.
+ # To execute this method, Numo::Linalg must be loaded.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#53
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ # To execute this method, Numo::Linalg must be loaded.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#77
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Returns the eigenvalues of the centered kernel matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_components])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#30
+ def lambdas; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to be transformed.
+ # @return [Numo::DFloat] (shape: [n_testing_samples, n_components]) The transformed data.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_pca.rb#88
+ def transform(x); end
+end
+
+# KernelRidge is a class that implements kernel ridge regression.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/pairwise_metric'
+# require 'rumale/kernel_machine/kernel_ridge'
+#
+# kernel_mat_train = Rumale::PairwiseMetric::rbf_kernel(training_samples)
+# kridge = Rumale::KernelMachine::KernelRidge.new(reg_param: 1.0)
+# kridge.fit(kernel_mat_train, training_values)
+#
+# kernel_mat_test = Rumale::PairwiseMetric::rbf_kernel(test_samples, training_samples)
+# results = kridge.predict(kernel_mat_test)
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge.rb#22
+class Rumale::KernelMachine::KernelRidge < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with kernel ridge regression.
+ #
+ # @param reg_param [Float/Numo::DFloat] The regularization parameter.
+ # @return [KernelRidge] a new instance of KernelRidge
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge.rb#32
+ def initialize(reg_param: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_training_samples, n_training_samples])
+ # The kernel matrix of the training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @raise [ArgumentError]
+ # @return [KernelRidge] The learned regressor itself.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge.rb#45
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to predict values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge.rb#74
+ def predict(x); end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_training_sample, n_outputs])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge.rb#27
+ def weight_vec; end
+end
+
+# KernelRidgeClassifier is a class that implements classifier based-on kernel ridge regression.
+# It learns a classifier by converting labels to target values { -1, 1 } and performing kernel ridge regression.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/pairwise_metric'
+# require 'rumale/kernel_machine/kernel_ridge_classifier'
+#
+# kernel_mat_train = Rumale::PairwiseMetric::rbf_kernel(training_samples)
+# kridge = Rumale::KernelMachine::KernelRidgeClassifier.new(reg_param: 0.5)
+# kridge.fit(kernel_mat_train, training_values)
+#
+# kernel_mat_test = Rumale::PairwiseMetric::rbf_kernel(test_samples, training_samples)
+# results = kridge.predict(kernel_mat_test)
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#24
+class Rumale::KernelMachine::KernelRidgeClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier based on kernel ridge regression.
+ #
+ # @param reg_param [Float/Numo::DFloat] The regularization parameter.
+ # @return [KernelRidgeClassifier] a new instance of KernelRidgeClassifier
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#38
+ def initialize(reg_param: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#29
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to predict values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) The confidence score per sample.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#73
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_training_samples, n_training_samples])
+ # The kernel matrix of the training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_training_samples]) The labels to be used for fitting the model.
+ # @raise [ArgumentError]
+ # @return [KernelRidgeClassifier] The learned classifier itself.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#51
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_testing_samples]) Predicted class label per sample.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#84
+ def predict(x); end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_training_sample, n_classes])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_ridge_classifier.rb#33
+ def weight_vec; end
+end
+
+# KernelSVC is a class that implements (Nonlinear) Kernel Support Vector Classifier
+# with stochastic gradient descent (SGD) optimization.
+# For multiclass classification problem, it uses one-vs-the-rest strategy.
+#
+# *Reference*
+# - Shalev-Shwartz, S., Singer, Y., Srebro, N., and Cotter, A., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Mathematical Programming, vol. 127 (1), pp. 3--30, 2011.
+#
+# @example
+# require 'rumale/pairwise_metric'
+# require 'rumale/kernel_machine/kernel_svc'
+#
+# training_kernel_matrix = Rumale::PairwiseMetric::rbf_kernel(training_samples)
+# estimator =
+# Rumale::KernelMachine::KernelSVC.new(reg_param: 1.0, max_iter: 1000, random_seed: 1)
+# estimator.fit(training_kernel_matrix, training_labels)
+# testing_kernel_matrix = Rumale::PairwiseMetric::rbf_kernel(testing_samples, training_samples)
+# results = estimator.predict(testing_kernel_matrix)
+# @note Rumale::SVM provides kernel support vector classifier based on LIBSVM.
+# If you prefer execution speed, you should use Rumale::SVM::SVC.
+# https://github.com/yoshoku/rumale-svm
+#
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#32
+class Rumale::KernelMachine::KernelSVC < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with Kernel Support Vector Machine by the SGD optimization.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param probability [Boolean] The flag indicating whether to perform probability estimation.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [KernelSVC] a new instance of KernelSVC
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#57
+ def initialize(reg_param: T.unsafe(nil), max_iter: T.unsafe(nil), probability: T.unsafe(nil), n_jobs: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (shape: [n_classes])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#41
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_testing_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#113
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_training_samples, n_training_samples])
+ # The kernel matrix of the training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_training_samples]) The labels to be used for fitting the model.
+ # @return [KernelSVC] The learned classifier itself.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#75
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_testing_samples]) Predicted class label per sample.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#124
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+ # The kernel matrix between testing samples and training samples to predict the labels.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#144
+ def predict_proba(x); end
+
+ # Return the random generator for performing random sampling.
+ #
+ # @return [Random]
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#45
+ def rng; end
+
+ # Return the weight vector for Kernel SVC.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_training_sample])
+ #
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#37
+ def weight_vec; end
+
+ private
+
+ # source://rumale-kernel_machine//lib/rumale/kernel_machine/kernel_svc.rb#161
+ def partial_fit(x, bin_y); end
+end
+
+# source://rumale-kernel_machine//lib/rumale/kernel_machine/version.rb#8
+Rumale::KernelMachine::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-linear_model@1.0.0.rbi b/sorbet/rbi/gems/rumale-linear_model@1.0.0.rbi
new file mode 100644
index 00000000..7d950f4e
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-linear_model@1.0.0.rbi
@@ -0,0 +1,1052 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-linear_model` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-linear_model`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#5
+module Rumale; end
+
+# This module consists of the classes that implement generalized linear models.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#7
+module Rumale::LinearModel; end
+
+# BaseEstimator is an abstract class for implementation of linear model. This class is used internally.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#9
+class Rumale::LinearModel::BaseEstimator < ::Rumale::Base::Estimator
+ # Return the bias term (a.k.a. intercept).
+ #
+ # @return [Numo::DFloat] (shape: [n_outputs/n_classes])
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#16
+ def bias_term; end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_outputs/n_classes, n_features])
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#12
+ def weight_vec; end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#22
+ def expand_feature(x); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#41
+ def fit_bias?; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/base_estimator.rb#27
+ def split_weight(w); end
+end
+
+# ElasticNet is a class that implements Elastic-net Regression with coordinate descent optimization.
+#
+# *Reference*
+# - Friedman, J., Hastie, T., and Tibshirani, R., "Regularization Paths for Generalized Linear Models via Coordinate Descent," Journal of Statistical Software, 33 (1), pp. 1--22, 2010.
+# - Simon, N., Friedman, J., and Hastie, T., "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression," arXiv preprint arXiv:1311.6529, 2013.
+#
+# @example
+# require 'rumale/linear_model/elastic_net'
+#
+# estimator = Rumale::LinearModel::ElasticNet.new(reg_param: 0.1, l1_ratio: 0.5)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#23
+class Rumale::LinearModel::ElasticNet < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new Elastic-net regressor.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param l1_ratio [Float] The elastic-net mixing parameter.
+ # If l1_ratio = 1, the regularization is similar to Lasso.
+ # If l1_ratio = 0, the regularization is similar to Ridge.
+ # If 0 < l1_ratio < 1, the regularization is a combination of L1 and L2.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @return [ElasticNet] a new instance of ElasticNet
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#42
+ def initialize(reg_param: T.unsafe(nil), l1_ratio: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [ElasticNet] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#59
+ def fit(x, y); end
+
+ # Return the number of iterations performed in coordinate descent optimization.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#28
+ def n_iter; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#80
+ def predict(x); end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#88
+ def partial_fit(x, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#119
+ def partial_fit_multi(x, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#155
+ def sign(z); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#161
+ def single_target?(y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/elastic_net.rb#151
+ def soft_threshold(z, threshold); end
+end
+
+# Lasso is a class that implements Lasso Regression with coordinate descent optimization.
+#
+# *Reference*
+# - Friedman, J., Hastie, T., and Tibshirani, R., "Regularization Paths for Generalized Linear Models via Coordinate Descent," Journal of Statistical Software, 33 (1), pp. 1--22, 2010.
+# - Simon, N., Friedman, J., and Hastie, T., "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression," arXiv preprint arXiv:1311.6529, 2013.
+#
+# @example
+# require 'rumale/linear_model/lasso'
+#
+# estimator = Rumale::LinearModel::Lasso.new(reg_param: 0.1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#23
+class Rumale::LinearModel::Lasso < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new Lasso regressor.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @return [Lasso] a new instance of Lasso
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#38
+ def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [Lasso] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#54
+ def fit(x, y); end
+
+ # Return the number of iterations performed in coordinate descent optimization.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#28
+ def n_iter; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#75
+ def predict(x); end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#83
+ def partial_fit(x, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#112
+ def partial_fit_multi(x, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#146
+ def sign(z); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#152
+ def single_target?(y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/lasso.rb#142
+ def soft_threshold(z, threshold); end
+end
+
+# LinearRegression is a class that implements ordinary least square linear regression
+# with singular value decomposition (SVD) or L-BFGS optimization.
+#
+# @example
+# require 'rumale/linear_model/linear_regression'
+#
+# estimator = Rumale::LinearModel::LinearRegression.new
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# # If Numo::Linalg is installed, you can specify 'svd' for the solver option.
+# require 'numo/linalg/autoloader'
+# require 'rumale/linear_model/linear_regression'
+#
+# estimator = Rumale::LinearModel::LinearRegression.new(solver: 'svd')
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#30
+class Rumale::LinearModel::LinearRegression < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new ordinary least square linear regressor.
+ #
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # If solver is 'svd', this parameter is ignored.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # If solver is 'svd', this parameter is ignored.
+ # @param solver [String] The algorithm to calculate weights. ('auto', 'svd' or 'lbfgs').
+ # 'auto' chooses the 'svd' solver if Numo::Linalg is loaded. Otherwise, it chooses the 'lbfgs' solver.
+ # 'svd' performs singular value decomposition of samples.
+ # 'lbfgs' uses the L-BFGS method for optimization.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # If solver is 'svd', this parameter is ignored.
+ # @return [LinearRegression] a new instance of LinearRegression
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#48
+ def initialize(fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), solver: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [LinearRegression] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#69
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#87
+ def predict(x); end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#102
+ def partial_fit_lbfgs(base_x, base_y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#95
+ def partial_fit_svd(x, y); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/linear_regression.rb#129
+ def single_target?(y); end
+end
+
+# LogisticRegression is a class that implements (multinomial) Logistic Regression.
+#
+# @example
+# require 'rumale/linear_model/logistic_regression'
+#
+# estimator = Rumale::LinearModel::LogisticRegression.new(reg_param: 1.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+# @note Rumale::SVM provides Logistic Regression based on LIBLINEAR.
+# If you prefer execution speed, you should use Rumale::SVM::LogisticRegression.
+# https://github.com/yoshoku/rumale-svm
+#
+# source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#26
+class Rumale::LinearModel::LogisticRegression < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with Logistic Regression.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # If fit_bias is true, the feature vector v becomes [v; bias_scale].
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param n_jobs [Integer] The number of jobs for running the predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # 'iterate.dat' file is generated by lbfgsb.rb.
+ # @return [LogisticRegression] a new instance of LogisticRegression
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#48
+ def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (shape: [n_classes])
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#31
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#81
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [LogisticRegression] The learned classifier itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#66
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#91
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#108
+ def predict_proba(x); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#178
+ def multiclass_problem?; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/logistic_regression.rb#123
+ def partial_fit(base_x, base_y); end
+end
+
+# This module consists of the classes that implement loss function for linear model.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#86
+module Rumale::LinearModel::Loss; end
+
+# EpsilonInsensitive is a class that calculates epsilon insensitive for support vector regressor.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#158
+class Rumale::LinearModel::Loss::EpsilonInsensitive
+ # @return [EpsilonInsensitive] a new instance of EpsilonInsensitive
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#160
+ def initialize(epsilon: T.unsafe(nil)); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#170
+ def dloss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#165
+ def loss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#180
+ def name; end
+end
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#185
+Rumale::LinearModel::Loss::EpsilonInsensitive::NAME = T.let(T.unsafe(nil), String)
+
+# HingeLoss is a class that calculates hinge loss for support vector classifier.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#133
+class Rumale::LinearModel::Loss::HingeLoss
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#140
+ def dloss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#135
+ def loss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#148
+ def name; end
+end
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#153
+Rumale::LinearModel::Loss::HingeLoss::NAME = T.let(T.unsafe(nil), String)
+
+# LogLoss is a class that calculates logistic loss for logistic regression.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#111
+class Rumale::LinearModel::Loss::LogLoss
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#118
+ def dloss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#113
+ def loss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#123
+ def name; end
+end
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#128
+Rumale::LinearModel::Loss::LogLoss::NAME = T.let(T.unsafe(nil), String)
+
+# MeanSquaredError is a class that calculates mean squared error for linear regression model.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#89
+class Rumale::LinearModel::Loss::MeanSquaredError
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#96
+ def dloss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#91
+ def loss(out, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#101
+ def name; end
+end
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#106
+Rumale::LinearModel::Loss::MeanSquaredError::NAME = T.let(T.unsafe(nil), String)
+
+# NNLS is a class that implements non-negative least squares regression.
+# NNLS solves least squares problem under non-negative constraints on the coefficient using L-BFGS-B method.
+#
+# @example
+# require 'rumale/linear_model/nnls'
+#
+# estimator = Rumale::LinearModel::NNLS.new(reg_param: 0.01)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#22
+class Rumale::LinearModel::NNLS < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with non-negative least squares method.
+ #
+ # @param reg_param [Float] The regularization parameter for L2 regularization term.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # If solver = 'svd', this parameter is ignored.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @return [NNLS] a new instance of NNLS
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#39
+ def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [NNLS] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#56
+ def fit(x, y); end
+
+ # Returns the number of iterations when converged.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#27
+ def n_iter; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#86
+ def predict(x); end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#94
+ def nnls_fnc(w, x, y, alpha); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/nnls.rb#104
+ def single_target?(y); end
+end
+
+# This module consists of the class that implements stochastic gradient descent (SGD) optimizer.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#52
+module Rumale::LinearModel::Optimizer; end
+
+# SGD is a class that implements SGD optimizer.
+# This class is used internally.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#56
+class Rumale::LinearModel::Optimizer::SGD
+ # Create a new SGD optimizer.
+ #
+ # @param learning_rate [Float] The initial value of learning rate.
+ # @param momentum [Float] The initial value of momentum.
+ # @param decay [Float] The smoothing parameter.
+ # @return [SGD] a new instance of SGD
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#62
+ def initialize(learning_rate: T.unsafe(nil), momentum: T.unsafe(nil), decay: T.unsafe(nil)); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#75
+ def call(weight, gradient); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#70
+ def current_learning_rate; end
+end
+
+# This module consists of the classes that implement penalty (regularization) term.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#9
+module Rumale::LinearModel::Penalty; end
+
+# L1Penalty is a class that applies L1 penalty to weight vector of linear model.
+# This class is used internally.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#28
+class Rumale::LinearModel::Penalty::L1Penalty
+ # @return [L1Penalty] a new instance of L1Penalty
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#30
+ def initialize(reg_param:); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#36
+ def call(weight, lr); end
+end
+
+# L2Penalty is a class that applies L2 penalty to weight vector of linear model.
+# This class is used internally.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#13
+class Rumale::LinearModel::Penalty::L2Penalty
+ # @return [L2Penalty] a new instance of L2Penalty
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#15
+ def initialize(reg_param:); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#20
+ def call(weight, lr); end
+end
+
+# Ridge is a class that implements Ridge Regression
+# with singular value decomposition (SVD) or L-BFGS optimization.
+#
+# @example
+# require 'rumale/linear_model/ridge'
+#
+# estimator = Rumale::LinearModel::Ridge.new(reg_param: 0.1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# # If Numo::Linalg is installed, you can specify 'svd' for the solver option.
+# require 'numo/linalg/autoloader'
+# require 'rumale/linear_model/ridge'
+#
+# estimator = Rumale::LinearModel::Ridge.new(reg_param: 0.1, solver: 'svd')
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#29
+class Rumale::LinearModel::Ridge < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new Ridge regressor.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # If solver is 'svd', this parameter is ignored.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # If solver is 'svd', this parameter is ignored.
+ # @param solver [String] The algorithm to calculate weights. ('auto', 'svd', or 'lbfgs').
+ # 'auto' chooses the 'svd' solver if Numo::Linalg is loaded. Otherwise, it chooses the 'lbfgs' solver.
+ # 'svd' performs singular value decomposition of samples.
+ # 'lbfgs' uses the L-BFGS method for optimization.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # If solver is 'svd', this parameter is ignored.
+ # @return [Ridge] a new instance of Ridge
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#48
+ def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), solver: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [Ridge] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#70
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#88
+ def predict(x); end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#105
+ def partial_fit_lbfgs(base_x, base_y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#96
+ def partial_fit_svd(x, y); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/ridge.rb#132
+ def single_target?(y); end
+end
+
+# SGDClassifier is a class that implements linear classifier with stochastic gradient descent optimization.
+#
+# *Reference*
+# - Shalev-Shwartz, S., and Singer, Y., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Proc. ICML'07, pp. 807--814, 2007.
+# - Tsuruoka, Y., Tsujii, J., and Ananiadou, S., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty," Proc. ACL'09, pp. 477--485, 2009.
+# - Bottou, L., "Large-Scale Machine Learning with Stochastic Gradient Descent," Proc. COMPSTAT'10, pp. 177--186, 2010.
+#
+# @example
+# require 'rumale/linear_model/sgd_classifier'
+#
+# estimator =
+# Rumale::LinearModel::SGDClassifier.new(loss: 'hinge', reg_param: 1.0, max_iter: 1000, batch_size: 50, random_seed: 1)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#25
+class Rumale::LinearModel::SGDClassifier < ::Rumale::LinearModel::SGDEstimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new linear classifier with stochastic gradient descent optimization.
+ #
+ # @param loss [String] The loss function to be used ('hinge' and 'log_loss').
+ # @param learning_rate [Float] The initial value of learning rate.
+ # The learning rate decreases as the iteration proceeds according to the equation: learning_rate / (1 + decay * t).
+ # @param decay [Float] The smoothing parameter for decreasing learning rate as the iteration proceeds.
+ # If nil is given, the decay sets to 'reg_param * learning_rate'.
+ # @param momentum [Float] The momentum factor.
+ # @param penalty [String] The regularization type to be used ('l1', 'l2', and 'elasticnet').
+ # @param l1_ratio [Float] The elastic-net type regularization mixing parameter.
+ # If penalty set to 'l2' or 'l1', this parameter is ignored.
+ # If l1_ratio = 1, the regularization is similar to Lasso.
+ # If l1_ratio = 0, the regularization is similar to Ridge.
+ # If 0 < l1_ratio < 1, the regularization is a combination of L1 and L2.
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param batch_size [Integer] The size of the mini batches.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [SGDClassifier] a new instance of SGDClassifier
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#63
+ def initialize(loss: T.unsafe(nil), learning_rate: T.unsafe(nil), decay: T.unsafe(nil), momentum: T.unsafe(nil), penalty: T.unsafe(nil), reg_param: T.unsafe(nil), l1_ratio: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (shape: [n_classes])
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#30
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#147
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [SGDClassifier] The learned classifier itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#105
+ def fit(x, y); end
+
+ # Perform 1-epoch of stochastic gradient descent optimization with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The binary labels to be used for fitting the model.
+ # @return [SGDClassifier] The learned classifier itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#122
+ def partial_fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#157
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probailities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#167
+ def predict_proba(x); end
+
+ # Return the random generator for performing random sampling.
+ #
+ # @return [Random]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#34
+ def rng; end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#175
+ def fit_hinge(x, y); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#207
+ def fit_log_loss(x, y); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#283
+ def multiclass_problem?; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#256
+ def predict_hinge(x); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#272
+ def predict_log_loss(x); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#232
+ def predict_proba_hinge(x); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_classifier.rb#245
+ def predict_proba_log_loss(x); end
+end
+
+# SGDEstimator is an abstract class for implementation of linear model with mini-batch stochastic gradient descent (SGD) optimization.
+# This class is used internally.
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#191
+class Rumale::LinearModel::SGDEstimator < ::Rumale::LinearModel::BaseEstimator
+ # Create an initial linear model with SGD.
+ #
+ # @return [SGDEstimator] a new instance of SGDEstimator
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#193
+ def initialize; end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#267
+ def apply_l1_penalty?; end
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#263
+ def apply_l2_penalty?; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#218
+ def init_vars(n_features); end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#282
+ def l1_reg_param; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#271
+ def l2_reg_param; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#228
+ def partial_fit_(x, y, max_iter: T.unsafe(nil), init: T.unsafe(nil)); end
+end
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#214
+Rumale::LinearModel::SGDEstimator::ELASTICNET_PENALTY = T.let(T.unsafe(nil), String)
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#213
+Rumale::LinearModel::SGDEstimator::L1_PENALTY = T.let(T.unsafe(nil), String)
+
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_estimator.rb#212
+Rumale::LinearModel::SGDEstimator::L2_PENALTY = T.let(T.unsafe(nil), String)
+
+# SGDRegressor is a class that implements linear regressor with stochastic gradient descent optimization.
+#
+# *Reference*
+# - Shalev-Shwartz, S., and Singer, Y., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Proc. ICML'07, pp. 807--814, 2007.
+# - Tsuruoka, Y., Tsujii, J., and Ananiadou, S., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty," Proc. ACL'09, pp. 477--485, 2009.
+# - Bottou, L., "Large-Scale Machine Learning with Stochastic Gradient Descent," Proc. COMPSTAT'10, pp. 177--186, 2010.
+#
+# @example
+# require 'rumale/linear_model/sgd_regressor'
+#
+# estimator =
+# Rumale::LinearModel::SGDRegressor.new(loss: 'squared_error', reg_param: 1.0, max_iter: 1000, batch_size: 50, random_seed: 1)
+# estimator.fit(training_samples, traininig_target_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#24
+class Rumale::LinearModel::SGDRegressor < ::Rumale::LinearModel::SGDEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new linear regressor with stochastic gradient descent optimization.
+ #
+ # @param loss [String] The loss function to be used ('squared_error' and 'epsilon_insensitive').
+ # @param learning_rate [Float] The initial value of learning rate.
+ # The learning rate decreases as the iteration proceeds according to the equation: learning_rate / (1 + decay * t).
+ # @param decay [Float] The smoothing parameter for decreasing learning rate as the iteration proceeds.
+ # If nil is given, the decay sets to 'reg_param * learning_rate'.
+ # @param momentum [Float] The momentum factor.
+ # @param penalty [String] The regularization type to be used ('l1', 'l2', and 'elasticnet').
+ # @param l1_ratio [Float] The elastic-net type regularization mixing parameter.
+ # If penalty set to 'l2' or 'l1', this parameter is ignored.
+ # If l1_ratio = 1, the regularization is similar to Lasso.
+ # If l1_ratio = 0, the regularization is similar to Ridge.
+ # If 0 < l1_ratio < 1, the regularization is a combination of L1 and L2.
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param epsilon [Float] The margin of tolerance. If loss set to 'squared_error', this parameter is ignored.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param batch_size [Integer] The size of the mini batches.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
+ # If nil is given, the method does not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [SGDRegressor] a new instance of SGDRegressor
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#59
+ def initialize(loss: T.unsafe(nil), learning_rate: T.unsafe(nil), decay: T.unsafe(nil), momentum: T.unsafe(nil), penalty: T.unsafe(nil), reg_param: T.unsafe(nil), l1_ratio: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), epsilon: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @return [SGDRegressor] The learned regressor itself.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#103
+ def fit(x, y); end
+
+ # Perform 1-epoch of stochastic gradient descent optimization with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples]) The single target variables to be used for fitting the model.
+ # @return [SGDRegressor] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#132
+ def partial_fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#150
+ def predict(x); end
+
+ # Return the random generator for performing random sampling.
+ #
+ # @return [Random]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/sgd_regressor.rb#29
+ def rng; end
+end
+
+# SVC is a class that implements Support Vector Classifier with the squared hinge loss.
+# For multiclass classification problem, it uses one-vs-the-rest strategy.
+#
+# @example
+# require 'rumale/linear_model/svc'
+#
+# estimator =
+# Rumale::LinearModel::SVC.new(reg_param: 1.0)
+# estimator.fit(training_samples, traininig_labels)
+# results = estimator.predict(testing_samples)
+# @note Rumale::SVM provides linear support vector classifier based on LIBLINEAR.
+# If you prefer execution speed, you should use Rumale::SVM::LinearSVC.
+# https://github.com/yoshoku/rumale-svm
+#
+# source://rumale-linear_model//lib/rumale/linear_model/svc.rb#28
+class Rumale::LinearModel::SVC < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new linear classifier with Support Vector Machine with the squared hinge loss.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param probability [Boolean] The flag indicating whether to perform probability estimation.
+ # @param n_jobs [Integer] The number of jobs for running the fit and predict methods in parallel.
+ # If nil is given, the methods do not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # 'iterate.dat' file is generated by lbfgsb.rb.
+ # @return [SVC] a new instance of SVC
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#50
+ def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), probability: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (shape: [n_classes])
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#33
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#110
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [SVC] The learned classifier itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#70
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#120
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probailities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#142
+ def predict_proba(x); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#191
+ def multiclass_problem?; end
+
+ # source://rumale-linear_model//lib/rumale/linear_model/svc.rb#159
+ def partial_fit(base_x, bin_y); end
+end
+
+# SVR is a class that implements Support Vector Regressor with the squared epsilon-insensitive loss.
+#
+# @example
+# require 'rumale/linear_model/svr'
+#
+# estimator = Rumale::LinearModel::SVR.new(reg_param: 1.0, epsilon: 0.1)
+# estimator.fit(training_samples, traininig_target_values)
+# results = estimator.predict(testing_samples)
+# @note Rumale::SVM provides linear and kernel support vector regressor based on LIBLINEAR and LIBSVM.
+# If you prefer execution speed, you should use Rumale::SVM::LinearSVR.
+# https://github.com/yoshoku/rumale-svm
+#
+# source://rumale-linear_model//lib/rumale/linear_model/svr.rb#25
+class Rumale::LinearModel::SVR < ::Rumale::LinearModel::BaseEstimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with Support Vector Machine by the SGD optimization.
+ #
+ # @param reg_param [Float] The regularization parameter.
+ # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+ # @param bias_scale [Float] The scale of the bias term.
+ # @param epsilon [Float] The margin of tolerance.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
+ # If nil is given, the method does not execute in parallel.
+ # If zero or less is given, it becomes equal to the number of processors.
+ # This parameter is ignored if the Parallel gem is not loaded.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @return [SVR] a new instance of SVR
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svr.rb#42
+ def initialize(reg_param: T.unsafe(nil), fit_bias: T.unsafe(nil), bias_scale: T.unsafe(nil), epsilon: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), n_jobs: T.unsafe(nil), verbose: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [SVR] The learned regressor itself.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svr.rb#62
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-linear_model//lib/rumale/linear_model/svr.rb#90
+ def predict(x); end
+
+ private
+
+ # source://rumale-linear_model//lib/rumale/linear_model/svr.rb#98
+ def partial_fit(base_x, single_y); end
+end
+
+# source://rumale-linear_model//lib/rumale/linear_model/version.rb#8
+Rumale::LinearModel::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-manifold@1.0.0.rbi b/sorbet/rbi/gems/rumale-manifold@1.0.0.rbi
new file mode 100644
index 00000000..628d5ee9
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-manifold@1.0.0.rbi
@@ -0,0 +1,469 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-manifold` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-manifold`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#8
+module Rumale; end
+
+# Module for data embedding algorithms.
+#
+# source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#9
+module Rumale::Manifold; end
+
+# HessianEigenmaps is a class that implements Hessian Eigenmaps.
+#
+# *Reference*
+# - Donoho, D. L., and Grimes, C., "Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data," Proc. Natl. Acad. Sci. USA, vol. 100, no. 10, pp. 5591--5596, 2003.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/manifold/hessian_eigenmaps'
+#
+# hem = Rumale::Manifold::HessianEigenmaps.new(n_components: 2, n_neighbors: 15)
+# z = hem.fit_transform(x)
+#
+# source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#21
+class Rumale::Manifold::HessianEigenmaps < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Hessian Eigenmaps.
+ #
+ # @param n_components [Integer] The number of dimensions on representation space.
+ # @param n_neighbors [Integer] The number of nearest neighbors for k-nearest neighbor graph construction.
+ # @param reg_param [Float] The reguralization parameter for local gram matrix in transform method.
+ # @return [HessianEigenmaps] a new instance of HessianEigenmaps
+ #
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#33
+ def initialize(n_neighbors: T.unsafe(nil), n_components: T.unsafe(nil), reg_param: T.unsafe(nil)); end
+
+ # Return the data in representation space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_components])
+ #
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#26
+ def embedding; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#47
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#92
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Transform the given data with the learned model.
+ # For out-of-sample data embedding, the same method as Locally Linear Embedding is used.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#107
+ def transform(x); end
+
+ private
+
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#130
+ def neighbor_ids(distance_mat, n_neighbors, contain_self); end
+
+ # source://rumale-manifold//lib/rumale/manifold/hessian_eigenmaps.rb#141
+ def tangent_coordinates(x); end
+end
+
+# LaplacianEigenmaps is a class that implements Laplacian Eigenmaps.
+#
+# *Reference*
+# - Belkin, M., and Niyogi, P., "Laplacian Eigenmaps and Spectral Techniques for Embedding and Clustering," Proc. NIPS'01, pp. 585--591, 2001.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/manifold/laplacian_eigenmaps'
+#
+# lem = Rumale::Manifold::LaplacianEigenmaps.new(n_components: 2, n_neighbors: 15)
+# z = lem.fit_transform(x)
+#
+# source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#21
+class Rumale::Manifold::LaplacianEigenmaps < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Laplacian Eigenmaps.
+ #
+ # @param n_components [Integer] The number of dimensions on representation space.
+ # @param gamma [Nil/Float] The parameter of RBF kernel. If nil is given, the weight of affinity matrix sets to 1.
+ # @param n_neighbors [Integer] The number of nearest neighbors for k-nearest neighbor graph construction.
+ # @return [LaplacianEigenmaps] a new instance of LaplacianEigenmaps
+ #
+ # source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#33
+ def initialize(n_components: T.unsafe(nil), gamma: T.unsafe(nil), n_neighbors: T.unsafe(nil)); end
+
+ # Return the data in representation space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_components])
+ #
+ # source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#26
+ def embedding; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#47
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#75
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#87
+ def transform(x); end
+
+ private
+
+ # source://rumale-manifold//lib/rumale/manifold/laplacian_eigenmaps.rb#105
+ def k_neighbor_graph(distance_mat, n_neighbors, contain_self); end
+end
+
+# LocalTangentSpaceAlignment is a class that implements Local Tangent Space Alignment.
+#
+# *Reference*
+# - Zhang, A., and Zha, H., "Principal Manifolds and Nonlinear Diemnsion Reduction via Local Tangent Space Alignment," SIAM Journal on Scientific Computing, vol. 26, iss. 1, pp. 313-338, 2004.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/manifold/local_tangent_space_alignment'
+#
+# lem = Rumale::Manifold::LocalTangentSpaceAlignment.new(n_components: 2, n_neighbors: 15)
+# z = lem.fit_transform(x)
+#
+# source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#21
+class Rumale::Manifold::LocalTangentSpaceAlignment < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Local Tangent Space Alignment.
+ #
+ # @param n_components [Integer] The number of dimensions on representation space.
+ # @param n_neighbors [Integer] The number of nearest neighbors for finding k-nearest neighbors
+ # @param reg_param [Float] The reguralization parameter for local gram matrix in transform method.
+ # @return [LocalTangentSpaceAlignment] a new instance of LocalTangentSpaceAlignment
+ #
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#33
+ def initialize(n_components: T.unsafe(nil), n_neighbors: T.unsafe(nil), reg_param: T.unsafe(nil)); end
+
+ # Return the data in representation space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_components])
+ #
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#26
+ def embedding; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#47
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#86
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Transform the given data with the learned model.
+ # For out-of-sample data embedding, the same method as Locally Linear Embedding is used.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#99
+ def transform(x); end
+
+ private
+
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#122
+ def neighbor_ids(distance_mat, n_neighbors, contain_self); end
+
+ # source://rumale-manifold//lib/rumale/manifold/local_tangent_space_alignment.rb#133
+ def right_singular_vectors(x_local, n_singulars); end
+end
+
+# LocallyLinearEmbedding is a class that implements Locally Linear Embedding.
+#
+# *Reference*
+# - Roweis, S., and Saul, L., "Nonlinear Dimensionality Reduction by Locally Linear Embedding," J. of Science, vol. 290, pp. 2323-2326, 2000.
+#
+# @example
+# require 'numo/linalg/autoloader'
+# require 'rumale/manifold/locally_linear_embedding'
+#
+# lem = Rumale::Manifold::LocallyLinearEmbedding.new(n_components: 2, n_neighbors: 15)
+# z = lem.fit_transform(x)
+#
+# source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#21
+class Rumale::Manifold::LocallyLinearEmbedding < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with Locally Linear Embedding.
+ #
+ # @param n_components [Integer] The number of dimensions on representation space.
+ # @param n_neighbors [Integer] The number of nearest neighbors for k-nearest neighbor graph construction.
+ # @param reg_param [Float] The reguralization parameter for local gram matrix.
+ # @return [LocallyLinearEmbedding] a new instance of LocallyLinearEmbedding
+ #
+ # source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#33
+ def initialize(n_components: T.unsafe(nil), n_neighbors: T.unsafe(nil), reg_param: T.unsafe(nil)); end
+
+ # Return the data in representation space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_components])
+ #
+ # source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#26
+ def embedding; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#47
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#81
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#93
+ def transform(x); end
+
+ private
+
+ # source://rumale-manifold//lib/rumale/manifold/locally_linear_embedding.rb#116
+ def neighbor_ids(distance_mat, n_neighbors, contain_self); end
+end
+
+# MDS is a class that implements Metric Multidimensional Scaling (MDS)
+# with Scaling by MAjorizing a COmplicated Function (SMACOF) algorithm.
+#
+# *Reference*
+# - Groenen, P J. F. and van de Velden, M., "Multidimensional Scaling by Majorization: A Review," J. of Statistical Software, Vol. 73 (8), 2016.
+#
+# @example
+# require 'rumale/manifold/mds'
+#
+# mds = Rumale::Manifold::MDS.new(init: 'pca', max_iter: 500, random_seed: 1)
+# representations = mds.fit_transform(samples)
+#
+# source://rumale-manifold//lib/rumale/manifold/mds.rb#23
+class Rumale::Manifold::MDS < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with MDS.
+ #
+ # @param n_components [Integer] The number of dimensions on representation space.
+ # @param metric [String] The metric to calculate the distances in original space.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance in original space.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @param init [String] The init is a method to initialize the representaion space.
+ # If init is 'random', the representaion space is initialized with normal random variables.
+ # If init is 'pca', the result of principal component analysis as the initial value of the representation space.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of stress value for terminating optimization.
+ # If tol is nil, it does not use stress value as a criterion for terminating the optimization.
+ # @param verbose [Boolean] The flag indicating whether to output stress value during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [MDS] a new instance of MDS
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#56
+ def initialize(n_components: T.unsafe(nil), metric: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the data in representation space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_components])
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#28
+ def embedding; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#77
+ def fit(x, _not_used = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#120
+ def fit_transform(x, _not_used = T.unsafe(nil)); end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#36
+ def n_iter; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#40
+ def rng; end
+
+ # Return the stress function value after optimization.
+ #
+ # @return [Float]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#32
+ def stress; end
+
+ private
+
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#147
+ def calc_stress(hi_distance_mat, lo_distance_mat); end
+
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#129
+ def init_embedding(x); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/mds.rb#140
+ def terminate?(old_stress, new_stress); end
+end
+
+# TSNE is a class that implements t-Distributed Stochastic Neighbor Embedding (t-SNE)
+# with fixed-point optimization algorithm.
+# Fixed-point algorithm usually converges faster than gradient descent method and
+# do not need the learning parameters such as the learning rate and momentum.
+#
+# *Reference*
+# - van der Maaten, L., and Hinton, G., "Visualizing data using t-SNE," J. of Machine Learning Research, vol. 9, pp. 2579--2605, 2008.
+# - Yang, Z., King, I., Xu, Z., and Oja, E., "Heavy-Tailed Symmetric Stochastic Neighbor Embedding," Proc. NIPS'09, pp. 2169--2177, 2009.
+#
+# @example
+# require 'rumale/manifold/tsne'
+#
+# tsne = Rumale::Manifold::TSNE.new(perplexity: 40.0, init: 'pca', max_iter: 500, random_seed: 1)
+# representations = tsne.fit_transform(samples)
+#
+# source://rumale-manifold//lib/rumale/manifold/tsne.rb#26
+class Rumale::Manifold::TSNE < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with t-SNE.
+ #
+ # @param n_components [Integer] The number of dimensions on representation space.
+ # @param perplexity [Float] The effective number of neighbors for each point. Perplexity are typically set from 5 to 50.
+ # @param metric [String] The metric to calculate the distances in original space.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance in original space.
+ # If metric is 'precomputed', the fit and fit_transform methods expect to be given a distance matrix.
+ # @param init [String] The init is a method to initialize the representaion space.
+ # If init is 'random', the representaion space is initialized with normal random variables.
+ # If init is 'pca', the result of principal component analysis as the initial value of the representation space.
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of KL-divergence for terminating optimization.
+ # If tol is nil, it does not use KL divergence as a criterion for terminating the optimization.
+ # @param verbose [Boolean] The flag indicating whether to output KL divergence during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [TSNE] a new instance of TSNE
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#60
+ def initialize(n_components: T.unsafe(nil), perplexity: T.unsafe(nil), metric: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the data in representation space.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples, n_components])
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#31
+ def embedding; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#82
+ def fit(x, _not_used = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#120
+ def fit_transform(x, _not_used = T.unsafe(nil)); end
+
+ # Return the Kullback-Leibler divergence after optimization.
+ #
+ # @return [Float]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#35
+ def kl_divergence; end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#39
+ def n_iter; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#43
+ def rng; end
+
+ private
+
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#207
+ def cost(p, q); end
+
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#140
+ def gaussian_distributed_probability_matrix(distance_mat); end
+
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#191
+ def gaussian_distributed_probability_vector(n, distance_vec, beta); end
+
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#129
+ def init_embedding(x); end
+
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#159
+ def optimal_probabilities(sample_id, distance_vec, max_iter = T.unsafe(nil)); end
+
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#200
+ def t_distributed_probability_matrix(y); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-manifold//lib/rumale/manifold/tsne.rb#211
+ def terminate?(p, q); end
+end
+
+# source://rumale-manifold//lib/rumale/manifold/version.rb#8
+Rumale::Manifold::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-metric_learning@1.0.0.rbi b/sorbet/rbi/gems/rumale-metric_learning@1.0.0.rbi
new file mode 100644
index 00000000..14dcdd18
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-metric_learning@1.0.0.rbi
@@ -0,0 +1,349 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-metric_learning` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-metric_learning`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#7
+module Rumale; end
+
+# Module for metric learning algorithms.
+#
+# source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#8
+module Rumale::MetricLearning; end
+
+# FisherDiscriminantAnalysis is a class that implements Fisher Discriminant Analysis.
+#
+# *Reference*
+# - Fisher, R. A., "The use of multiple measurements in taxonomic problems," Annals of Eugenics, vol. 7, pp. 179--188, 1936.
+# - Sugiyama, M., "Local Fisher Discriminant Analysis for Supervised Dimensionality Reduction," Proc. ICML'06, pp. 905--912, 2006.
+#
+# @example
+# require 'rumale/metric_learning/fisher_discriminant_analysis'
+#
+# transformer = Rumale::MetricLearning::FisherDiscriminantAnalysis.new
+# transformer.fit(training_samples, training_labels)
+# low_samples = transformer.transform(testing_samples)
+#
+# source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#21
+class Rumale::MetricLearning::FisherDiscriminantAnalysis < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with FisherDiscriminantAnalysis.
+ #
+ # @param n_components [Integer] The number of components.
+ # If nil is given, the number of components will be set to [n_features, n_classes - 1].min
+ # @return [FisherDiscriminantAnalysis] a new instance of FisherDiscriminantAnalysis
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#44
+ def initialize(n_components: T.unsafe(nil)); end
+
+ # Returns the class mean vectors.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#34
+ def class_means; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (shape: [n_classes])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#38
+ def classes; end
+
+ # Returns the transform matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#26
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [FisherDiscriminantAnalysis] The learned classifier itself.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#54
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#99
+ def fit_transform(x, y); end
+
+ # Returns the mean vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#30
+ def mean; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/fisher_discriminant_analysis.rb#111
+ def transform(x); end
+end
+
+# LocalFisherDiscriminantAnalysis is a class that implements Local Fisher Discriminant Analysis.
+#
+# *Reference*
+# - Sugiyama, M., "Local Fisher Discriminant Analysis for Supervised Dimensionality Reduction," Proc. ICML'06, pp. 905--912, 2006.
+#
+# @example
+# require 'rumale/metric_learning/local_fisher_discriminant_analysis'
+#
+# transformer = Rumale::MetricLearning::LocalFisherDiscriminantAnalysis.new
+# transformer.fit(training_samples, training_labels)
+# low_samples = transformer.transform(testing_samples)
+#
+# source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#20
+class Rumale::MetricLearning::LocalFisherDiscriminantAnalysis < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with LocalFisherDiscriminantAnalysis.
+ #
+ # @param n_components [Integer] The number of components.
+ # @param gamma [Float] The parameter of rbf kernel, if nil it is 1 / n_features.
+ # @return [LocalFisherDiscriminantAnalysis] a new instance of LocalFisherDiscriminantAnalysis
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#35
+ def initialize(n_components: T.unsafe(nil), gamma: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (shape: [n_classes])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#29
+ def classes; end
+
+ # Returns the transform matrix.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#25
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [LocalFisherDiscriminantAnalysis] The learned classifier itself.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#48
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#96
+ def fit_transform(x, y); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/local_fisher_discriminant_analysis.rb#108
+ def transform(x); end
+end
+
+# MLKR is a class that implements Metric Learning for Kernel Regression.
+#
+# *Reference*
+# - Weinberger, K. Q. and Tesauro, G., "Metric Learning for Kernel Regression," Proc. AISTATS'07, pp. 612--629, 2007.
+#
+# @example
+# require 'rumale/metric_learning/mlkr'
+#
+# transformer = Rumale::MetricLearning::MLKR.new
+# transformer.fit(training_samples, training_target_values)
+# low_samples = transformer.transform(testing_samples)
+#
+# source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#25
+class Rumale::MetricLearning::MLKR < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with MLKR.
+ #
+ # @param n_components [Integer] The number of components.
+ # @param init [String] The initialization method for components ('random' or 'pca').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # This value is given as tol / Lbfgsb::DBL_EPSILON to the factr argument of Lbfgsb.minimize method.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # If true is given, 'iterate.dat' file is generated by lbfgsb.rb.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [MLKR] a new instance of MLKR
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#50
+ def initialize(n_components: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the metric components.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#30
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples]) The target values to be used for fitting the model.
+ # @return [MLKR] The learned classifier itself.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#68
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples]) The target values to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#90
+ def fit_transform(x, y); end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#34
+ def n_iter; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#38
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#102
+ def transform(x); end
+
+ private
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#110
+ def init_components(x, n_features, n_components); end
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#138
+ def mlkr_fnc(w, x, y); end
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/mlkr.rb#119
+ def optimize_components(x, y, n_features, n_components); end
+end
+
+# NeighbourhoodComponentAnalysis is a class that implements Neighbourhood Component Analysis.
+#
+# *Reference*
+# - Goldberger, J., Roweis, S., Hinton, G., and Salakhutdinov, R., "Neighbourhood Component Analysis," Advances in NIPS'17, pp. 513--520, 2005.
+#
+# @example
+# require 'rumale/metric_learning/neighbourhood_component_analysis'
+#
+# transformer = Rumale::MetricLearning::NeighbourhoodComponentAnalysis.new
+# transformer.fit(training_samples, training_labels)
+# low_samples = transformer.transform(testing_samples)
+#
+# source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#24
+class Rumale::MetricLearning::NeighbourhoodComponentAnalysis < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer with NeighbourhoodComponentAnalysis.
+ #
+ # @param n_components [Integer] The number of components.
+ # @param init [String] The initialization method for components ('random' or 'pca').
+ # @param max_iter [Integer] The maximum number of iterations.
+ # @param tol [Float] The tolerance of termination criterion.
+ # This value is given as tol / Lbfgsb::DBL_EPSILON to the factr argument of Lbfgsb.minimize method.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # If true is given, 'iterate.dat' file is generated by lbfgsb.rb.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [NeighbourhoodComponentAnalysis] a new instance of NeighbourhoodComponentAnalysis
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#49
+ def initialize(n_components: T.unsafe(nil), init: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Returns the neighbourhood components.
+ #
+ # @return [Numo::DFloat] (shape: [n_components, n_features])
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#29
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [NeighbourhoodComponentAnalysis] The learned classifier itself.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#67
+ def fit(x, y); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#87
+ def fit_transform(x, y); end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#33
+ def n_iter; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#37
+ def rng; end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+ #
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#99
+ def transform(x); end
+
+ private
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#107
+ def init_components(x, n_features, n_components); end
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#135
+ def nca_fnc(w, x, y); end
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#116
+ def optimize_components(x, y, n_features, n_components); end
+
+ # source://rumale-metric_learning//lib/rumale/metric_learning/neighbourhood_component_analysis.rb#159
+ def probability_matrix(z); end
+end
+
+# source://rumale-metric_learning//lib/rumale/metric_learning/version.rb#8
+Rumale::MetricLearning::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-model_selection@1.0.0.rbi b/sorbet/rbi/gems/rumale-model_selection@1.0.0.rbi
new file mode 100644
index 00000000..b96d2fd3
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-model_selection@1.0.0.rbi
@@ -0,0 +1,723 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-model_selection` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-model_selection`.
+
+
+# source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#5
+module Rumale; end
+
+# This module consists of the classes for model validation techniques.
+#
+# source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#6
+module Rumale::ModelSelection
+ private
+
+ # Split randomly data set into test and train data.
+ #
+ # @example
+ # require 'rumale/model_selection/function'
+ #
+ # x_train, x_test, y_train, y_test = Rumale::ModelSelection.train_test_split(x, y, test_size: 0.2, stratify: true, random_seed: 1)
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The dataset to be used to generate data indices.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used to generate data indices for stratified random permutation.
+ # If stratify = false, this parameter is ignored.
+ # @param test_size [Float] The ratio of number of samples for test data.
+ # @param train_size [Float] The ratio of number of samples for train data.
+ # If nil is given, it sets to 1 - test_size.
+ # @param stratify [Boolean] The flag indicating whether to perform stratify split.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [Array] The set of training and testing data.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/function.rb#29
+ def train_test_split(x, y = T.unsafe(nil), test_size: T.unsafe(nil), train_size: T.unsafe(nil), stratify: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ class << self
+ # Split randomly data set into test and train data.
+ #
+ # @example
+ # require 'rumale/model_selection/function'
+ #
+ # x_train, x_test, y_train, y_test = Rumale::ModelSelection.train_test_split(x, y, test_size: 0.2, stratify: true, random_seed: 1)
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The dataset to be used to generate data indices.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used to generate data indices for stratified random permutation.
+ # If stratify = false, this parameter is ignored.
+ # @param test_size [Float] The ratio of number of samples for test data.
+ # @param train_size [Float] The ratio of number of samples for train data.
+ # If nil is given, it sets to 1 - test_size.
+ # @param stratify [Boolean] The flag indicating whether to perform stratify split.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [Array] The set of training and testing data.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/function.rb#29
+ def train_test_split(x, y = T.unsafe(nil), test_size: T.unsafe(nil), train_size: T.unsafe(nil), stratify: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+ end
+end
+
+# CrossValidation is a class that evaluates a given classifier with cross-validation method.
+#
+# @example
+# require 'rumale/linear_model'
+# require 'rumale/model_selection/stratified_k_fold'
+# require 'rumale/model_selection/cross_validation'
+#
+# svc = Rumale::LinearModel::SVC.new
+# kf = Rumale::ModelSelection::StratifiedKFold.new(n_splits: 5)
+# cv = Rumale::ModelSelection::CrossValidation.new(estimator: svc, splitter: kf)
+# report = cv.perform(samples, labels)
+# mean_test_score = report[:test_score].inject(:+) / kf.n_splits
+#
+# source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#21
+class Rumale::ModelSelection::CrossValidation
+ # Create a new evaluator with cross-validation method.
+ #
+ # @param estimator [Classifier] The classifier of which performance is evaluated.
+ # @param splitter [Splitter] The splitter that divides dataset to training and testing dataset.
+ # @param evaluator [Evaluator] The evaluator that calculates score of estimator results.
+ # @param return_train_score [Boolean] The flag indicating whether to calculate the score of training dataset.
+ # @return [CrossValidation] a new instance of CrossValidation
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#44
+ def initialize(estimator: T.unsafe(nil), splitter: T.unsafe(nil), evaluator: T.unsafe(nil), return_train_score: T.unsafe(nil)); end
+
+ # Return the classifier of which performance is evaluated.
+ #
+ # @return [Classifier]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#24
+ def estimator; end
+
+ # Return the evaluator that calculates score.
+ #
+ # @return [Evaluator]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#32
+ def evaluator; end
+
+ # Perform the evaluation of given classifier with cross-validation method.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features])
+ # The dataset to be used to evaluate the estimator.
+ # @param y [Numo::Int32 / Numo::DFloat] (shape: [n_samples] / [n_samples, n_outputs])
+ # The labels to be used to evaluate the classifier / The target values to be used to evaluate the regressor.
+ # @return [Hash] The report summarizing the results of cross-validation.
+ # * :fit_time (Array) The calculation times of fitting the estimator for each split.
+ # * :test_score (Array) The scores of testing dataset for each split.
+ # * :train_score (Array) The scores of training dataset for each split. This option is nil if
+ # the return_train_score is false.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#62
+ def perform(x, y); end
+
+ # Return the flag indicating whether to calculate the score of training dataset.
+ #
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#36
+ def return_train_score; end
+
+ # Return the splitter that divides dataset.
+ #
+ # @return [Splitter]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#28
+ def splitter; end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#98
+ def kernel_machine?; end
+
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/cross_validation.rb#102
+ def log_loss?; end
+end
+
+# GridSearchCV is a class that performs hyperparameter optimization with grid search method.
+#
+# @example
+# require 'rumale/ensemble'
+# require 'rumale/model_selection/stratified_k_fold'
+# require 'rumale/model_selection/grid_search_cv'
+#
+# rfc = Rumale::Ensemble::RandomForestClassifier.new(random_seed: 1)
+# pg = { n_estimators: [5, 10], max_depth: [3, 5], max_leaf_nodes: [15, 31] }
+# kf = Rumale::ModelSelection::StratifiedKFold.new(n_splits: 5)
+# gs = Rumale::ModelSelection::GridSearchCV.new(estimator: rfc, param_grid: pg, splitter: kf)
+# gs.fit(samples, labels)
+# p gs.cv_results
+# p gs.best_params
+# @example
+# rbf = Rumale::KernelApproximation::RBF.new(random_seed: 1)
+# svc = Rumale::LinearModel::SVC.new
+# pipe = Rumale::Pipeline::Pipeline.new(steps: { rbf: rbf, svc: svc })
+# pg = { rbf__gamma: [32.0, 1.0], rbf__n_components: [4, 128], svc__reg_param: [16.0, 0.1] }
+# kf = Rumale::ModelSelection::StratifiedKFold.new(n_splits: 5)
+# gs = Rumale::ModelSelection::GridSearchCV.new(estimator: pipe, param_grid: pg, splitter: kf)
+# gs.fit(samples, labels)
+# p gs.cv_results
+# p gs.best_params
+#
+# source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#34
+class Rumale::ModelSelection::GridSearchCV < ::Rumale::Base::Estimator
+ # Create a new grid search method.
+ #
+ # @param estimator [Classifier/Regressor] The estimator to be searched for optimal parameters with grid search method.
+ # @param param_grid [Array] The parameter sets is represented with array of hash that
+ # consists of parameter names as keys and array of parameter values as values.
+ # @param splitter [Splitter] The splitter that divides dataset to training and testing dataset on cross validation.
+ # @param evaluator [Evaluator] The evaluator that calculates score of estimator results on cross validation.
+ # If nil is given, the score method of estimator is used to evaluation.
+ # @param greater_is_better [Boolean] The flag that indicates whether the estimator is better as
+ # evaluation score is larger.
+ # @return [GridSearchCV] a new instance of GridSearchCV
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#65
+ def initialize(estimator: T.unsafe(nil), param_grid: T.unsafe(nil), splitter: T.unsafe(nil), evaluator: T.unsafe(nil), greater_is_better: T.unsafe(nil)); end
+
+ # Return the estimator learned with the best parameter.
+ #
+ # @return [Estimator]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#53
+ def best_estimator; end
+
+ # Return the index of the best parameter.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#49
+ def best_index; end
+
+ # Return the best parameter set.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#45
+ def best_params; end
+
+ # Return the score of the estimator learned with the best parameter.
+ #
+ # @return [Float]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#41
+ def best_score; end
+
+ # Return the result of cross validation for each parameter.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#37
+ def cv_results; end
+
+ # Call the decision_function method of learned estimator with the best parameter.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples]) Confidence score per sample.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#102
+ def decision_function(x); end
+
+ # Fit the model with given training data and all sets of parameters.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::NArray] (shape: [n_samples, n_outputs]) The target values or labels to be used for fitting the model.
+ # @return [GridSearchCV] The learned estimator with grid search.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#81
+ def fit(x, y); end
+
+ # Call the predict method of learned estimator with the best parameter.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to obtain prediction result.
+ # @return [Numo::NArray] Predicted results.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#110
+ def predict(x); end
+
+ # Call the predict_log_proba method of learned estimator with the best parameter.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the log-probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted log-probability of each class per sample.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#118
+ def predict_log_proba(x); end
+
+ # Call the predict_proba method of learned estimator with the best parameter.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#126
+ def predict_proba(x); end
+
+ # Call the score method of learned estimator with the best parameter.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) Testing data.
+ # @param y [Numo::NArray] (shape: [n_samples, n_outputs]) True target values or labels for testing data.
+ # @return [Float] The score of estimator.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#135
+ def score(x, y); end
+
+ private
+
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#166
+ def configurated_estimator(prms); end
+
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#202
+ def find_best_params; end
+
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#179
+ def init_attrs; end
+
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#152
+ def param_combinations; end
+
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#159
+ def perform_cross_validation(x, y, prms); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#208
+ def pipeline?; end
+
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#189
+ def store_cv_result(prms, report); end
+
+ # @raise [TypeError]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/grid_search_cv.rb#141
+ def valid_param_grid(grid); end
+end
+
+# GroupKFold is a class that generates the set of data indices for K-fold cross-validation.
+# The data points belonging to the same group do not be split into different folds.
+# The number of groups should be greater than or equal to the number of splits.
+#
+# @example
+# require 'rumale/model_selection/group_k_fold'
+#
+# cv = Rumale::ModelSelection::GroupKFold.new(n_splits: 3)
+# x = Numo::DFloat.new(8, 2).rand
+# groups = Numo::Int32[1, 1, 1, 2, 2, 3, 3, 3]
+# cv.split(x, nil, groups).each do |train_ids, test_ids|
+# puts '---'
+# pp train_ids
+# pp test_ids
+# end
+#
+# # ---
+# # [0, 1, 2, 3, 4]
+# # [5, 6, 7]
+# # ---
+# # [3, 4, 5, 6, 7]
+# # [0, 1, 2]
+# # ---
+# # [0, 1, 2, 5, 6, 7]
+# # [3, 4]
+#
+# source://rumale-model_selection//lib/rumale/model_selection/group_k_fold.rb#34
+class Rumale::ModelSelection::GroupKFold
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for grouped K-fold cross validation.
+ #
+ # @param n_splits [Integer] The number of folds.
+ # @return [GroupKFold] a new instance of GroupKFold
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_k_fold.rb#44
+ def initialize(n_splits: T.unsafe(nil)); end
+
+ # Return the number of folds.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_k_fold.rb#39
+ def n_splits; end
+
+ # Generate data indices for grouped K-fold cross validation.
+ #
+ # @overload split
+ # @return [Array] The set of data indices for constructing the training and testing dataset in each fold.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_k_fold.rb#58
+ def split(x, _y, groups); end
+end
+
+# GroupShuffleSplit is a class that generates the set of data indices
+# for random permutation cross-validation by randomly selecting group labels.
+#
+# @example
+# require 'rumale/model_selection/group_shuffle_split'
+#
+# cv = Rumale::ModelSelection::GroupShuffleSplit.new(n_splits: 2, test_size: 0.2, random_seed: 1)
+# x = Numo::DFloat.new(8, 2).rand
+# groups = Numo::Int32[1, 1, 1, 2, 2, 3, 3, 3]
+# cv.split(x, nil, groups).each do |train_ids, test_ids|
+# puts '---'
+# pp train_ids
+# pp test_ids
+# end
+#
+# # ---
+# # [0, 1, 2, 5, 6, 7]
+# # [3, 4]
+# # ---
+# # [3, 4, 5, 6, 7]
+# # [0, 1, 2]
+#
+# source://rumale-model_selection//lib/rumale/model_selection/group_shuffle_split.rb#29
+class Rumale::ModelSelection::GroupShuffleSplit
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for random permutation cross validation with given group labels.
+ #
+ # @param n_splits [Integer] The number of folds.
+ # @param test_size [Float] The ratio of number of groups for test data.
+ # @param train_size [Float/Nil] The ratio of number of groups for train data.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [GroupShuffleSplit] a new instance of GroupShuffleSplit
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_shuffle_split.rb#46
+ def initialize(n_splits: T.unsafe(nil), test_size: T.unsafe(nil), train_size: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the number of folds.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_shuffle_split.rb#34
+ def n_splits; end
+
+ # Return the random generator for shuffling the dataset.
+ #
+ # @return [Random]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_shuffle_split.rb#38
+ def rng; end
+
+ # Generate train and test data indices by randomly selecting group labels.
+ #
+ # @overload split
+ # @return [Array] The set of data indices for constructing the training and testing dataset in each fold.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/group_shuffle_split.rb#65
+ def split(_x, _y, groups); end
+
+ private
+
+ # source://rumale-model_selection//lib/rumale/model_selection/group_shuffle_split.rb#101
+ def in1d(a, b); end
+end
+
+# KFold is a class that generates the set of data indices for K-fold cross-validation.
+#
+# @example
+# require 'rumale/model_selection/k_fold'
+#
+# kf = Rumale::ModelSelection::KFold.new(n_splits: 3, shuffle: true, random_seed: 1)
+# kf.split(samples, labels).each do |train_ids, test_ids|
+# train_samples = samples[train_ids, true]
+# test_samples = samples[test_ids, true]
+# ...
+# end
+#
+# source://rumale-model_selection//lib/rumale/model_selection/k_fold.rb#20
+class Rumale::ModelSelection::KFold
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for K-fold cross validation.
+ #
+ # @param n_splits [Integer] The number of folds.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [KFold] a new instance of KFold
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/k_fold.rb#40
+ def initialize(n_splits: T.unsafe(nil), shuffle: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the number of folds.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/k_fold.rb#25
+ def n_splits; end
+
+ # Return the random generator for shuffling the dataset.
+ #
+ # @return [Random]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/k_fold.rb#33
+ def rng; end
+
+ # Return the flag indicating whether to shuffle the dataset.
+ #
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/k_fold.rb#29
+ def shuffle; end
+
+ # Generate data indices for K-fold cross validation.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features])
+ # The dataset to be used to generate data indices for K-fold cross validation.
+ # @return [Array] The set of data indices for constructing the training and testing dataset in each fold.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/k_fold.rb#53
+ def split(x, _y = T.unsafe(nil)); end
+end
+
+# ShuffleSplit is a class that generates the set of data indices for random permutation cross-validation.
+#
+# @example
+# require 'rumale/model_selection/shuffle_split'
+#
+# ss = Rumale::ModelSelection::ShuffleSplit.new(n_splits: 3, test_size: 0.2, random_seed: 1)
+# ss.split(samples, labels).each do |train_ids, test_ids|
+# train_samples = samples[train_ids, true]
+# test_samples = samples[test_ids, true]
+# ...
+# end
+#
+# source://rumale-model_selection//lib/rumale/model_selection/shuffle_split.rb#19
+class Rumale::ModelSelection::ShuffleSplit
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for random permutation cross validation.
+ #
+ # @param n_splits [Integer] The number of folds.
+ # @param test_size [Float] The ratio of number of samples for test data.
+ # @param train_size [Float] The ratio of number of samples for train data.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [ShuffleSplit] a new instance of ShuffleSplit
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/shuffle_split.rb#36
+ def initialize(n_splits: T.unsafe(nil), test_size: T.unsafe(nil), train_size: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the number of folds.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/shuffle_split.rb#24
+ def n_splits; end
+
+ # Return the random generator for shuffling the dataset.
+ #
+ # @return [Random]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/shuffle_split.rb#28
+ def rng; end
+
+ # Generate data indices for random permutation cross validation.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features])
+ # The dataset to be used to generate data indices for random permutation cross validation.
+ # @return [Array] The set of data indices for constructing the training and testing dataset in each fold.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/shuffle_split.rb#50
+ def split(x, _y = T.unsafe(nil)); end
+end
+
+# StratifiedKFold is a class that generates the set of data indices for K-fold cross-validation.
+# The proportion of the number of samples in each class will be almost equal for each fold.
+#
+# @example
+# require 'rumale/model_selection/stratified_k_fold'
+#
+# kf = Rumale::ModelSelection::StratifiedKFold.new(n_splits: 3, shuffle: true, random_seed: 1)
+# kf.split(samples, labels).each do |train_ids, test_ids|
+# train_samples = samples[train_ids, true]
+# test_samples = samples[test_ids, true]
+# ...
+# end
+#
+# source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#20
+class Rumale::ModelSelection::StratifiedKFold
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for stratified K-fold cross validation.
+ #
+ # @param n_splits [Integer] The number of folds.
+ # @param shuffle [Boolean] The flag indicating whether to shuffle the dataset.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [StratifiedKFold] a new instance of StratifiedKFold
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#40
+ def initialize(n_splits: T.unsafe(nil), shuffle: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the number of folds.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#25
+ def n_splits; end
+
+ # Return the random generator for shuffling the dataset.
+ #
+ # @return [Random]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#33
+ def rng; end
+
+ # Return the flag indicating whether to shuffle the dataset.
+ #
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#29
+ def shuffle; end
+
+ # Generate data indices for stratified K-fold cross validation.
+ #
+ # @overload split
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#57
+ def split(_x, y); end
+
+ private
+
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#76
+ def fold_sets(y, label, sub_rng); end
+
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#87
+ def train_test_sets(fold_sets_each_class, fold_id); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_k_fold.rb#72
+ def valid_n_splits?(y); end
+end
+
+# StratifiedShuffleSplit is a class that generates the set of data indices for random permutation cross-validation.
+# The proportion of the number of samples in each class will be almost equal for each fold.
+#
+# @example
+# require 'rumale/model_selection/stratified_shuffle_split'
+#
+# ss = Rumale::ModelSelection::StratifiedShuffleSplit.new(n_splits: 3, test_size: 0.2, random_seed: 1)
+# ss.split(samples, labels).each do |train_ids, test_ids|
+# train_samples = samples[train_ids, true]
+# test_samples = samples[test_ids, true]
+# ...
+# end
+#
+# source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#20
+class Rumale::ModelSelection::StratifiedShuffleSplit
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for random permutation cross validation.
+ #
+ # @param n_splits [Integer] The number of folds.
+ # @param test_size [Float] The ratio of number of samples for test data.
+ # @param train_size [Float] The ratio of number of samples for train data.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [StratifiedShuffleSplit] a new instance of StratifiedShuffleSplit
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#37
+ def initialize(n_splits: T.unsafe(nil), test_size: T.unsafe(nil), train_size: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the number of folds.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#25
+ def n_splits; end
+
+ # Return the random generator for shuffling the dataset.
+ #
+ # @return [Random]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#29
+ def rng; end
+
+ # Generate data indices for stratified random permutation cross validation.
+ #
+ # @overload split
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#55
+ def split(_x, y); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#104
+ def enough_data_size_each_class?(y, data_size, data_type); end
+
+ # @return [Boolean]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/stratified_shuffle_split.rb#100
+ def valid_n_splits?(y); end
+end
+
+# TimeSeriesSplit is a class that generates the set of data indices for time series cross-validation.
+# It is assumed that the dataset given are already ordered by time information.
+#
+# @example
+# require 'rumale/model_selection/time_series_split'
+#
+# cv = Rumale::ModelSelection::TimeSeriesSplit.new(n_splits: 5)
+# x = Numo::DFloat.new(6, 2).rand
+# cv.split(x, nil).each do |train_ids, test_ids|
+# puts '---'
+# pp train_ids
+# pp test_ids
+# end
+#
+# # ---
+# # [0]
+# # [1]
+# # ---
+# # [0, 1]
+# # [2]
+# # ---
+# # [0, 1, 2]
+# # [3]
+# # ---
+# # [0, 1, 2, 3]
+# # [4]
+# # ---
+# # [0, 1, 2, 3, 4]
+# # [5]
+#
+# source://rumale-model_selection//lib/rumale/model_selection/time_series_split.rb#37
+class Rumale::ModelSelection::TimeSeriesSplit
+ include ::Rumale::Base::Splitter
+
+ # Create a new data splitter for time series cross-validation.
+ #
+ # @param n_splits [Integer] The number of splits.
+ # @param max_train_size [Integer/Nil] The maximum number of training samples in a split.
+ # @return [TimeSeriesSplit] a new instance of TimeSeriesSplit
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/time_series_split.rb#52
+ def initialize(n_splits: T.unsafe(nil), max_train_size: T.unsafe(nil)); end
+
+ # Return the maximum number of training samples in a split.
+ #
+ # @return [Integer/Nil]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/time_series_split.rb#46
+ def max_train_size; end
+
+ # Return the number of splits.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/time_series_split.rb#42
+ def n_splits; end
+
+ # Generate data indices for time series cross-validation.
+ #
+ # @overload split
+ # @return [Array] The set of data indices for constructing the training and testing dataset in each fold.
+ #
+ # source://rumale-model_selection//lib/rumale/model_selection/time_series_split.rb#66
+ def split(x, _y); end
+end
+
+# source://rumale-model_selection//lib/rumale/model_selection/version.rb#6
+Rumale::ModelSelection::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-naive_bayes@1.0.0.rbi b/sorbet/rbi/gems/rumale-naive_bayes@1.0.0.rbi
new file mode 100644
index 00000000..990cdc92
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-naive_bayes@1.0.0.rbi
@@ -0,0 +1,378 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-naive_bayes` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-naive_bayes`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#7
+module Rumale; end
+
+# This module consists of the classes that implement naive bayes models.
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#8
+module Rumale::NaiveBayes; end
+
+# BaseNaiveBayes is a class that has methods for common processes of naive bayes classifier.
+# This class is used internally.
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#11
+class Rumale::NaiveBayes::BaseNaiveBayes < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # @return [BaseNaiveBayes] a new instance of BaseNaiveBayes
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#14
+ def initialize; end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#22
+ def predict(x); end
+
+ # Predict log-probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the log-probailities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted log-probability of each class per sample.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#34
+ def predict_log_proba(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probailities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/base_naive_bayes.rb#46
+ def predict_proba(x); end
+end
+
+# BernoulliNB is a class that implements Bernoulli Naive Bayes classifier.
+#
+# *Reference*
+# - Manning, C D., Raghavan, P., and Schutze, H., "Introduction to Information Retrieval," Cambridge University Press., 2008.
+#
+# @example
+# require 'rumale/naive_bayes/bernoulli_nb'
+#
+# estimator = Rumale::NaiveBayes::BernoulliNB.new(smoothing_param: 1.0, bin_threshold: 0.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#18
+class Rumale::NaiveBayes::BernoulliNB < ::Rumale::NaiveBayes::BaseNaiveBayes
+ # Create a new classifier with Bernoulli Naive Bayes.
+ #
+ # @param smoothing_param [Float] The Laplace smoothing parameter.
+ # @param bin_threshold [Float] The threshold for binarizing of features.
+ # @return [BernoulliNB] a new instance of BernoulliNB
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#35
+ def initialize(smoothing_param: T.unsafe(nil), bin_threshold: T.unsafe(nil)); end
+
+ # Return the prior probabilities of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#25
+ def class_priors; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#21
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence scores per sample for each class.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#71
+ def decision_function(x); end
+
+ # Return the conditional probabilities for features of each class.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#29
+ def feature_probs; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The categorical variables (e.g. labels)
+ # to be used for fitting the model.
+ # @return [BernoulliNB] The learned classifier itself.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/bernoulli_nb.rb#49
+ def fit(x, y); end
+end
+
+# ComplementNB is a class that implements Complement Naive Bayes classifier.
+#
+# *Reference*
+# - Rennie, J. D. M., Shih, L., Teevan, J., and Karger, D. R., "Tackling the Poor Assumptions of Naive Bayes Text Classifiers," ICML' 03, pp. 616--623, 2013.
+#
+# @example
+# require 'rumale/naive_bayes/complement_nb'
+#
+# estimator = Rumale::NaiveBayes::ComplementNB.new(smoothing_param: 1.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#18
+class Rumale::NaiveBayes::ComplementNB < ::Rumale::NaiveBayes::BaseNaiveBayes
+ # Create a new classifier with Complement Naive Bayes.
+ #
+ # @param smoothing_param [Float] The smoothing parameter.
+ # @param norm [Boolean] The flag indicating whether to normlize the weight vectors.
+ # @return [ComplementNB] a new instance of ComplementNB
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#35
+ def initialize(smoothing_param: T.unsafe(nil), norm: T.unsafe(nil)); end
+
+ # Return the prior probabilities of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#25
+ def class_priors; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#21
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence scores per sample for each class.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#75
+ def decision_function(x); end
+
+ # Return the conditional probabilities for features of each class.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#29
+ def feature_probs; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The categorical variables (e.g. labels)
+ # to be used for fitting the model.
+ # @return [ComplementNB] The learned classifier itself.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#49
+ def fit(x, y); end
+
+ private
+
+ # @return [Boolean]
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/complement_nb.rb#83
+ def normalize?; end
+end
+
+# GaussianNB is a class that implements Gaussian Naive Bayes classifier.
+#
+# @example
+# require 'rumale/naive_bayes/gaussian_nb'
+#
+# estimator = Rumale::NaiveBayes::GaussianNB.new
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#15
+class Rumale::NaiveBayes::GaussianNB < ::Rumale::NaiveBayes::BaseNaiveBayes
+ # Create a new classifier with Gaussian Naive Bayes.
+ #
+ # @return [GaussianNB] a new instance of GaussianNB
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#33
+ def initialize; end
+
+ # Return the prior probabilities of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#22
+ def class_priors; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#18
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence scores per sample for each class.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#61
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The categorical variables (e.g. labels)
+ # to be used for fitting the model.
+ # @return [GaussianNB] The learned classifier itself.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#44
+ def fit(x, y); end
+
+ # Return the mean vectors of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#26
+ def means; end
+
+ # Return the variance vectors of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/gaussian_nb.rb#30
+ def variances; end
+end
+
+# MultinomialNB is a class that implements Multinomial Naive Bayes classifier.
+#
+# *Reference*
+# - Manning, C D., Raghavan, P., and Schutze, H., "Introduction to Information Retrieval," Cambridge University Press., 2008.
+#
+# @example
+# require 'rumale/naive_bayes/multinomial_nb'
+#
+# estimator = Rumale::NaiveBayes::MultinomialNB.new(smoothing_param: 1.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#18
+class Rumale::NaiveBayes::MultinomialNB < ::Rumale::NaiveBayes::BaseNaiveBayes
+ # Create a new classifier with Multinomial Naive Bayes.
+ #
+ # @param smoothing_param [Float] The Laplace smoothing parameter.
+ # @return [MultinomialNB] a new instance of MultinomialNB
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#34
+ def initialize(smoothing_param: T.unsafe(nil)); end
+
+ # Return the prior probabilities of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#25
+ def class_priors; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#21
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence scores per sample for each class.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#64
+ def decision_function(x); end
+
+ # Return the conditional probabilities for features of each class.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#29
+ def feature_probs; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The categorical variables (e.g. labels)
+ # to be used for fitting the model.
+ # @return [MultinomialNB] The learned classifier itself.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/multinomial_nb.rb#45
+ def fit(x, y); end
+end
+
+# NegationNB is a class that implements Negation Naive Bayes classifier.
+#
+# *Reference*
+# - Komiya, K., Sato, N., Fujimoto, K., and Kotani, Y., "Negation Naive Bayes for Categorization of Product Pages on the Web," RANLP' 11, pp. 586--592, 2011.
+#
+# @example
+# require 'rumale/naive_bayes/negation_nb'
+#
+# estimator = Rumale::NaiveBayes::NegationNB.new(smoothing_param: 1.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#18
+class Rumale::NaiveBayes::NegationNB < ::Rumale::NaiveBayes::BaseNaiveBayes
+ # Create a new classifier with Complement Naive Bayes.
+ #
+ # @param smoothing_param [Float] The smoothing parameter.
+ # @return [NegationNB] a new instance of NegationNB
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#34
+ def initialize(smoothing_param: T.unsafe(nil)); end
+
+ # Return the prior probabilities of the classes.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#25
+ def class_priors; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#21
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence scores per sample for each class.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#66
+ def decision_function(x); end
+
+ # Return the conditional probabilities for features of each class.
+ #
+ # @return [Numo::DFloat] (shape: [n_classes, n_features])
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#29
+ def feature_probs; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The categorical variables (e.g. labels)
+ # to be used for fitting the model.
+ # @return [ComplementNB] The learned classifier itself.
+ #
+ # source://rumale-naive_bayes//lib/rumale/naive_bayes/negation_nb.rb#45
+ def fit(x, y); end
+end
+
+# source://rumale-naive_bayes//lib/rumale/naive_bayes/version.rb#8
+Rumale::NaiveBayes::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-nearest_neighbors@1.0.0.rbi b/sorbet/rbi/gems/rumale-nearest_neighbors@1.0.0.rbi
new file mode 100644
index 00000000..efe3aa4d
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-nearest_neighbors@1.0.0.rbi
@@ -0,0 +1,158 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-nearest_neighbors` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-nearest_neighbors`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#8
+module Rumale; end
+
+# This module consists of the classes that implement estimators based on nearest neighbors rule.
+#
+# source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#10
+module Rumale::NearestNeighbors; end
+
+# KNeighborsClassifier is a class that implements the classifier with the k-nearest neighbors rule.
+# The current implementation uses the Euclidean distance for finding the neighbors.
+#
+# @example
+# require 'rumale/nearest_neighbors/k_neighbors_classifier'
+#
+# estimator =
+# Rumale::NearestNeighbors::KNeighborsClassifier.new(n_neighbors: 5)
+# estimator.fit(training_samples, traininig_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#22
+class Rumale::NearestNeighbors::KNeighborsClassifier < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with the nearest neighbor rule.
+ #
+ # @param n_neighbors [Integer] The number of neighbors.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and predict methods expect to be given a distance matrix.
+ # @return [KNeighborsClassifier] a new instance of KNeighborsClassifier
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#44
+ def initialize(n_neighbors: T.unsafe(nil), metric: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#36
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_features]) The samples to compute the scores.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_testing_samples, n_training_samples]).
+ # @return [Numo::DFloat] (shape: [n_testing_samples, n_classes]) Confidence scores per sample for each class.
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#77
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_training_samples, n_features]) The training data to be used for fitting the model.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_training_samples, n_training_samples]).
+ # @param y [Numo::Int32] (shape: [n_training_samples]) The labels to be used for fitting the model.
+ # @return [KNeighborsClassifier] The learned classifier itself.
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#58
+ def fit(x, y); end
+
+ # Return the labels of the prototypes
+ #
+ # @return [Numo::Int32] (size: n_training_samples)
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#32
+ def labels; end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_features]) The samples to predict the labels.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_testing_samples, n_training_samples]).
+ # @return [Numo::Int32] (shape: [n_testing_samples]) Predicted class label per sample.
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#103
+ def predict(x); end
+
+ # Return the prototypes for the nearest neighbor classifier.
+ # If the metric is 'precomputed', that returns nil.
+ #
+ # @return [Numo::DFloat] (shape: [n_training_samples, n_features])
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_classifier.rb#28
+ def prototypes; end
+end
+
+# KNeighborsRegressor is a class that implements the regressor with the k-nearest neighbors rule.
+# The current implementation uses the Euclidean distance for finding the neighbors.
+#
+# @example
+# require 'rumale/nearest_neighbors/k_neighbors_regressor'
+#
+# estimator =
+# Rumale::NearestNeighbors::KNeighborsRegressor.new(n_neighbors: 5)
+# estimator.fit(training_samples, traininig_target_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_regressor.rb#21
+class Rumale::NearestNeighbors::KNeighborsRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with the nearest neighbor rule.
+ #
+ # @param n_neighbors [Integer] The number of neighbors.
+ # @param metric [String] The metric to calculate the distances.
+ # If metric is 'euclidean', Euclidean distance is calculated for distance between points.
+ # If metric is 'precomputed', the fit and predict methods expect to be given a distance matrix.
+ # @return [KNeighborsRegressor] a new instance of KNeighborsRegressor
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_regressor.rb#40
+ def initialize(n_neighbors: T.unsafe(nil), metric: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_training_samples, n_features]) The training data to be used for fitting the model.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_training_samples, n_training_samples]).
+ # @param y [Numo::DFloat] (shape: [n_training_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [KNeighborsRegressor] The learned regressor itself.
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_regressor.rb#54
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_testing_samples, n_features]) The samples to predict the values.
+ # If the metric is 'precomputed', x must be a square distance matrix (shape: [n_testing_samples, n_training_samples]).
+ # @return [Numo::DFloat] (shape: [n_testing_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_regressor.rb#72
+ def predict(x); end
+
+ # Return the prototypes for the nearest neighbor regressor.
+ # If the metric is 'precomputed', that returns nil.
+ # If the algorithm is 'vptree', that returns Rumale::NearestNeighbors::VPTree.
+ #
+ # @return [Numo::DFloat] (shape: [n_training_samples, n_features])
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_regressor.rb#28
+ def prototypes; end
+
+ # Return the values of the prototypes
+ #
+ # @return [Numo::DFloat] (shape: [n_training_samples, n_outputs])
+ #
+ # source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/k_neighbors_regressor.rb#32
+ def values; end
+end
+
+# source://rumale-nearest_neighbors//lib/rumale/nearest_neighbors/version.rb#8
+Rumale::NearestNeighbors::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-neural_network@1.0.0.rbi b/sorbet/rbi/gems/rumale-neural_network@1.0.0.rbi
new file mode 100644
index 00000000..fe943b47
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-neural_network@1.0.0.rbi
@@ -0,0 +1,762 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-neural_network` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-neural_network`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/version.rb#4
+module Rumale; end
+
+# This module consists of the modules and classes for implementation multi-layer perceptron estimator.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/version.rb#6
+module Rumale::NeuralNetwork; end
+
+# BaseMLP is an abstract class for implementation of multi-layer perceptron estimator.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#216
+class Rumale::NeuralNetwork::BaseMLP < ::Rumale::Base::Estimator
+ # Create a multi-layer perceptron estimator.
+ #
+ # @param hidden_units [Array] The number of units in the i-th hidden layer.
+ # @param dropout_rate [Float] The rate of the units to drop.
+ # @param learning_rate [Float] The initial value of learning rate in Adam optimizer.
+ # @param decay1 [Float] The smoothing parameter for the first moment in Adam optimizer.
+ # @param decay2 [Float] The smoothing parameter for the second moment in Adam optimizer.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param batch_size [Integer] The size of the mini batches.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [BaseMLP] a new instance of BaseMLP
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#230
+ def initialize(hidden_units: T.unsafe(nil), dropout_rate: T.unsafe(nil), learning_rate: T.unsafe(nil), decay1: T.unsafe(nil), decay2: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#250
+ def buld_network(n_inputs, n_outputs, srng = T.unsafe(nil)); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#265
+ def train(x, y, network, loss_func, srng = T.unsafe(nil)); end
+end
+
+# BaseRBF is an abstract class for implementation of radial basis function (RBF) network estimator.
+# This class is used internally.
+#
+# *Reference*
+# - Bugmann, G., "Normalized Gaussian Radial Basis Function networks," Neural Computation, vol. 20, pp. 97--110, 1998.
+# - Que, Q., and Belkin, M., "Back to the Future: Radial Basis Function Networks Revisited," Proc. of AISTATS'16, pp. 1375--1383, 2016.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#14
+class Rumale::NeuralNetwork::BaseRBF < ::Rumale::Base::Estimator
+ # Create a radial basis function network estimator.
+ #
+ # @param hidden_units [Array] The number of units in the hidden layer.
+ # @param gamma [Float] The parameter for the radial basis function, if nil it is 1 / n_features.
+ # @param reg_param [Float] The regularization parameter.
+ # @param normalize [Boolean] The flag indicating whether to normalize the hidden layer output or not.
+ # @param max_iter [Integer] The maximum number of iterations for finding centers.
+ # @param tol [Float] The tolerance of termination criterion for finding centers.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [BaseRBF] a new instance of BaseRBF
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#24
+ def initialize(hidden_units: T.unsafe(nil), gamma: T.unsafe(nil), reg_param: T.unsafe(nil), normalize: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#83
+ def assign_centers(x); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#63
+ def find_centers(x); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#57
+ def hidden_output(x); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#88
+ def n_centers; end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rbf.rb#41
+ def partial_fit(x, y); end
+end
+
+# BaseRVFL is an abstract class for implementation of random vector functional link (RVFL) network.
+# This class is used internally.
+#
+# *Reference*
+# - Malik, A. K., Gao, R., Ganaie, M. A., Tanveer, M., and Suganthan, P. N., "Random vector functional link network: recent developments, applications, and future directions," Applied Soft Computing, vol. 143, 2023.
+# - Zhang, L., and Suganthan, P. N., "A comprehensive evaluation of random vector functional link networks," Information Sciences, vol. 367--368, pp. 1094--1105, 2016.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_rvfl.rb#14
+class Rumale::NeuralNetwork::BaseRVFL < ::Rumale::Base::Estimator
+ # Create a random vector functional link network estimator.
+ #
+ # @param hidden_units [Array] The number of units in the hidden layer.
+ # @param reg_param [Float] The regularization parameter.
+ # @param scale [Float] The scale parameter for random weight and bias.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [BaseRVFL] a new instance of BaseRVFL
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rvfl.rb#21
+ def initialize(hidden_units: T.unsafe(nil), reg_param: T.unsafe(nil), scale: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rvfl.rb#48
+ def hidden_output(x); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_rvfl.rb#34
+ def partial_fit(x, y); end
+end
+
+# This module consists of the classes that implement layer functions of neural network.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#10
+module Rumale::NeuralNetwork::Layer; end
+
+# Affine is a class that calculates the linear transform.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#14
+class Rumale::NeuralNetwork::Layer::Affine
+ # @return [Affine] a new instance of Affine
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#16
+ def initialize(n_inputs: T.unsafe(nil), n_outputs: T.unsafe(nil), optimizer: T.unsafe(nil), rng: T.unsafe(nil)); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#24
+ def forward(x); end
+end
+
+# Dropout is a class that performs dropout regularization.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#45
+class Rumale::NeuralNetwork::Layer::Dropout
+ # @return [Dropout] a new instance of Dropout
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#47
+ def initialize(rate: T.unsafe(nil), rng: T.unsafe(nil)); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#53
+ def forward(x); end
+end
+
+# ReLU is a class that calculates rectified linear function.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#68
+class Rumale::NeuralNetwork::Layer::Relu
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#70
+ def forward(x); end
+end
+
+# This module consists of the classes that implement loss function for neural network.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#83
+module Rumale::NeuralNetwork::Loss; end
+
+# MeanSquaredError is a class that calculates mean squared error for regression task.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#87
+class Rumale::NeuralNetwork::Loss::MeanSquaredError
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#89
+ def call(out, y); end
+end
+
+# SoftmaxCrossEntropy is a class that calculates softmax cross-entropy for classification task.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#101
+class Rumale::NeuralNetwork::Loss::SoftmaxCrossEntropy
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#103
+ def call(out, y); end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#113
+ def softmax(x); end
+end
+
+# MLPClassifier is a class that implements classifier based on multi-layer perceptron.
+# MLPClassifier use ReLu as the activation function and Adam as the optimization method
+# and softmax cross entropy as the loss function.
+#
+# @example
+# require 'rumale/neural_network/mlp_classifier'
+#
+# estimator = Rumale::NeuralNetwork::MLPClassifier.new(hidden_units: [100, 100], dropout_rate: 0.3)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#20
+class Rumale::NeuralNetwork::MLPClassifier < ::Rumale::NeuralNetwork::BaseMLP
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with multi-layer perceptron.
+ #
+ # @param hidden_units [Array] The number of units in the i-th hidden layer.
+ # @param dropout_rate [Float] The rate of the units to drop.
+ # @param learning_rate [Float] The initial value of learning rate in Adam optimizer.
+ # @param decay1 [Float] The smoothing parameter for the first moment in Adam optimizer.
+ # @param decay2 [Float] The smoothing parameter for the second moment in Adam optimizer.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param batch_size [Integer] The size of the mini batches.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [MLPClassifier] a new instance of MLPClassifier
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#52
+ def initialize(hidden_units: T.unsafe(nil), dropout_rate: T.unsafe(nil), learning_rate: T.unsafe(nil), decay1: T.unsafe(nil), decay2: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#29
+ def classes; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [MLPClassifier] The learned classifier itself.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#62
+ def fit(x, y); end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#33
+ def n_iter; end
+
+ # Return the network.
+ #
+ # @return [Rumale::NeuralNetwork::Model::Sequential]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#25
+ def network; end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#84
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#97
+ def predict_proba(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#37
+ def rng; end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#106
+ def one_hot_encode(y); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_classifier.rb#110
+ def softmax(x); end
+end
+
+# MLPRegressor is a class that implements regressor based on multi-layer perceptron.
+# MLPRegressor use ReLu as the activation function and Adam as the optimization method
+# and mean squared error as the loss function.
+#
+# @example
+# require 'rumale/neural_network/mlp_regressor'
+#
+# estimator = Rumale::NeuralNetwork::MLPRegressor.new(hidden_units: [100, 100], dropout_rate: 0.3)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#19
+class Rumale::NeuralNetwork::MLPRegressor < ::Rumale::NeuralNetwork::BaseMLP
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with multi-layer perceptron.
+ #
+ # @param hidden_units [Array] The number of units in the i-th hidden layer.
+ # @param dropout_rate [Float] The rate of the units to drop.
+ # @param learning_rate [Float] The initial value of learning rate in Adam optimizer.
+ # @param decay1 [Float] The smoothing parameter for the first moment in Adam optimizer.
+ # @param decay2 [Float] The smoothing parameter for the second moment in Adam optimizer.
+ # @param max_iter [Integer] The maximum number of epochs that indicates
+ # how many times the whole data is given to the training process.
+ # @param batch_size [Intger] The size of the mini batches.
+ # @param tol [Float] The tolerance of loss for terminating optimization.
+ # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [MLPRegressor] a new instance of MLPRegressor
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#47
+ def initialize(hidden_units: T.unsafe(nil), dropout_rate: T.unsafe(nil), learning_rate: T.unsafe(nil), decay1: T.unsafe(nil), decay2: T.unsafe(nil), max_iter: T.unsafe(nil), batch_size: T.unsafe(nil), tol: T.unsafe(nil), verbose: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [MLPRegressor] The learned regressor itself.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#57
+ def fit(x, y); end
+
+ # Return the number of iterations run for optimization
+ #
+ # @return [Integer]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#28
+ def n_iter; end
+
+ # Return the network.
+ #
+ # @return [Rumale::NeuralNetwork::Model::Sequential]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#24
+ def network; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#79
+ def predict(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/mlp_regressor.rb#32
+ def rng; end
+end
+
+# This module consists of the classes for implementing neural network model.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#123
+module Rumale::NeuralNetwork::Model
+ # Returns the value of attribute layers.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#125
+ def layers; end
+end
+
+# Sequential is a class that implements linear stack model.
+# This class is used internally.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#130
+class Rumale::NeuralNetwork::Model::Sequential
+ # @return [Sequential] a new instance of Sequential
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#132
+ def initialize; end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#143
+ def delete_dropout; end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#149
+ def forward(x); end
+
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#137
+ def push(ops); end
+end
+
+# This module consists of the classes that implement optimizers adaptively tuning learning rate.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#170
+module Rumale::NeuralNetwork::Optimizer; end
+
+# Adam is a class that implements Adam optimizer.
+#
+# *Reference*
+# - Kingma, D P., and Ba, J., "Adam: A Method for Stochastic Optimization," Proc. ICLR'15, 2015.
+#
+# source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#176
+class Rumale::NeuralNetwork::Optimizer::Adam
+ # Create a new optimizer with Adam
+ #
+ # @param learning_rate [Float] The initial value of learning rate.
+ # @param decay1 [Float] The smoothing parameter for the first moment.
+ # @param decay2 [Float] The smoothing parameter for the second moment.
+ # @return [Adam] a new instance of Adam
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#183
+ def initialize(learning_rate: T.unsafe(nil), decay1: T.unsafe(nil), decay2: T.unsafe(nil)); end
+
+ # Calculate the updated weight with Adam adaptive learning rate.
+ #
+ # @param weight [Numo::DFloat] (shape: [n_features]) The weight to be updated.
+ # @param gradient [Numo::DFloat] (shape: [n_features]) The gradient for updating the weight.
+ # @return [Numo::DFloat] (shape: [n_features]) The updated weight.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/base_mlp.rb#198
+ def call(weight, gradient); end
+end
+
+# RBFClassifier is a class that implements classifier based on (k-means) radial basis function (RBF) networks.
+#
+# *Reference*
+# - Bugmann, G., "Normalized Gaussian Radial Basis Function networks," Neural Computation, vol. 20, pp. 97--110, 1998.
+# - Que, Q., and Belkin, M., "Back to the Future: Radial Basis Function Networks Revisited," Proc. of AISTATS'16, pp. 1375--1383, 2016.
+#
+# @example
+# require 'numo/tiny_linalg'
+# Numo::Linalg = Numo::TinyLinalg
+#
+# require 'rumale/neural_network/rbf_classifier'
+#
+# estimator = Rumale::NeuralNetwork::RBFClassifier.new(hidden_units: 128, reg_param: 100.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#25
+class Rumale::NeuralNetwork::RBFClassifier < ::Rumale::NeuralNetwork::BaseRBF
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with (k-means) RBF networks.
+ #
+ # @param hidden_units [Array] The number of units in the hidden layer.
+ # @param gamma [Float] The parameter for the radial basis function, if nil it is 1 / n_features.
+ # @param reg_param [Float] The regularization parameter.
+ # @param normalize [Boolean] The flag indicating whether to normalize the hidden layer output or not.
+ # @param max_iter [Integer] The maximum number of iterations for finding centers.
+ # @param tol [Float] The tolerance of termination criterion for finding centers.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [RBFClassifier] a new instance of RBFClassifier
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#53
+ def initialize(hidden_units: T.unsafe(nil), gamma: T.unsafe(nil), reg_param: T.unsafe(nil), normalize: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the centers in the hidden layer of RBF network.
+ #
+ # @return [Numo::DFloat] (shape: [n_centers, n_features])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#34
+ def centers; end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#30
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#80
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [RBFClassifier] The learned classifier itself.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#63
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#91
+ def predict(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#42
+ def rng; end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_centers, n_classes])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#38
+ def weight_vec; end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_classifier.rb#102
+ def one_hot_encode(y); end
+end
+
+# RBFRegressor is a class that implements regressor based on (k-means) radial basis function (RBF) networks.
+#
+# *Reference*
+# - Bugmann, G., "Normalized Gaussian Radial Basis Function networks," Neural Computation, vol. 20, pp. 97--110, 1998.
+# - Que, Q., and Belkin, M., "Back to the Future: Radial Basis Function Networks Revisited," Proc. of AISTATS'16, pp. 1375--1383, 2016.
+#
+# @example
+# require 'numo/tiny_linalg'
+# Numo::Linalg = Numo::TinyLinalg
+#
+# require 'rumale/neural_network/rbf_regressor'
+#
+# estimator = Rumale::NeuralNetwork::RBFRegressor.new(hidden_units: 128, reg_param: 100.0)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#24
+class Rumale::NeuralNetwork::RBFRegressor < ::Rumale::NeuralNetwork::BaseRBF
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with (k-means) RBF networks.
+ #
+ # @param hidden_units [Array] The number of units in the hidden layer.
+ # @param gamma [Float] The parameter for the radial basis function, if nil it is 1 / n_features.
+ # @param reg_param [Float] The regularization parameter.
+ # @param normalize [Boolean] The flag indicating whether to normalize the hidden layer output or not.
+ # @param max_iter [Integer] The maximum number of iterations for finding centers.
+ # @param tol [Float] The tolerance of termination criterion for finding centers.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [RBFRegressor] a new instance of RBFRegressor
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#48
+ def initialize(hidden_units: T.unsafe(nil), gamma: T.unsafe(nil), reg_param: T.unsafe(nil), normalize: T.unsafe(nil), max_iter: T.unsafe(nil), tol: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the centers in the hidden layer of RBF network.
+ #
+ # @return [Numo::DFloat] (shape: [n_centers, n_features])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#29
+ def centers; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [RBFRegressor] The learned regressor itself.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#58
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#75
+ def predict(x); end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#37
+ def rng; end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_centers, n_outputs])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rbf_regressor.rb#33
+ def weight_vec; end
+end
+
+# RVFLClassifier is a class that implements classifier based on random vector functional link (RVFL) network.
+# The current implementation uses sigmoid function as activation function.
+#
+# *Reference*
+# - Malik, A. K., Gao, R., Ganaie, M. A., Tanveer, M., and Suganthan, P. N., "Random vector functional link network: recent developments, applications, and future directions," Applied Soft Computing, vol. 143, 2023.
+# - Zhang, L., and Suganthan, P. N., "A comprehensive evaluation of random vector functional link networks," Information Sciences, vol. 367--368, pp. 1094--1105, 2016.
+#
+# @example
+# require 'numo/tiny_linalg'
+# Numo::Linalg = Numo::TinyLinalg
+#
+# require 'rumale/neural_network/rvfl_classifier'
+#
+# estimator = Rumale::NeuralNetwork::RVFLClassifier.new(hidden_units: 128, reg_param: 100.0)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#26
+class Rumale::NeuralNetwork::RVFLClassifier < ::Rumale::NeuralNetwork::BaseRVFL
+ include ::Rumale::Base::Classifier
+
+ # Create a new classifier with RVFL network.
+ #
+ # @param hidden_units [Array] The number of units in the hidden layer.
+ # @param reg_param [Float] The regularization parameter.
+ # @param scale [Float] The scale parameter for random weight and bias.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [RVFLClassifier] a new instance of RVFLClassifier
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#55
+ def initialize(hidden_units: T.unsafe(nil), reg_param: T.unsafe(nil), scale: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#31
+ def classes; end
+
+ # Calculate confidence scores for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Confidence score per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#81
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [RVFLClassifier] The learned classifier itself.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#64
+ def fit(x, y); end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#92
+ def predict(x); end
+
+ # Return the bias vector in the hidden layer of RVFL network.
+ #
+ # @return [Numo::DFloat] (shape: [n_hidden_units])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#39
+ def random_bias; end
+
+ # Return the weight vector in the hidden layer of RVFL network.
+ #
+ # @return [Numo::DFloat] (shape: [n_hidden_units, n_features])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#35
+ def random_weight_vec; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#47
+ def rng; end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_features + n_hidden_units, n_classes])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#43
+ def weight_vec; end
+
+ private
+
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_classifier.rb#103
+ def one_hot_encode(y); end
+end
+
+# RVFLRegressor is a class that implements regressor based on random vector functional link (RVFL) network.
+# The current implementation uses sigmoid function as activation function.
+#
+# *Reference*
+# - Malik, A. K., Gao, R., Ganaie, M. A., Tanveer, M., and Suganthan, P. N., "Random vector functional link network: recent developments, applications, and future directions," Applied Soft Computing, vol. 143, 2023.
+# - Zhang, L., and Suganthan, P. N., "A comprehensive evaluation of random vector functional link networks," Information Sciences, vol. 367--368, pp. 1094--1105, 2016.
+#
+# @example
+# require 'numo/tiny_linalg'
+# Numo::Linalg = Numo::TinyLinalg
+#
+# require 'rumale/neural_network/rvfl_regressor'
+#
+# estimator = Rumale::NeuralNetwork::RVFLRegressor.new(hidden_units: 128, reg_param: 100.0)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#25
+class Rumale::NeuralNetwork::RVFLRegressor < ::Rumale::NeuralNetwork::BaseRVFL
+ include ::Rumale::Base::Regressor
+
+ # Create a new regressor with RVFL network.
+ #
+ # @param hidden_units [Array] The number of units in the hidden layer.
+ # @param reg_param [Float] The regularization parameter.
+ # @param scale [Float] The scale parameter for random weight and bias.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # @return [RVFLRegressor] a new instance of RVFLRegressor
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#50
+ def initialize(hidden_units: T.unsafe(nil), reg_param: T.unsafe(nil), scale: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [RVFLRegressor] The learned regressor itself.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#59
+ def fit(x, y); end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) The predicted values per sample.
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#76
+ def predict(x); end
+
+ # Return the bias vector in the hidden layer of RVFL network.
+ #
+ # @return [Numo::DFloat] (shape: [n_hidden_units])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#34
+ def random_bias; end
+
+ # Return the weight vector in the hidden layer of RVFL network.
+ #
+ # @return [Numo::DFloat] (shape: [n_hidden_units, n_features])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#30
+ def random_weight_vec; end
+
+ # Return the random generator.
+ #
+ # @return [Random]
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#42
+ def rng; end
+
+ # Return the weight vector.
+ #
+ # @return [Numo::DFloat] (shape: [n_features + n_hidden_units, n_outputs])
+ #
+ # source://rumale-neural_network//lib/rumale/neural_network/rvfl_regressor.rb#38
+ def weight_vec; end
+end
+
+# source://rumale-neural_network//lib/rumale/neural_network/version.rb#8
+Rumale::NeuralNetwork::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-pipeline@1.0.0.rbi b/sorbet/rbi/gems/rumale-pipeline@1.0.0.rbi
new file mode 100644
index 00000000..a68471dc
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-pipeline@1.0.0.rbi
@@ -0,0 +1,209 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-pipeline` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-pipeline`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#5
+module Rumale; end
+
+# Module implements utilities of pipeline that consists of a chain of transformers and estimators.
+#
+# source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#6
+module Rumale::Pipeline; end
+
+# FeatureUnion is a class that implements the function concatenating the multi-transformer results.
+#
+# @example
+# require 'rumale/kernel_approximation/rbf'
+# require 'rumale/decomposition/pca'
+# require 'rumale/pipeline/feature_union'
+#
+# fu = Rumale::Pipeline::FeatureUnion.new(
+# transformers: {
+# 'rbf': Rumale::KernelApproximation::RBF.new(gamma: 1.0, n_components: 96, random_seed: 1),
+# 'pca': Rumale::Decomposition::PCA.new(n_components: 32)
+# }
+# )
+# fu.fit(training_samples, training_labels)
+# results = fu.predict(testing_samples)
+#
+# # > p results.shape[1]
+# # > 128
+#
+# source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#26
+class Rumale::Pipeline::FeatureUnion < ::Rumale::Base::Estimator
+ # Create a new feature union.
+ #
+ # @param transformers [Hash] List of transformers. The order of transforms follows the insertion order of hash keys.
+ # @return [FeatureUnion] a new instance of FeatureUnion
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#34
+ def initialize(transformers:); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the transformers.
+ # @param y [Numo::NArray/Nil] (shape: [n_samples, n_outputs]) The target values or labels to be used for fitting the transformers.
+ # @return [FeatureUnion] The learned feature union itself.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#45
+ def fit(x, y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the transformers.
+ # @param y [Numo::NArray/Nil] (shape: [n_samples, n_outputs]) The target values or labels to be used for fitting the transformers.
+ # @return [Numo::DFloat] (shape: [n_samples, sum_n_components]) The transformed and concatenated data.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#55
+ def fit_transform(x, y = T.unsafe(nil)); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned transformers.
+ # @return [Numo::DFloat] (shape: [n_samples, sum_n_components]) The transformed and concatenated data.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#63
+ def transform(x); end
+
+ # Return the transformers
+ #
+ # @return [Hash]
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/feature_union.rb#29
+ def transformers; end
+end
+
+# Pipeline is a class that implements the function to perform the transformers and estimators sequentially.
+#
+# @example
+# require 'rumale/kernel_approximation/rbf'
+# require 'rumale/linear_model/svc'
+# require 'rumale/pipeline/pipeline'
+#
+# rbf = Rumale::KernelApproximation::RBF.new(gamma: 1.0, n_components: 128, random_seed: 1)
+# svc = Rumale::LinearModel::SVC.new(reg_param: 1.0, fit_bias: true, max_iter: 5000)
+# pipeline = Rumale::Pipeline::Pipeline.new(steps: { trs: rbf, est: svc })
+# pipeline.fit(training_samples, training_labels)
+# results = pipeline.predict(testing_samples)
+#
+# source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#21
+class Rumale::Pipeline::Pipeline < ::Rumale::Base::Estimator
+ # Create a new pipeline.
+ #
+ # @param steps [Hash] List of transformers and estimators. The order of transforms follows the insertion order of hash keys.
+ # The last entry is considered an estimator.
+ # @return [Pipeline] a new instance of Pipeline
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#30
+ def initialize(steps:); end
+
+ # Call the decision_function method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
+ # @return [Numo::DFloat] (shape: [n_samples]) Confidence score per sample.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#72
+ def decision_function(x); end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be transformed and used for fitting the model.
+ # @param y [Numo::NArray] (shape: [n_samples, n_outputs]) The target values or labels to be used for fitting the model.
+ # @return [Pipeline] The learned pipeline itself.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#42
+ def fit(x, y); end
+
+ # Call the fit_predict method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be transformed and used for fitting the model.
+ # @param y [Numo::NArray] (shape: [n_samples, n_outputs], default: nil) The target values or labels to be used for fitting the model.
+ # @return [Numo::NArray] The predicted results by last estimator.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#53
+ def fit_predict(x, y = T.unsafe(nil)); end
+
+ # Call the fit_transform method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be transformed and used for fitting the model.
+ # @param y [Numo::NArray] (shape: [n_samples, n_outputs], default: nil) The target values or labels to be used for fitting the model.
+ # @return [Numo::NArray] The predicted results by last estimator.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#63
+ def fit_transform(x, y = T.unsafe(nil)); end
+
+ # Call the inverse_transform method in reverse order.
+ #
+ # @param z [Numo::DFloat] (shape: [n_samples, n_components]) The transformed samples to be restored into original space.
+ # @return [Numo::DFloat] (shape: [n_samples, n_features]) The restored samples.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#117
+ def inverse_transform(z); end
+
+ # Call the predict method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to obtain prediction result.
+ # @return [Numo::NArray] The predicted results by last estimator.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#81
+ def predict(x); end
+
+ # Call the predict_log_proba method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the log-probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted log-probability of each class per sample.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#90
+ def predict_log_proba(x); end
+
+ # Call the predict_proba method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#99
+ def predict_proba(x); end
+
+ # Call the score method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) Testing data.
+ # @param y [Numo::NArray] (shape: [n_samples, n_outputs]) True target values or labels for testing data.
+ # @return [Float] The score of last estimator
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#133
+ def score(x, y); end
+
+ # Return the steps.
+ #
+ # @return [Hash]
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#24
+ def steps; end
+
+ # Call the transform method of last estimator after applying all transforms.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be transformed.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed samples.
+ #
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#108
+ def transform(x); end
+
+ private
+
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#158
+ def apply_transforms(x, y = T.unsafe(nil), fit: T.unsafe(nil)); end
+
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#170
+ def last_estimator; end
+
+ # source://rumale-pipeline//lib/rumale/pipeline/pipeline.rb#140
+ def validate_steps(steps); end
+end
+
+# source://rumale-pipeline//lib/rumale/pipeline/version.rb#8
+Rumale::Pipeline::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-preprocessing@1.0.0.rbi b/sorbet/rbi/gems/rumale-preprocessing@1.0.0.rbi
new file mode 100644
index 00000000..4b088973
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-preprocessing@1.0.0.rbi
@@ -0,0 +1,923 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-preprocessing` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-preprocessing`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#6
+module Rumale; end
+
+# This module consists of the classes that perform preprocessings.
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#7
+module Rumale::Preprocessing; end
+
+# Discretizes features with a given number of bins.
+# In some cases, discretizing features may accelerate decision tree training.
+#
+# @example
+# require 'rumale/preprocessing/bin_discretizer'
+#
+# discretizer = Rumale::Preprocessing::BinDiscretizer.new(n_bins: 4)
+# samples = Numo::DFloat.new(5, 2).rand - 0.5
+# transformed = discretizer.fit_transform(samples)
+# # > pp samples
+# # Numo::DFloat#shape=[5,2]
+# # [[-0.438246, -0.126933],
+# # [ 0.294815, -0.298958],
+# # [-0.383959, -0.155968],
+# # [ 0.039948, 0.237815],
+# # [-0.334911, -0.449117]]
+# # > pp transformed
+# # Numo::DFloat#shape=[5,2]
+# # [[0, 1],
+# # [3, 0],
+# # [0, 1],
+# # [2, 3],
+# # [0, 0]]
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/bin_discretizer.rb#32
+class Rumale::Preprocessing::BinDiscretizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new discretizer for features with given number of bins.
+ #
+ # @param n_bins [Integer] The number of bins to be used discretizing feature values.
+ # @return [BinDiscretizer] a new instance of BinDiscretizer
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/bin_discretizer.rb#42
+ def initialize(n_bins: T.unsafe(nil)); end
+
+ # Return the feature steps to be used discretizing.
+ #
+ # @return [Array] (shape: [n_features, n_bins])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/bin_discretizer.rb#37
+ def feature_steps; end
+
+ # Fit feature ranges to be discretized.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate the feature ranges.
+ # @return [BinDiscretizer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/bin_discretizer.rb#53
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit feature ranges to be discretized, then return discretized samples.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be discretized.
+ # @return [Numo::DFloat] The discretized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/bin_discretizer.rb#71
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Perform discretizing the given samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be discretized.
+ # @return [Numo::DFloat] The discretized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/bin_discretizer.rb#81
+ def transform(x); end
+end
+
+# Binarize samples according to a threshold
+#
+# @example
+# require 'rumale/preprocessing/binarizer'
+#
+# binarizer = Rumale::Preprocessing::Binarizer.new
+# x = Numo::DFloat[[-1.2, 3.2], [2.4, -0.5], [4.5, 0.8]]
+# b = binarizer.transform(x)
+# p b
+#
+# # Numo::DFloat#shape=[3, 2]
+# # [[0, 1],
+# # [1, 0],
+# # [1, 1]]
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/binarizer.rb#23
+class Rumale::Preprocessing::Binarizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer for binarization.
+ #
+ # @param threshold [Float] The threshold value for binarization.
+ # @return [Binarizer] a new instance of Binarizer
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/binarizer.rb#28
+ def initialize(threshold: T.unsafe(nil)); end
+
+ # This method does nothing and returns the object itself.
+ # For compatibility with other transformer, this method exists.
+ #
+ # @overload fit
+ # @return [Binarizer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/binarizer.rb#39
+ def fit(_x = T.unsafe(nil), _y = T.unsafe(nil)); end
+
+ # The output of this method is the same as that of the transform method.
+ # For compatibility with other transformer, this method exists.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be binarized.
+ # @return [Numo::DFloat] The binarized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/binarizer.rb#58
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Binarize each sample.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be binarized.
+ # @return [Numo::DFloat] The binarized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/binarizer.rb#47
+ def transform(x); end
+end
+
+# KernelCalculator is a class that calculates the kernel matrix with training data.
+#
+# @example
+# require 'rumale/preprocessing/kernel_calculator'
+# require 'rumale/kernel_machine/kernel_ridge'
+# require 'rumale/pipeline/pipeline'
+#
+# transformer = Rumale::Preprocessing::KernelCalculator.new(kernel: 'rbf', gamma: 0.5)
+# regressor = Rumale::KernelMachine::KernelRidge.new
+# pipeline = Rumale::Pipeline::Pipeline.new(
+# steps: { trs: transformer, est: regressor }
+# )
+# pipeline.fit(x_train, y_train)
+# results = pipeline.predict(x_test)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#24
+class Rumale::Preprocessing::KernelCalculator < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new transformer that transforms feature vectors into a kernel matrix.
+ #
+ # @param kernel [String] The type of kernel function ('rbf', 'linear', 'poly', and 'sigmoid').
+ # @param gamma [Float] The gamma parameter in rbf/poly/sigmoid kernel function.
+ # @param degree [Integer] The degree parameter in polynomial kernel function.
+ # @param coef [Float] The coefficient in poly/sigmoid kernel function.
+ # @return [KernelCalculator] a new instance of KernelCalculator
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#37
+ def initialize(kernel: T.unsafe(nil), gamma: T.unsafe(nil), degree: T.unsafe(nil), coef: T.unsafe(nil)); end
+
+ # Returns the training data for calculating kernel matrix.
+ #
+ # @return [Numo::DFloat] (shape: n_components, n_features)
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#29
+ def components; end
+
+ # Fit the model with given training data.
+ #
+ # @overload fit
+ # @return [KernelCalculator] The learned transformer itself.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#52
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit the model with training data, and then transform them with the learned model.
+ #
+ # @overload fit_transform
+ # @return [Numo::DFloat] (shape: [n_samples, n_samples]) The calculated kernel matrix.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#64
+ def fit_transform(x, y = T.unsafe(nil)); end
+
+ # Transform the given data with the learned model.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be used for calculating kernel matrix with the training data.
+ # @return [Numo::DFloat] (shape: [n_samples, n_components]) The calculated kernel matrix.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#74
+ def transform(x); end
+
+ private
+
+ # source://rumale-preprocessing//lib/rumale/preprocessing/kernel_calculator.rb#82
+ def kernel_mat(x, y); end
+end
+
+# Normalize samples to unit L1-norm.
+#
+# @example
+# require 'rumale/preprocessing/l1_normalizer'
+#
+# normalizer = Rumale::Preprocessing::L1Normalizer.new
+# new_samples = normalizer.fit_transform(samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/l1_normalizer.rb#16
+class Rumale::Preprocessing::L1Normalizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new normalizer for normalizing to L1-norm.
+ #
+ # @return [L1Normalizer] a new instance of L1Normalizer
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l1_normalizer.rb#24
+ def initialize; end
+
+ # Calculate L1-norms of each sample.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate L1-norms.
+ # @return [L1Normalizer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l1_normalizer.rb#34
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate L1-norms of each sample, and then normalize samples to L1-norm.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate L1-norms.
+ # @return [Numo::DFloat] The normalized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l1_normalizer.rb#48
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consists of L1-norm for each sample.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l1_normalizer.rb#21
+ def norm_vec; end
+
+ # Calculate L1-norms of each sample, and then normalize samples to L1-norm.
+ # This method calls the fit_transform method. This method exists for the Pipeline class.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate L1-norms.
+ # @return [Numo::DFloat] The normalized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l1_normalizer.rb#60
+ def transform(x); end
+end
+
+# Normalize samples to unit L2-norm.
+#
+# @example
+# require 'rumale/preprocessing/l2_normalizer'
+#
+# normalizer = Rumale::Preprocessing::L2Normalizer.new
+# new_samples = normalizer.fit_transform(samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/l2_normalizer.rb#17
+class Rumale::Preprocessing::L2Normalizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new normalizer for normalizing to unit L2-norm.
+ #
+ # @return [L2Normalizer] a new instance of L2Normalizer
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l2_normalizer.rb#25
+ def initialize; end
+
+ # Calculate L2-norms of each sample.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate L2-norms.
+ # @return [L2Normalizer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l2_normalizer.rb#35
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate L2-norms of each sample, and then normalize samples to unit L2-norm.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate L2-norms.
+ # @return [Numo::DFloat] The normalized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l2_normalizer.rb#49
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consists of L2-norm for each sample.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l2_normalizer.rb#22
+ def norm_vec; end
+
+ # Calculate L2-norms of each sample, and then normalize samples to unit L2-norm.
+ # This method calls the fit_transform method. This method exists for the Pipeline class.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate L2-norms.
+ # @return [Numo::DFloat] The normalized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/l2_normalizer.rb#61
+ def transform(x); end
+end
+
+# Encode labels to binary labels with one-vs-all scheme.
+#
+# @example
+# require 'rumale/preprocessing/label_binarizer'
+#
+# encoder = Rumale::Preprocessing::LabelBinarizer.new
+# label = [0, -1, 3, 3, 1, 1]
+# p encoder.fit_transform(label)
+# # Numo::Int32#shape=[6,4]
+# # [[0, 1, 0, 0],
+# # [1, 0, 0, 0],
+# # [0, 0, 0, 1],
+# # [0, 0, 0, 1],
+# # [0, 0, 1, 0],
+# # [0, 0, 1, 0]]
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#23
+class Rumale::Preprocessing::LabelBinarizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new encoder for binarizing labels with one-vs-all scheme.
+ #
+ # @param neg_label [Integer] The value represents negative label.
+ # @param pos_label [Integer] The value represents positive label.
+ # @return [LabelBinarizer] a new instance of LabelBinarizer
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#34
+ def initialize(neg_label: T.unsafe(nil), pos_label: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Array] (size: [n_classes])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#28
+ def classes; end
+
+ # Fit encoder to labels.
+ #
+ # @overload fit
+ # @return [LabelBinarizer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#47
+ def fit(y, _not_used = T.unsafe(nil)); end
+
+ # Fit encoder to labels, then return binarized labels.
+ #
+ # @overload fit_transform
+ # @return [Numo::Int32] (shape: [n_samples, n_classes]) The binarized labels.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#58
+ def fit_transform(y, _not_used = T.unsafe(nil)); end
+
+ # Decode binarized labels.
+ #
+ # @param x [Numo::Int32] (shape: [n_samples, n_classes]) The binarized labels to be decoded.
+ # @return [Array] (shape: [n_samples]) The decoded labels.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#80
+ def inverse_transform(x); end
+
+ # Encode labels.
+ #
+ # @param y [Array] (shape: [n_samples]) The labels to be encoded.
+ # @return [Numo::Int32] (shape: [n_samples, n_classes]) The binarized labels.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_binarizer.rb#67
+ def transform(y); end
+end
+
+# Encode labels to values between 0 and n_classes - 1.
+#
+# @example
+# require 'rumale/preprocessing/label_encoder'
+#
+# encoder = Rumale::Preprocessing::LabelEncoder.new
+# labels = Numo::Int32[1, 8, 8, 15, 0]
+# encoded_labels = encoder.fit_transform(labels)
+# # > pp encoded_labels
+# # Numo::Int32#shape=[5]
+# # [1, 2, 2, 3, 0]
+# decoded_labels = encoder.inverse_transform(encoded_labels)
+# # > pp decoded_labels
+# # [1, 8, 8, 15, 0]
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#22
+class Rumale::Preprocessing::LabelEncoder < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new encoder for encoding labels to values between 0 and n_classes - 1.
+ #
+ # @return [LabelEncoder] a new instance of LabelEncoder
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#30
+ def initialize; end
+
+ # Return the class labels.
+ #
+ # @return [Array] (size: [n_classes])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#27
+ def classes; end
+
+ # Fit label-encoder to labels.
+ #
+ # @overload fit
+ # @param x [Array] (shape: [n_samples]) The labels to fit label-encoder.
+ # @return [LabelEncoder]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#40
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit label-encoder to labels, then return encoded labels.
+ #
+ # @overload fit_transform
+ # @param x [Array] (shape: [n_samples]) The labels to fit label-encoder.
+ # @return [Numo::Int32] The encoded labels.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#52
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Decode encoded labels.
+ #
+ # @param x [Numo::Int32] (shape: [n_samples]) The labels to be decoded.
+ # @return [Array] The decoded labels.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#70
+ def inverse_transform(x); end
+
+ # Encode labels.
+ #
+ # @param x [Array] (shape: [n_samples]) The labels to be encoded.
+ # @return [Numo::Int32] The encoded labels.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/label_encoder.rb#61
+ def transform(x); end
+end
+
+# Normalize samples by scaling each feature with its maximum absolute value.
+#
+# @example
+# require 'rumale/preprocessing/max_abs_scaler'
+#
+# normalizer = Rumale::Preprocessing::MaxAbsScaler.new
+# new_training_samples = normalizer.fit_transform(training_samples)
+# new_testing_samples = normalizer.transform(testing_samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/max_abs_scaler.rb#17
+class Rumale::Preprocessing::MaxAbsScaler < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Creates a new normalizer for scaling each feature with its maximum absolute value.
+ #
+ # @return [MaxAbsScaler] a new instance of MaxAbsScaler
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_abs_scaler.rb#25
+ def initialize; end
+
+ # Calculate the minimum and maximum value of each feature for scaling.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate maximum absolute value for each feature.
+ # @return [MaxAbsScaler]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_abs_scaler.rb#35
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate the maximum absolute value for each feature, and then normalize samples.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate maximum absolute value for each feature.
+ # @return [Numo::DFloat] The scaled samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_abs_scaler.rb#48
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consists of the maximum absolute value for each feature.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_abs_scaler.rb#22
+ def max_abs_vec; end
+
+ # Perform scaling the given samples with maximum absolute value for each feature.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be scaled.
+ # @return [Numo::DFloat] The scaled samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_abs_scaler.rb#58
+ def transform(x); end
+end
+
+# Normalize samples with the maximum of the absolute values.
+#
+# @example
+# require 'rumale/preprocessing/max_normalizer'
+#
+# normalizer = Rumale::Preprocessing::MaxNormalizer.new
+# new_samples = normalizer.fit_transform(samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/max_normalizer.rb#16
+class Rumale::Preprocessing::MaxNormalizer < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new normalizer for normalizing to max-norm.
+ #
+ # @return [MaxNormalizer] a new instance of MaxNormalizer
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_normalizer.rb#24
+ def initialize; end
+
+ # Calculate the maximum norms of each sample.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate the maximum norms.
+ # @return [MaxNormalizer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_normalizer.rb#34
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate the maximum norm of each sample, and then normalize samples with the norms.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate maximum norms.
+ # @return [Numo::DFloat] The normalized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_normalizer.rb#48
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consists of the maximum norm for each sample.
+ #
+ # @return [Numo::DFloat] (shape: [n_samples])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_normalizer.rb#21
+ def norm_vec; end
+
+ # Calculate the maximum norms of each sample, and then normalize samples with the norms.
+ # This method calls the fit_transform method. This method exists for the Pipeline class.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate maximum norms.
+ # @return [Numo::DFloat] The normalized samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/max_normalizer.rb#60
+ def transform(x); end
+end
+
+# Normalize samples by scaling each feature to a given range.
+#
+# @example
+# require 'rumale/preprocessing/min_max_scaler'
+#
+# normalizer = Rumale::Preprocessing::MinMaxScaler.new(feature_range: [0.0, 1.0])
+# new_training_samples = normalizer.fit_transform(training_samples)
+# new_testing_samples = normalizer.transform(testing_samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#18
+class Rumale::Preprocessing::MinMaxScaler < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Creates a new normalizer for scaling each feature to a given range.
+ #
+ # @param feature_range [Array] The desired range of samples.
+ # @return [MinMaxScaler] a new instance of MinMaxScaler
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#32
+ def initialize(feature_range: T.unsafe(nil)); end
+
+ # Calculate the minimum and maximum value of each feature for scaling.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate the minimum and maximum values.
+ # @return [MinMaxScaler]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#43
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate the minimum and maximum values, and then normalize samples to feature_range.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to calculate the minimum and maximum values.
+ # @return [Numo::DFloat] The scaled samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#57
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consists of the maximum value for each feature.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#27
+ def max_vec; end
+
+ # Return the vector consists of the minimum value for each feature.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#23
+ def min_vec; end
+
+ # Perform scaling the given samples according to feature_range.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be scaled.
+ # @return [Numo::DFloat] The scaled samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/min_max_scaler.rb#67
+ def transform(x); end
+end
+
+# Encode categorical integer features to one-hot-vectors.
+#
+# @example
+# require 'rumale/preprocessing/one_hot_encoder'
+#
+# encoder = Rumale::Preprocessing::OneHotEncoder.new
+# labels = Numo::Int32[0, 0, 2, 3, 2, 1]
+# one_hot_vectors = encoder.fit_transform(labels)
+# # > pp one_hot_vectors
+# # Numo::DFloat#shape[6, 4]
+# # [[1, 0, 0, 0],
+# # [1, 0, 0, 0],
+# # [0, 0, 1, 0],
+# # [0, 0, 0, 1],
+# # [0, 0, 1, 0],
+# # [0, 1, 0, 0]]
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#24
+class Rumale::Preprocessing::OneHotEncoder < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new encoder for encoding categorical integer features to one-hot-vectors
+ #
+ # @return [OneHotEncoder] a new instance of OneHotEncoder
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#40
+ def initialize; end
+
+ # Return the indices for feature values that actually occur in the training set.
+ #
+ # @return [Numo::Int32]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#33
+ def active_features; end
+
+ # Return the indices to feature ranges.
+ #
+ # @return [Numo::Int32] (shape: [n_features + 1])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#37
+ def feature_indices; end
+
+ # Fit one-hot-encoder to samples.
+ #
+ # @overload fit
+ # @raise [ArgumentError]
+ # @return [OneHotEncoder]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#49
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit one-hot-encoder to samples, then encode samples into one-hot-vectors
+ #
+ # @overload fit_transform
+ # @param x [Numo::Int32] (shape: [n_samples, n_features]) The samples to encode into one-hot-vectors.
+ # @raise [ArgumentError]
+ # @return [Numo::DFloat] The one-hot-vectors.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#64
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the maximum values for each feature.
+ #
+ # @return [Numo::Int32] (shape: [n_features])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#29
+ def n_values; end
+
+ # Encode samples into one-hot-vectors.
+ #
+ # @param x [Numo::Int32] (shape: [n_samples, n_features]) The samples to encode into one-hot-vectors.
+ # @raise [ArgumentError]
+ # @return [Numo::DFloat] The one-hot-vectors.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#74
+ def transform(x); end
+
+ private
+
+ # source://rumale-preprocessing//lib/rumale/preprocessing/one_hot_encoder.rb#83
+ def encode(x, indices); end
+end
+
+# Transform categorical features to integer values.
+#
+# @example
+# require 'rumale/preprocessing/ordinal_encoder'
+#
+# encoder = Rumale::Preprocessing::OrdinalEncoder.new
+# training_samples = [['left', 10], ['right', 15], ['right', 20]]
+# training_samples = Numo::NArray.asarray(training_samples)
+# encoder.fit(training_samples)
+# p encoder.categories
+# # [["left", "right"], [10, 15, 20]]
+# testing_samples = [['left', 20], ['right', 10]]
+# testing_samples = Numo::NArray.asarray(testing_samples)
+# encoded = encoder.transform(testing_samples)
+# p encoded
+# # Numo::DFloat#shape=[2,2]
+# # [[0, 2],
+# # [1, 0]]
+# p encoder.inverse_transform(encoded)
+# # Numo::RObject#shape=[2,2]
+# # [["left", 20],
+# # ["right", 10]]
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#30
+class Rumale::Preprocessing::OrdinalEncoder < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new encoder that transform categorical features to integer values.
+ #
+ # @param categories [Nil/Array] The category list for each feature.
+ # If nil is given, extracted categories from the training data by calling the fit method are used.
+ # @return [OrdinalEncoder] a new instance of OrdinalEncoder
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#41
+ def initialize(categories: T.unsafe(nil)); end
+
+ # Return the array consisting of the categorical values for each feature.
+ #
+ # @return [Array] (size: n_features)
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#35
+ def categories; end
+
+ # Fit encoder by extracting the category for each feature.
+ #
+ # @overload fit
+ # @param x [Numo::NArray] (shape: [n_samples, n_features]) The samples consisting of categorical features.
+ # @raise [ArgumentError]
+ # @return [OrdinalEncoder]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#52
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Fit encoder, then return encoded categorical features to integer values.
+ #
+ # @overload fit_transform
+ # @param x [Numo::NArray] (shape: [n_samples, n_features]) The samples consisting of categorical features.
+ # @raise [ArgumentError]
+ # @return [Numo::DFloat] The encoded categorical features to integer values.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#66
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Decode values to categorical features.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples consisting of values transformed from categorical features.
+ # @return [Numo::NArray] The decoded features.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#96
+ def inverse_transform(x); end
+
+ # Encode categorical features.
+ #
+ # @param x [Numo::NArray] (shape: [n_samples, n_features]) The samples consisting of categorical features.
+ # @raise [ArgumentError]
+ # @return [Numo::DFloat] The encoded categorical features to integer values.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/ordinal_encoder.rb#76
+ def transform(x); end
+end
+
+# Generating polynomial features from the given samples.
+#
+# @example
+# require 'rumale/preprocessing/polynomial_features'
+#
+# transformer = Rumale::Preprocessing::PolynomialFeatures.new(degree: 2)
+# x = Numo::DFloat[[0, 1], [2, 3], [4, 5]]
+# z = transformer.fit_transform(x)
+# p z
+#
+# # Numo::DFloat#shape=[3,6]
+# # [[1, 0, 1, 0, 0, 1],
+# # [1, 2, 3, 4, 6, 9],
+# # [1, 4, 5, 16, 20, 25]]
+#
+# # If you want to perform polynomial regression, combine it with LinearRegression as follows:
+# require 'rumale/preprocessing/polynomial_features'
+# require 'rumale/linear_model/linear_regression'
+# require 'rumale/pipeline/pipeline'
+#
+# ply = Rumale::Preprocessing::PolynomialFeatures.new(degree: 2)
+# reg = Rumale::LinearModel::LinearRegression.new(fit_bias: false, random_seed: 1)
+# pipeline = Rumale::Pipeline::Pipeline.new(steps: { trs: ply, est: reg })
+# pipeline.fit(training_samples, training_values)
+# results = pipeline.predict(testing_samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/polynomial_features.rb#35
+class Rumale::Preprocessing::PolynomialFeatures < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a transformer for generating polynomial features.
+ #
+ # @param degree [Integer] The degree of polynomial features.
+ # @raise [ArgumentError]
+ # @return [PolynomialFeatures] a new instance of PolynomialFeatures
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/polynomial_features.rb#45
+ def initialize(degree: T.unsafe(nil)); end
+
+ # Calculate the number of output polynomial features.
+ #
+ # @overload fit
+ # @return [PolynomialFeatures]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/polynomial_features.rb#57
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate the number of polynomial features, and then transform samples to polynomial features.
+ #
+ # @overload fit_transform
+ # @return [Numo::DFloat] (shape: [n_samples, n_output_features]) The transformed samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/polynomial_features.rb#74
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the number of polynomial features.
+ #
+ # @return [Integer]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/polynomial_features.rb#40
+ def n_output_features; end
+
+ # Transform the given samples to polynomial features.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be transformed.
+ # @return [Numo::DFloat] (shape: [n_samples, n_output_features]) The transformed samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/polynomial_features.rb#84
+ def transform(x); end
+end
+
+# Normalize samples by centering and scaling to unit variance.
+#
+# @example
+# require 'rumale/preprocessing/standard_scaler'
+#
+# normalizer = Rumale::Preprocessing::StandardScaler.new
+# new_training_samples = normalizer.fit_transform(training_samples)
+# new_testing_samples = normalizer.transform(testing_samples)
+#
+# source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#18
+class Rumale::Preprocessing::StandardScaler < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Transformer
+
+ # Create a new normalizer for centering and scaling to unit variance.
+ #
+ # @return [StandardScaler] a new instance of StandardScaler
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#30
+ def initialize; end
+
+ # Calculate the mean value and standard deviation of each feature for scaling.
+ #
+ # @overload fit
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features])
+ # The samples to calculate the mean values and standard deviations.
+ # @return [StandardScaler]
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#41
+ def fit(x, _y = T.unsafe(nil)); end
+
+ # Calculate the mean values and standard deviations, and then normalize samples using them.
+ #
+ # @overload fit_transform
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features])
+ # The samples to calculate the mean values and standard deviations.
+ # @return [Numo::DFloat] The scaled samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#56
+ def fit_transform(x, _y = T.unsafe(nil)); end
+
+ # Return the vector consisting of the mean value for each feature.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#23
+ def mean_vec; end
+
+ # Return the vector consisting of the standard deviation for each feature.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#27
+ def std_vec; end
+
+ # Perform standardization the given samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be scaled.
+ # @return [Numo::DFloat] The scaled samples.
+ #
+ # source://rumale-preprocessing//lib/rumale/preprocessing/standard_scaler.rb#66
+ def transform(x); end
+end
+
+# source://rumale-preprocessing//lib/rumale/preprocessing/version.rb#8
+Rumale::Preprocessing::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/rumale-tree@1.0.0.rbi b/sorbet/rbi/gems/rumale-tree@1.0.0.rbi
new file mode 100644
index 00000000..169376e5
--- /dev/null
+++ b/sorbet/rbi/gems/rumale-tree@1.0.0.rbi
@@ -0,0 +1,861 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale-tree` gem.
+# Please instead update this file by running `bin/tapioca gem rumale-tree`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale-tree//lib/rumale/tree/node.rb#3
+module Rumale; end
+
+# This module consists of the classes that implement tree models.
+#
+# source://rumale-tree//lib/rumale/tree/node.rb#4
+module Rumale::Tree; end
+
+# BaseDecisionTree is an abstract class for implementation of decision tree-based estimator.
+# This class is used internally.
+#
+# source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#12
+class Rumale::Tree::BaseDecisionTree < ::Rumale::Base::Estimator
+ # Initialize a decision tree-based estimator.
+ #
+ # @param criterion [String] The function to evaluate splitting point.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [BaseDecisionTree] a new instance of BaseDecisionTree
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#25
+ def initialize(criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Leaf index for sample.
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#43
+ def apply(x); end
+
+ private
+
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#120
+ def best_split(_features, _y, _impurity); end
+
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#65
+ def build_tree(x, y); end
+
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#128
+ def eval_importance(n_samples, n_features); end
+
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#137
+ def eval_importance_at_node(node); end
+
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#69
+ def grow_node(depth, x, y, impurity); end
+
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#124
+ def impurity(_y); end
+
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#51
+ def partial_apply(tree, sample); end
+
+ # @raise [NotImplementedError]
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#112
+ def put_leaf(_node, _y); end
+
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#116
+ def rand_ids; end
+
+ # @raise [NotImplementedError]
+ # @return [Boolean]
+ #
+ # source://rumale-tree//lib/rumale/tree/base_decision_tree.rb#108
+ def stop_growing?(_y); end
+end
+
+# DecisionTreeClassifier is a class that implements decision tree for classification.
+#
+# @example
+# require 'rumale/tree/decision_tree_classifier'
+#
+# estimator =
+# Rumale::Tree::DecisionTreeClassifier.new(
+# criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#19
+class Rumale::Tree::DecisionTreeClassifier < ::Rumale::Tree::BaseDecisionTree
+ include ::Rumale::Base::Classifier
+ include ::Rumale::Tree::ExtDecisionTreeClassifier
+
+ # Create a new classifier with decision tree algorithm.
+ #
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [DecisionTreeClassifier] a new instance of DecisionTreeClassifier
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#55
+ def initialize(criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#25
+ def classes; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#29
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+ # @return [DecisionTreeClassifier] The learned classifier itself.
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#65
+ def fit(x, y); end
+
+ # Return the labels assigned each leaf.
+ #
+ # @return [Numo::Int32] (size: n_leafs)
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#41
+ def leaf_labels; end
+
+ # Predict class labels for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#90
+ def predict(x); end
+
+ # Predict probability for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+ # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#100
+ def predict_proba(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#37
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#33
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#136
+ def best_split(features, y, whole_impurity); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#122
+ def build_tree(x, y); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#142
+ def impurity(y); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#108
+ def partial_predict_proba(tree, sample); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_classifier.rb#127
+ def put_leaf(node, y); end
+end
+
+# DecisionTreeRegressor is a class that implements decision tree for regression.
+#
+# @example
+# require 'rumale/tree/decision_tree_regressor'
+#
+# estimator =
+# Rumale::Tree::DecisionTreeRegressor.new(
+# max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#19
+class Rumale::Tree::DecisionTreeRegressor < ::Rumale::Tree::BaseDecisionTree
+ include ::Rumale::Base::Regressor
+ include ::Rumale::Tree::ExtDecisionTreeRegressor
+
+ # Create a new regressor with decision tree algorithm.
+ #
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'mae' and 'mse'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [DecisionTreeRegressor] a new instance of DecisionTreeRegressor
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#51
+ def initialize(criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#25
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+ # @return [DecisionTreeRegressor] The learned regressor itself.
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#61
+ def fit(x, y); end
+
+ # Return the values assigned each leaf.
+ #
+ # @return [Numo::DFloat] (shape: [n_leafs, n_outputs])
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#37
+ def leaf_values; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#84
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#33
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#29
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#107
+ def best_split(f, y, impurity); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#92
+ def build_tree(x, y); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#111
+ def impurity(y); end
+
+ # source://rumale-tree//lib/rumale/tree/decision_tree_regressor.rb#98
+ def put_leaf(node, y); end
+end
+
+module Rumale::Tree::ExtDecisionTreeClassifier
+ private
+
+ def find_split_params(_arg0, _arg1, _arg2, _arg3, _arg4, _arg5); end
+ def node_impurity(_arg0, _arg1, _arg2); end
+ def stop_growing?(_arg0); end
+end
+
+module Rumale::Tree::ExtDecisionTreeRegressor
+ private
+
+ def find_split_params(_arg0, _arg1, _arg2, _arg3, _arg4); end
+ def node_impurity(_arg0, _arg1); end
+ def stop_growing?(_arg0); end
+end
+
+module Rumale::Tree::ExtGradientTreeRegressor
+ private
+
+ def find_split_params(_arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6); end
+end
+
+# ExtraTreeClassifier is a class that implements extra randomized tree for classification.
+#
+# *Reference*
+# - Geurts, P., Ernst, D., and Wehenkel, L., "Extremely randomized trees," Machine Learning, vol. 63 (1), pp. 3--42, 2006.
+#
+# @example
+# require 'rumale/tree/extra_tree_classifier'
+#
+# estimator =
+# Rumale::Tree::ExtraTreeClassifier.new(
+# criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#20
+class Rumale::Tree::ExtraTreeClassifier < ::Rumale::Tree::DecisionTreeClassifier
+ # Create a new classifier with extra randomized tree algorithm.
+ #
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'gini' and 'entropy'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, extra tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on extra tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [ExtraTreeClassifier] a new instance of ExtraTreeClassifier
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#53
+ def initialize(criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#23
+ def classes; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#27
+ def feature_importances; end
+
+ # Return the labels assigned each leaf.
+ #
+ # @return [Numo::Int32] (size: n_leafs)
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#39
+ def leaf_labels; end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#35
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#31
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/extra_tree_classifier.rb#76
+ def best_split(features, y, whole_impurity); end
+end
+
+# ExtraTreeRegressor is a class that implements extra randomized tree for regression.
+#
+# *Reference*
+# - Geurts, P., Ernst, D., and Wehenkel, L., "Extremely randomized trees," Machine Learning, vol. 63 (1), pp. 3--42, 2006.
+#
+# @example
+# require 'rumale/tree/extra_tree_regressor'
+#
+# estimator =
+# Rumale::Tree::ExtraTreeRegressor.new(
+# max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+# estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#20
+class Rumale::Tree::ExtraTreeRegressor < ::Rumale::Tree::DecisionTreeRegressor
+ # Create a new regressor with extra randomized tree algorithm.
+ #
+ # @param criterion [String] The function to evaluate splitting point. Supported criteria are 'mae' and 'mse'.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, extra tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on extra tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [ExtraTreeRegressor] a new instance of ExtraTreeRegressor
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#49
+ def initialize(criterion: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#23
+ def feature_importances; end
+
+ # Return the values assigned each leaf.
+ #
+ # @return [Numo::DFloat] (shape: [n_leafs, n_outputs])
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#35
+ def leaf_values; end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#31
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#27
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/extra_tree_regressor.rb#67
+ def best_split(features, y, whole_impurity); end
+end
+
+# GradientTreeRegressor is a class that implements decision tree for regression with exact greedy algorithm.
+# This class is used internally for estimators with gradient tree boosting.
+#
+# *Reference*
+# - Friedman, J H., "Greedy Function Approximation: A Gradient Boosting Machine," Annals of Statistics, 29 (5), pp. 1189--1232, 2001.
+# - Friedman, J H., "Stochastic Gradient Boosting," Computational Statistics and Data Analysis, 38 (4), pp. 367--378, 2002.
+# - Chen, T., and Guestrin, C., "XGBoost: A Scalable Tree Boosting System," Proc. KDD'16, pp. 785--794, 2016.
+#
+# source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#18
+class Rumale::Tree::GradientTreeRegressor < ::Rumale::Base::Estimator
+ include ::Rumale::Base::Regressor
+ include ::Rumale::Tree::ExtGradientTreeRegressor
+
+ # Initialize a gradient tree regressor
+ #
+ # @param reg_lambda [Float] The L2 regularization term on weight.
+ # @param shrinkage_rate [Float] The shrinkage rate for weight.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, decision tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on decision tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding splitting point.
+ # @return [GradientTreeRegressor] a new instance of GradientTreeRegressor
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#52
+ def initialize(reg_lambda: T.unsafe(nil), shrinkage_rate: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the index of the leaf that each sample reached.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+ # @return [Numo::Int32] (shape: [n_samples]) Leaf index for sample.
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#106
+ def apply(x); end
+
+ # Return the importance for each feature.
+ # The feature importances are calculated based on the numbers of times the feature is used for splitting.
+ #
+ # @return [Numo::DFloat] (shape: [n_features])
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#25
+ def feature_importances; end
+
+ # Fit the model with given training data.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+ # @param y [Numo::DFloat] (shape: [n_samples]) The taget values to be used for fitting the model.
+ # @param g [Numo::DFloat] (shape: [n_samples]) The gradient of loss function.
+ # @param h [Numo::DFloat] (shape: [n_samples]) The hessian of loss function.
+ # @return [GradientTreeRegressor] The learned regressor itself.
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#74
+ def fit(x, y, g, h); end
+
+ # Return the values assigned each leaf.
+ #
+ # @return [Numo::DFloat] (shape: [n_leaves])
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#37
+ def leaf_weights; end
+
+ # Predict values for samples.
+ #
+ # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+ # @return [Numo::DFloat] (size: n_samples) Predicted values per sample.
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#96
+ def predict(x); end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#33
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#29
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#183
+ def best_split(f, g, h, sum_g, sum_h); end
+
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#128
+ def build_tree(x, y, g, h); end
+
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#135
+ def grow_node(depth, x, y, g, h); end
+
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#114
+ def partial_apply(tree, sample); end
+
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#173
+ def put_leaf(node, sum_g, sum_h); end
+
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#187
+ def rand_ids; end
+
+ # @return [Boolean]
+ #
+ # source://rumale-tree//lib/rumale/tree/gradient_tree_regressor.rb#169
+ def stop_growing?(y); end
+end
+
+# Node is a class that implements node used for construction of decision tree.
+# This class is used for internal data structures.
+#
+# source://rumale-tree//lib/rumale/tree/node.rb#7
+class Rumale::Tree::Node
+ # Create a new node for decision tree.
+ #
+ # @param depth [Integer] The depth of the node in tree.
+ # @param impurity [Float] The impurity of the node.
+ # @param n_samples [Integer] The number of the samples in the node.
+ # @param probs [Float] The probability of the node.
+ # @param leaf [Boolean] The flag indicating whether the node is a leaf.
+ # @param leaf_id [Integer] The leaf index of the node.
+ # @param left [Node] The left node.
+ # @param right [Node] The right node.
+ # @param feature_id [Integer] The feature index used for evaluation.
+ # @param threshold [Float] The threshold value of the feature for splitting the node.
+ # @return [Node] a new instance of Node
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#23
+ def initialize(depth: T.unsafe(nil), impurity: T.unsafe(nil), n_samples: T.unsafe(nil), probs: T.unsafe(nil), leaf: T.unsafe(nil), leaf_id: T.unsafe(nil), left: T.unsafe(nil), right: T.unsafe(nil), feature_id: T.unsafe(nil), threshold: T.unsafe(nil)); end
+
+ # Returns the value of attribute depth.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def depth; end
+
+ # Sets the attribute depth
+ #
+ # @param value the value to set the attribute depth to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def depth=(_arg0); end
+
+ # Returns the value of attribute feature_id.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def feature_id; end
+
+ # Sets the attribute feature_id
+ #
+ # @param value the value to set the attribute feature_id to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def feature_id=(_arg0); end
+
+ # Returns the value of attribute impurity.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def impurity; end
+
+ # Sets the attribute impurity
+ #
+ # @param value the value to set the attribute impurity to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def impurity=(_arg0); end
+
+ # Returns the value of attribute leaf.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def leaf; end
+
+ # Sets the attribute leaf
+ #
+ # @param value the value to set the attribute leaf to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def leaf=(_arg0); end
+
+ # Returns the value of attribute leaf_id.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def leaf_id; end
+
+ # Sets the attribute leaf_id
+ #
+ # @param value the value to set the attribute leaf_id to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def leaf_id=(_arg0); end
+
+ # Returns the value of attribute left.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def left; end
+
+ # Sets the attribute left
+ #
+ # @param value the value to set the attribute left to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def left=(_arg0); end
+
+ # Returns the value of attribute n_samples.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def n_samples; end
+
+ # Sets the attribute n_samples
+ #
+ # @param value the value to set the attribute n_samples to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def n_samples=(_arg0); end
+
+ # Returns the value of attribute probs.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def probs; end
+
+ # Sets the attribute probs
+ #
+ # @param value the value to set the attribute probs to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def probs=(_arg0); end
+
+ # Returns the value of attribute right.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def right; end
+
+ # Sets the attribute right
+ #
+ # @param value the value to set the attribute right to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def right=(_arg0); end
+
+ # Returns the value of attribute threshold.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def threshold; end
+
+ # Sets the attribute threshold
+ #
+ # @param value the value to set the attribute threshold to.
+ #
+ # source://rumale-tree//lib/rumale/tree/node.rb#9
+ def threshold=(_arg0); end
+end
+
+# source://rumale-tree//lib/rumale/tree/version.rb#8
+Rumale::Tree::VERSION = T.let(T.unsafe(nil), String)
+
+# VRTreeClassifier is a class that implements Variable-Random (VR) tree for classification.
+#
+# *Reference*
+# - Liu, F. T., Ting, K. M., Yu, Y., and Zhou, Z. H., "Spectrum of Variable-Random Trees," Journal of Artificial Intelligence Research, vol. 32, pp. 355--384, 2008.
+#
+# @example
+# require 'rumale/tree/vr_tree_classifier'
+#
+# estimator =
+# Rumale::Tree::VRTreeClassifier.new(
+# criterion: 'gini', max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+#   estimator.fit(training_samples, training_labels)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#20
+class Rumale::Tree::VRTreeClassifier < ::Rumale::Tree::DecisionTreeClassifier
+ # Create a new classifier with variable-random tree algorithm.
+ #
+ # @param criterion [String] The function to evaluate spliting point. Supported criteria are 'gini' and 'entropy'.
+ # @param alpha [Float] The probability of choosing a deterministic or random spliting point.
+ # If 1.0 is given, the tree is the same as the normal decision tree.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, variable-random tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on variable-random tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding spliting point.
+ # @return [VRTreeClassifier] a new instance of VRTreeClassifier
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#55
+ def initialize(criterion: T.unsafe(nil), alpha: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the class labels.
+ #
+ # @return [Numo::Int32] (size: n_classes)
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#23
+ def classes; end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#27
+ def feature_importances; end
+
+ # Return the labels assigned each leaf.
+ #
+ # @return [Numo::Int32] (size: n_leafs)
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#39
+ def leaf_labels; end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#35
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#31
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/vr_tree_classifier.rb#80
+ def best_split(features, y, whole_impurity); end
+end
+
+# VRTreeRegressor is a class that implements Variable-Random (VR) tree for regression.
+#
+# *Reference*
+# - Liu, F. T., Ting, K. M., Yu, Y., and Zhou, Z. H., "Spectrum of Variable-Random Trees," Journal of Artificial Intelligence Research, vol. 32, pp. 355--384, 2008.
+#
+# @example
+# require 'rumale/tree/vr_tree_regressor'
+#
+# estimator =
+# Rumale::Tree::VRTreeRegressor.new(
+# max_depth: 3, max_leaf_nodes: 10, min_samples_leaf: 5, random_seed: 1)
+#   estimator.fit(training_samples, training_values)
+# results = estimator.predict(testing_samples)
+#
+# source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#20
+class Rumale::Tree::VRTreeRegressor < ::Rumale::Tree::DecisionTreeRegressor
+ # Create a new regressor with variable-random tree algorithm.
+ #
+ # @param criterion [String] The function to evaluate spliting point. Supported criteria are 'mae' and 'mse'.
+ # @param alpha [Float] The probability of choosing a deterministic or random spliting point.
+ # If 1.0 is given, the tree is the same as the normal decision tree.
+ # @param max_depth [Integer] The maximum depth of the tree.
+ # If nil is given, variable-random tree grows without concern for depth.
+ # @param max_leaf_nodes [Integer] The maximum number of leaves on variable-random tree.
+ # If nil is given, number of leaves is not limited.
+ # @param min_samples_leaf [Integer] The minimum number of samples at a leaf node.
+ # @param max_features [Integer] The number of features to consider when searching optimal split point.
+ # If nil is given, split process considers all features.
+ # @param random_seed [Integer] The seed value using to initialize the random generator.
+ # It is used to randomly determine the order of features when deciding spliting point.
+ # @return [VRTreeRegressor] a new instance of VRTreeRegressor
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#51
+ def initialize(criterion: T.unsafe(nil), alpha: T.unsafe(nil), max_depth: T.unsafe(nil), max_leaf_nodes: T.unsafe(nil), min_samples_leaf: T.unsafe(nil), max_features: T.unsafe(nil), random_seed: T.unsafe(nil)); end
+
+ # Return the importance for each feature.
+ #
+ # @return [Numo::DFloat] (size: n_features)
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#23
+ def feature_importances; end
+
+ # Return the values assigned each leaf.
+ #
+ # @return [Numo::DFloat] (shape: [n_leafs, n_outputs])
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#35
+ def leaf_values; end
+
+ # Return the random generator for random selection of feature index.
+ #
+ # @return [Random]
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#31
+ def rng; end
+
+ # Return the learned tree.
+ #
+ # @return [Node]
+ #
+ # source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#27
+ def tree; end
+
+ private
+
+ # source://rumale-tree//lib/rumale/tree/vr_tree_regressor.rb#71
+ def best_split(features, y, whole_impurity); end
+end
diff --git a/sorbet/rbi/gems/rumale@1.0.0.rbi b/sorbet/rbi/gems/rumale@1.0.0.rbi
new file mode 100644
index 00000000..7f8f0f30
--- /dev/null
+++ b/sorbet/rbi/gems/rumale@1.0.0.rbi
@@ -0,0 +1,16 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `rumale` gem.
+# Please instead update this file by running `bin/tapioca gem rumale`.
+
+
+# Rumale is a machine learning library in Ruby.
+#
+# source://rumale//lib/rumale/version.rb#4
+module Rumale; end
+
+# The version of Rumale you are using.
+#
+# source://rumale//lib/rumale/version.rb#6
+Rumale::VERSION = T.let(T.unsafe(nil), String)
diff --git a/sorbet/rbi/gems/unicode_plot@0.0.5.rbi b/sorbet/rbi/gems/unicode_plot@0.0.5.rbi
new file mode 100644
index 00000000..aa57017e
--- /dev/null
+++ b/sorbet/rbi/gems/unicode_plot@0.0.5.rbi
@@ -0,0 +1,1222 @@
+# typed: true
+
+# DO NOT EDIT MANUALLY
+# This is an autogenerated file for types exported from the `unicode_plot` gem.
+# Please instead update this file by running `bin/tapioca gem unicode_plot`.
+
+
+# source://unicode_plot//lib/unicode_plot/version.rb#1
+module UnicodePlot
+ private
+
+ # @overload barplot
+ # @overload barplot
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#123
+ def barplot(*args, width: T.unsafe(nil), color: T.unsafe(nil), symbol: T.unsafe(nil), border: T.unsafe(nil), xscale: T.unsafe(nil), xlabel: T.unsafe(nil), data: T.unsafe(nil), **kw); end
+
+ # @overload barplot!
+ # @overload barplot!
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#177
+ def barplot!(plot, *args, data: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#94
+ def boxplot(*args, data: T.unsafe(nil), border: T.unsafe(nil), color: T.unsafe(nil), width: T.unsafe(nil), xlim: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#157
+ def boxplot!(plot, *args, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/stairs.rb#63
+ def compute_stair_lines(x, y, style: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/densityplot.rb#2
+ def densityplot(x, y, color: T.unsafe(nil), grid: T.unsafe(nil), name: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/densityplot.rb#7
+ def densityplot!(plot, x, y, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/histogram.rb#4
+ def histogram(x, nbins: T.unsafe(nil), closed: T.unsafe(nil), symbol: T.unsafe(nil), **kw); end
+
+ # @overload lineplot
+ #
+ # source://unicode_plot//lib/unicode_plot/lineplot.rb#33
+ def lineplot(*args, canvas: T.unsafe(nil), color: T.unsafe(nil), name: T.unsafe(nil), **kw); end
+
+ # @overload lineplot!
+ #
+ # source://unicode_plot//lib/unicode_plot/lineplot.rb#80
+ def lineplot!(plot, *args, color: T.unsafe(nil), name: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/scatterplot.rb#5
+ def scatterplot(*args, canvas: T.unsafe(nil), color: T.unsafe(nil), name: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/scatterplot.rb#27
+ def scatterplot!(plot, *args, color: T.unsafe(nil), name: T.unsafe(nil)); end
+
+ # @overload stairs
+ #
+ # source://unicode_plot//lib/unicode_plot/stairs.rb#52
+ def stairs(xvec, yvec, style: T.unsafe(nil), **kw); end
+
+ # Similar to stairs, but takes an existing plot object as a first argument.
+ #
+ # source://unicode_plot//lib/unicode_plot/stairs.rb#58
+ def stairs!(plot, xvec, yvec, style: T.unsafe(nil), **kw); end
+
+ # Generates one or more {Stemplot} objects from the input data
+ # and prints a Single or Double stemplot using {stemplot1!} or {stemplot2!}
+ #
+ # @example Single sided stemplot
+ # >> UnicodePlot.stemplot(eighty_ints)
+ # 0 | 257
+ # 1 | 00335679
+ # 2 | 034455899
+ # 3 | 145588
+ # 4 | 0022223
+ # 5 | 0223399
+ # 6 | 012345568889
+ # 7 | 01133334466777888
+ # 8 | 013689
+ # 9 | 22667
+ # Key: 1|0 = 10
+ # The decimal is 1 digit(s) to the right of |
+ # @example Back-to-back stemplot
+ # >> UnicodePlot.stemplot(eighty_ints, another_eighty_ints)
+ # 752 | 0 | 1244457899
+ # 97653300 | 1 | 4799
+ # 998554430 | 2 | 015668
+ # 885541 | 3 | 0144557888899
+ # 3222200 | 4 | 00268
+ # 9933220 | 5 | 0234778
+ # 988865543210 | 6 | 122222357889
+ # 88877766443333110 | 7 | 134556689
+ # 986310 | 8 | 24589
+ # 76622 | 9 | 022234468
+ # Key: 1|0 = 10
+ # The decimal is 1 digit(s) to the right of |
+ # @see Stemplot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#323
+ def stemplot(*args, scale: T.unsafe(nil), **kw); end
+
+ # Print a Single-Vector stemplot to STDOUT.
+ #
+ # - Stem data is printed on the left.
+ # - Leaf data is printed on the right.
+ # - Key is printed at the bottom.
+ #
+ # @param plt [Stemplot] Stemplot object
+ # @param scale [Integer] Scale, should be a power of 10
+ # @param divider [String] Divider character between stem and leaf
+ # @param padchar [String] Padding character
+ # @param trim [Boolean] Trim missing stems from the plot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#233
+ def stemplot1!(plt, scale: T.unsafe(nil), divider: T.unsafe(nil), padchar: T.unsafe(nil), trim: T.unsafe(nil), **_kw); end
+
+ # Print a Back-to-Back Stemplot to STDOUT
+ #
+ # - +plt1+ Leaf data is printed on the left.
+ # - Common stem data is printed in the center.
+ # - +plt2+ Leaf data is printed on the right.
+ # - Key is printed at the bottom.
+ #
+ # @param plt1 [Stemplot] Stemplot object for the left side
+ # @param plt2 [Stemplot] Stemplot object for the right side
+ # @param scale [Integer] Scale, should be a power of 10
+ # @param divider [String] Divider character between stem and leaf
+ # @param padchar [String] Padding character
+ # @param trim [Boolean] Trim missing stems from the plot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#265
+ def stemplot2!(plt1, plt2, scale: T.unsafe(nil), divider: T.unsafe(nil), padchar: T.unsafe(nil), trim: T.unsafe(nil), **_kw); end
+
+ class << self
+ # @overload barplot
+ # @overload barplot
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#123
+ def barplot(*args, width: T.unsafe(nil), color: T.unsafe(nil), symbol: T.unsafe(nil), border: T.unsafe(nil), xscale: T.unsafe(nil), xlabel: T.unsafe(nil), data: T.unsafe(nil), **kw); end
+
+ # @overload barplot!
+ # @overload barplot!
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#177
+ def barplot!(plot, *args, data: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#43
+ def border_types; end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#94
+ def boxplot(*args, data: T.unsafe(nil), border: T.unsafe(nil), color: T.unsafe(nil), width: T.unsafe(nil), xlim: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#157
+ def boxplot!(plot, *args, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#167
+ def canvas_types; end
+
+ # source://unicode_plot//lib/unicode_plot/stairs.rb#63
+ def compute_stair_lines(x, y, style: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/densityplot.rb#2
+ def densityplot(x, y, color: T.unsafe(nil), grid: T.unsafe(nil), name: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/densityplot.rb#7
+ def densityplot!(plot, x, y, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/histogram.rb#4
+ def histogram(x, nbins: T.unsafe(nil), closed: T.unsafe(nil), symbol: T.unsafe(nil), **kw); end
+
+ # @overload lineplot
+ #
+ # source://unicode_plot//lib/unicode_plot/lineplot.rb#33
+ def lineplot(*args, canvas: T.unsafe(nil), color: T.unsafe(nil), name: T.unsafe(nil), **kw); end
+
+ # @overload lineplot!
+ #
+ # source://unicode_plot//lib/unicode_plot/lineplot.rb#80
+ def lineplot!(plot, *args, color: T.unsafe(nil), name: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/scatterplot.rb#5
+ def scatterplot(*args, canvas: T.unsafe(nil), color: T.unsafe(nil), name: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/scatterplot.rb#27
+ def scatterplot!(plot, *args, color: T.unsafe(nil), name: T.unsafe(nil)); end
+
+ # @overload stairs
+ #
+ # source://unicode_plot//lib/unicode_plot/stairs.rb#52
+ def stairs(xvec, yvec, style: T.unsafe(nil), **kw); end
+
+ # Similar to stairs, but takes an existing plot object as a first argument.
+ #
+ # source://unicode_plot//lib/unicode_plot/stairs.rb#58
+ def stairs!(plot, xvec, yvec, style: T.unsafe(nil), **kw); end
+
+ # Generates one or more {Stemplot} objects from the input data
+ # and prints a Single or Double stemplot using {stemplot1!} or {stemplot2!}
+ #
+ # @example Single sided stemplot
+ # >> UnicodePlot.stemplot(eighty_ints)
+ # 0 | 257
+ # 1 | 00335679
+ # 2 | 034455899
+ # 3 | 145588
+ # 4 | 0022223
+ # 5 | 0223399
+ # 6 | 012345568889
+ # 7 | 01133334466777888
+ # 8 | 013689
+ # 9 | 22667
+ # Key: 1|0 = 10
+ # The decimal is 1 digit(s) to the right of |
+ # @example Back-to-back stemplot
+ # >> UnicodePlot.stemplot(eighty_ints, another_eighty_ints)
+ # 752 | 0 | 1244457899
+ # 97653300 | 1 | 4799
+ # 998554430 | 2 | 015668
+ # 885541 | 3 | 0144557888899
+ # 3222200 | 4 | 00268
+ # 9933220 | 5 | 0234778
+ # 988865543210 | 6 | 122222357889
+ # 88877766443333110 | 7 | 134556689
+ # 986310 | 8 | 24589
+ # 76622 | 9 | 022234468
+ # Key: 1|0 = 10
+ # The decimal is 1 digit(s) to the right of |
+ # @see Stemplot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#323
+ def stemplot(*args, scale: T.unsafe(nil), **kw); end
+
+ # Print a Single-Vector stemplot to STDOUT.
+ #
+ # - Stem data is printed on the left.
+ # - Leaf data is printed on the right.
+ # - Key is printed at the bottom.
+ #
+ # @param plt [Stemplot] Stemplot object
+ # @param scale [Integer] Scale, should be a power of 10
+ # @param divider [String] Divider character between stem and leaf
+ # @param padchar [String] Padding character
+ # @param trim [Boolean] Trim missing stems from the plot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#233
+ def stemplot1!(plt, scale: T.unsafe(nil), divider: T.unsafe(nil), padchar: T.unsafe(nil), trim: T.unsafe(nil), **_kw); end
+
+ # Print a Back-to-Back Stemplot to STDOUT
+ #
+ # - +plt1+ Leaf data is printed on the left.
+ # - Common stem data is printed in the center.
+ # - +plt2+ Leaf data is printed on the right.
+ # - Key is printed at the bottom.
+ #
+ # @param plt1 [Stemplot] Stemplot object for the left side
+ # @param plt2 [Stemplot] Stemplot object for the right side
+ # @param scale [Integer] Scale, should be a power of 10
+ # @param divider [String] Divider character between stem and leaf
+ # @param padchar [String] Padding character
+ # @param trim [Boolean] Trim missing stems from the plot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#265
+ def stemplot2!(plt1, plt2, scale: T.unsafe(nil), divider: T.unsafe(nil), padchar: T.unsafe(nil), trim: T.unsafe(nil), **_kw); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#4
+class UnicodePlot::AsciiCanvas < ::UnicodePlot::LookupCanvas
+ # @return [AsciiCanvas] a new instance of AsciiCanvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#110
+ def initialize(width, height, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#120
+ def lookup_decode(code); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#116
+ def lookup_encode(x, y); end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#99
+UnicodePlot::AsciiCanvas::ASCII_DECODE = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#13
+UnicodePlot::AsciiCanvas::ASCII_LOOKUP = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#7
+UnicodePlot::AsciiCanvas::ASCII_SIGNS = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/ascii_canvas.rb#108
+UnicodePlot::AsciiCanvas::PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#37
+UnicodePlot::BORDER_MAP = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/barplot.rb#2
+class UnicodePlot::Barplot < ::UnicodePlot::Plot
+ include ::UnicodePlot::ValueTransformer
+
+ # @return [Barplot] a new instance of Barplot
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#9
+ def initialize(bars, width, color, symbol, transform, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#36
+ def add_row!(bars); end
+
+ # Returns the value of attribute max_freq.
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#24
+ def max_freq; end
+
+ # Returns the value of attribute max_len.
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#25
+ def max_len; end
+
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#32
+ def n_columns; end
+
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#28
+ def n_rows; end
+
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#42
+ def print_row(out, row_index); end
+
+ # Returns the value of attribute width.
+ #
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#26
+ def width; end
+
+ private
+
+ # source://unicode_plot//lib/unicode_plot/barplot.rb#59
+ def find_max(values); end
+end
+
+# source://unicode_plot//lib/unicode_plot/barplot.rb#6
+UnicodePlot::Barplot::DEFAULT_COLOR = T.let(T.unsafe(nil), Symbol)
+
+# source://unicode_plot//lib/unicode_plot/barplot.rb#7
+UnicodePlot::Barplot::DEFAULT_SYMBOL = T.let(T.unsafe(nil), String)
+
+# source://unicode_plot//lib/unicode_plot/barplot.rb#5
+UnicodePlot::Barplot::MIN_WIDTH = T.let(T.unsafe(nil), Integer)
+
+# The `BlockCanvas` is also Unicode-based.
+# It has half the resolution of the `BrailleCanvas`.
+# In contrast to BrailleCanvas, the pixels don't
+# have visible spacing between them.
+# This canvas effectively turns every character
+# into 4 pixels that can individually be manipulated
+# using binary operations.
+#
+# source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#9
+class UnicodePlot::BlockCanvas < ::UnicodePlot::LookupCanvas
+ # @return [BlockCanvas] a new instance of BlockCanvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#15
+ def initialize(width, height, fill_char = T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#36
+ def lookup_decode(x); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#35
+ def lookup_encode(x, y); end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#28
+UnicodePlot::BlockCanvas::BLOCK_DECODE = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#23
+UnicodePlot::BlockCanvas::BLOCK_SIGNS = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#12
+UnicodePlot::BlockCanvas::X_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/block_canvas.rb#13
+UnicodePlot::BlockCanvas::Y_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#2
+module UnicodePlot::BorderMaps; end
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#25
+UnicodePlot::BorderMaps::BORDER_BARPLOT = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#14
+UnicodePlot::BorderMaps::BORDER_CORNERS = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#3
+UnicodePlot::BorderMaps::BORDER_SOLID = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#47
+module UnicodePlot::BorderPrinter
+ include ::UnicodePlot::StyledPrinter
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#56
+ def print_border_bottom(out, padding, length, border = T.unsafe(nil), color: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#50
+ def print_border_top(out, padding, length, border = T.unsafe(nil), color: T.unsafe(nil)); end
+end
+
+# source://unicode_plot//lib/unicode_plot/boxplot.rb#4
+class UnicodePlot::Boxplot < ::UnicodePlot::Plot
+ # @return [Boxplot] a new instance of Boxplot
+ #
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#8
+ def initialize(data, width, color, min_x, max_x, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#37
+ def add_series!(data); end
+
+ # Returns the value of attribute max_x.
+ #
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#23
+ def max_x; end
+
+ # Returns the value of attribute min_x.
+ #
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#22
+ def min_x; end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#33
+ def n_columns; end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#25
+ def n_data; end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#29
+ def n_rows; end
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#44
+ def print_row(out, row_index); end
+
+ private
+
+ # source://unicode_plot//lib/unicode_plot/boxplot.rb#86
+ def transform(values); end
+end
+
+# source://unicode_plot//lib/unicode_plot/boxplot.rb#6
+UnicodePlot::Boxplot::DEFAULT_COLOR = T.let(T.unsafe(nil), Symbol)
+
+# source://unicode_plot//lib/unicode_plot/boxplot.rb#5
+UnicodePlot::Boxplot::MIN_WIDTH = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#2
+class UnicodePlot::BrailleCanvas < ::UnicodePlot::Canvas
+ # @return [BrailleCanvas] a new instance of BrailleCanvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#23
+ def initialize(width, height, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#33
+ def pixel!(pixel_x, pixel_y, color); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#56
+ def print_row(out, row_index); end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#8
+UnicodePlot::BrailleCanvas::BRAILLE_SIGNS = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#5
+UnicodePlot::BrailleCanvas::X_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/braille_canvas.rb#6
+UnicodePlot::BrailleCanvas::Y_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas.rb#2
+class UnicodePlot::Canvas
+ include ::UnicodePlot::StyledPrinter
+ include ::UnicodePlot::BorderPrinter
+
+ # @return [Canvas] a new instance of Canvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#17
+ def initialize(width, height, pixel_width, pixel_height, fill_char, origin_x: T.unsafe(nil), origin_y: T.unsafe(nil), plot_width: T.unsafe(nil), plot_height: T.unsafe(nil), x_pixel_per_char: T.unsafe(nil), y_pixel_per_char: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#71
+ def char_at(x, y); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#75
+ def color_at(x, y); end
+
+ # Returns the value of attribute height.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#39
+ def height; end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#79
+ def index_at(x, y); end
+
+ # digital differential analyzer algorithm
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#112
+ def line!(x1, y1, x2, y2, color); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#149
+ def lines!(x, y, color = T.unsafe(nil)); end
+
+ # Returns the value of attribute origin_x.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#42
+ def origin_x; end
+
+ # Returns the value of attribute origin_y.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#43
+ def origin_y; end
+
+ # Returns the value of attribute pixel_height.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#41
+ def pixel_height; end
+
+ # Returns the value of attribute pixel_width.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#40
+ def pixel_width; end
+
+ # Returns the value of attribute plot_height.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#45
+ def plot_height; end
+
+ # Returns the value of attribute plot_width.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#44
+ def plot_width; end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#84
+ def point!(x, y, color); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#99
+ def points!(x, y, color = T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#64
+ def print(out); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#49
+ def show(out); end
+
+ # Returns the value of attribute width.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#38
+ def width; end
+
+ # Returns the value of attribute x_pixel_per_char.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#46
+ def x_pixel_per_char; end
+
+ # Returns the value of attribute y_pixel_per_char.
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#47
+ def y_pixel_per_char; end
+
+ private
+
+ # @raise [ArgumentError]
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#161
+ def check_positive(value, name); end
+
+ class << self
+ # source://unicode_plot//lib/unicode_plot/canvas.rb#7
+ def create(canvas_type, width, height, **kw); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas.rb#5
+UnicodePlot::Canvas::CANVAS_CLASS_MAP = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#2
+class UnicodePlot::DensityCanvas < ::UnicodePlot::Canvas
+ # @return [DensityCanvas] a new instance of DensityCanvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#13
+ def initialize(width, height, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#26
+ def pixel!(pixel_x, pixel_y, color); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#45
+ def print_row(out, row_index); end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#5
+UnicodePlot::DensityCanvas::DENSITY_SIGNS = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#8
+UnicodePlot::DensityCanvas::MIN_HEIGHT = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#7
+UnicodePlot::DensityCanvas::MIN_WIDTH = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#10
+UnicodePlot::DensityCanvas::X_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/density_canvas.rb#11
+UnicodePlot::DensityCanvas::Y_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#2
+class UnicodePlot::DotCanvas < ::UnicodePlot::LookupCanvas
+ # @return [DotCanvas] a new instance of DotCanvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#22
+ def initialize(width, height, **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#32
+ def lookup_decode(code); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#28
+ def lookup_encode(x, y); end
+end
+
+# source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#12
+UnicodePlot::DotCanvas::DOT_DECODE = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#5
+UnicodePlot::DotCanvas::DOT_SIGNS = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#19
+UnicodePlot::DotCanvas::X_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/canvas/dot_canvas.rb#20
+UnicodePlot::DotCanvas::Y_PIXEL_PER_CHAR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/grid_plot.rb#2
+class UnicodePlot::GridPlot < ::UnicodePlot::Plot
+ # @return [GridPlot] a new instance of GridPlot
+ #
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#7
+ def initialize(x, y, canvas, width: T.unsafe(nil), height: T.unsafe(nil), xlim: T.unsafe(nil), ylim: T.unsafe(nil), grid: T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#92
+ def lines!(x, y, color); end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#84
+ def n_columns; end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#80
+ def n_rows; end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#64
+ def origin_x; end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#68
+ def origin_y; end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#76
+ def plot_height; end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#72
+ def plot_width; end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#88
+ def points!(x, y, color); end
+
+ # source://unicode_plot//lib/unicode_plot/grid_plot.rb#96
+ def print_row(out, row_index); end
+end
+
+# source://unicode_plot//lib/unicode_plot/grid_plot.rb#5
+UnicodePlot::GridPlot::DEFAULT_HEIGHT = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/grid_plot.rb#4
+UnicodePlot::GridPlot::MIN_HEIGHT = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/grid_plot.rb#3
+UnicodePlot::GridPlot::MIN_WIDTH = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/io_context.rb#4
+class UnicodePlot::IOContext
+ extend ::Forwardable
+
+ # @return [IOContext] a new instance of IOContext
+ #
+ # source://unicode_plot//lib/unicode_plot/io_context.rb#7
+ def initialize(io, color: T.unsafe(nil)); end
+
+ # @return [Boolean]
+ #
+ # source://unicode_plot//lib/unicode_plot/io_context.rb#14
+ def color?; end
+
+ # source://forwardable/1.3.3/forwardable.rb#231
+ def print(*args, **_arg1, &block); end
+
+ # source://forwardable/1.3.3/forwardable.rb#231
+ def puts(*args, **_arg1, &block); end
+
+ private
+
+ # source://unicode_plot//lib/unicode_plot/io_context.rb#23
+ def check_color(color); end
+end
+
+# source://unicode_plot//lib/unicode_plot/lineplot.rb#4
+class UnicodePlot::Lineplot < ::UnicodePlot::GridPlot; end
+
+# source://unicode_plot//lib/unicode_plot/canvas/lookup_canvas.rb#2
+class UnicodePlot::LookupCanvas < ::UnicodePlot::Canvas
+ # @return [LookupCanvas] a new instance of LookupCanvas
+ #
+ # source://unicode_plot//lib/unicode_plot/canvas/lookup_canvas.rb#3
+ def initialize(width, height, x_pixel_per_char, y_pixel_per_char, fill_char = T.unsafe(nil), **kw); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/lookup_canvas.rb#13
+ def pixel!(pixel_x, pixel_y, color); end
+
+ # source://unicode_plot//lib/unicode_plot/canvas/lookup_canvas.rb#36
+ def print_row(out, row_index); end
+end
+
+# source://unicode_plot//lib/unicode_plot/stemplot.rb#137
+class UnicodePlot::NumericStemplot < ::UnicodePlot::Stemplot
+ # @return [NumericStemplot] a new instance of NumericStemplot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#138
+ def initialize(vector, scale: T.unsafe(nil), **kw); end
+
+ # Print key to STDOUT
+ #
+ # @param scale [Integer] Scale, should be a power of 10
+ # @param divider [String] Divider character between stem and leaf
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#154
+ def print_key(scale, divider); end
+
+ class << self
+ # Used when we have stems from a back-to-back stemplot and a combined list of stems is given
+ #
+ # @param stems [Array] Concatenated list of stems from two plots
+ # @param all [Boolean] Return all stems if true, otherwise only return stems if a leaf exists for a stem
+ # @return [Array] Sorted list of stems
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#168
+ def sorted_stem_list(stems, all: T.unsafe(nil)); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/plot.rb#2
+class UnicodePlot::Plot
+ include ::UnicodePlot::StyledPrinter
+
+ # @return [Plot] a new instance of Plot
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#10
+ def initialize(title: T.unsafe(nil), xlabel: T.unsafe(nil), ylabel: T.unsafe(nil), border: T.unsafe(nil), margin: T.unsafe(nil), padding: T.unsafe(nil), labels: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#66
+ def annotate!(loc, value, color: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#93
+ def annotate_row!(loc, row_index, value, color: T.unsafe(nil)); end
+
+ # Returns the value of attribute border.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#36
+ def border; end
+
+ # Returns the value of attribute colors_deco.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#44
+ def colors_deco; end
+
+ # Returns the value of attribute colors_left.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#40
+ def colors_left; end
+
+ # Returns the value of attribute colors_right.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#42
+ def colors_right; end
+
+ # Returns the value of attribute decorations.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#43
+ def decorations; end
+
+ # Returns the value of attribute labels_left.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#39
+ def labels_left; end
+
+ # Returns the value of attribute labels_right.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#41
+ def labels_right; end
+
+ # Returns the value of attribute margin.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#37
+ def margin; end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#119
+ def next_color; end
+
+ # Returns the value of attribute padding.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#38
+ def padding; end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#106
+ def render(out = T.unsafe(nil), newline: T.unsafe(nil), color: T.unsafe(nil)); end
+
+ # @return [Boolean]
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#62
+ def show_labels?; end
+
+ # Returns the value of attribute title.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#33
+ def title; end
+
+ # @return [Boolean]
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#46
+ def title_given?; end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#125
+ def to_s; end
+
+ # Returns the value of attribute xlabel.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#34
+ def xlabel; end
+
+ # @return [Boolean]
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#50
+ def xlabel_given?; end
+
+ # Returns the value of attribute ylabel.
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#35
+ def ylabel; end
+
+ # @return [Boolean]
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#54
+ def ylabel_given?; end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#58
+ def ylabel_length; end
+
+ private
+
+ # @raise [ArgumentError]
+ #
+ # source://unicode_plot//lib/unicode_plot/plot.rb#146
+ def check_border(border); end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#133
+ def check_margin(margin); end
+
+ # source://unicode_plot//lib/unicode_plot/plot.rb#140
+ def check_row_index(row_index); end
+end
+
+# source://unicode_plot//lib/unicode_plot/plot.rb#110
+UnicodePlot::Plot::COLOR_CYCLE = T.let(T.unsafe(nil), Array)
+
+# source://unicode_plot//lib/unicode_plot/plot.rb#6
+UnicodePlot::Plot::DEFAULT_BORDER = T.let(T.unsafe(nil), Symbol)
+
+# source://unicode_plot//lib/unicode_plot/plot.rb#7
+UnicodePlot::Plot::DEFAULT_MARGIN = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/plot.rb#8
+UnicodePlot::Plot::DEFAULT_PADDING = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/plot.rb#5
+UnicodePlot::Plot::DEFAULT_WIDTH = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/renderer.rb#63
+class UnicodePlot::Renderer
+ include ::UnicodePlot::StyledPrinter
+ include ::UnicodePlot::BorderPrinter
+
+ # @return [Renderer] a new instance of Renderer
+ #
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#70
+ def initialize(plot); end
+
+ # Returns the value of attribute out.
+ #
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#76
+ def out; end
+
+ # Returns the value of attribute plot.
+ #
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#75
+ def plot; end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#78
+ def render(out, newline); end
+
+ private
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#203
+ def init_render; end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#247
+ def nocolor_string(str); end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#239
+ def print(*args); end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#231
+ def print_title(padding, title, p_width: T.unsafe(nil), color: T.unsafe(nil)); end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#243
+ def puts(*args); end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#171
+ def render_bottom; end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#126
+ def render_row(row); end
+
+ # render all rows
+ #
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#122
+ def render_rows; end
+
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#90
+ def render_top; end
+
+ class << self
+ # source://unicode_plot//lib/unicode_plot/renderer.rb#66
+ def render(out, plot, newline); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/scatterplot.rb#2
+class UnicodePlot::Scatterplot < ::UnicodePlot::GridPlot; end
+
+# ## Description
+#
+# Draw a stem-leaf plot of the given vector +vec+.
+#
+# ```
+# stemplot(vec, **kwargs)
+# ```
+#
+# Draw a back-to-back stem-leaf plot of the given vectors +vec1+ and +vec2+.
+#
+# ```
+# stemplot(vec1, vec2, **kwargs)
+# ```
+#
+# The vectors can be any object that converts to an Array, e.g. an Array, Range, etc.
+# If all elements of the vector are Numeric, the stem-leaf plot is classified as a
+# {NumericStemplot}, otherwise it is classified as a {StringStemplot}. Back-to-back
+# stem-leaf plots must be the same type, i.e. String and Numeric stem-leaf plots cannot
+# be mixed in a back-to-back plot.
+#
+# ## Usage
+#
+# stemplot(vec, [vec2], scale:, divider:, padchar:, trim: )
+#
+# ## Arguments
+#
+# - +vec+: Vector for which the stem leaf plot should be computed.
+# - +vec2+: Optional secondary vector, will be used to create a back-to-back stem-leaf plot.
+# - +scale+: Set scale of plot. Default = 10. Scale is changed via orders of magnitude. Common values are 0.1, 1, and 10. For String stems, the default value of 10 is a one character stem, 100 is a two character stem.
+# - +divider+: Character for break between stem and leaf. Default = "|"
+# - +padchar+: Character(s) to separate stems, leaves and dividers. Default = " "
+# - +trim+: Trims the stem labels when there are no leaves. This can be useful if your data is sparse. Default = +false+
+# - +string_padchar+: Character used to replace missing position for input strings shorter than the stem-size. Default = "_"
+#
+# ## Result
+# A plot of object type is sent to $stdout
+#
+# @example Examples using Numbers
+# # Generate some numbers
+# fifty_floats = 50.times.map { rand(-1000..1000)/350.0 }
+# eighty_ints = 80.times.map { rand(1..100) }
+# another_eighty_ints = 80.times.map { rand(1..100) }
+# three_hundred_ints = 300.times.map { rand(-100..100) }
+#
+# # Single sided stem-plot
+# UnicodePlot.stemplot(eighty_ints)
+#
+# # Single sided stem-plot with positive and negative values
+# UnicodePlot.stemplot(three_hundred_ints)
+#
+# # Single sided stem-plot using floating point values, scaled
+# UnicodePlot.stemplot(fifty_floats, scale: 1)
+#
+# # Single sided stem-plot using floating point values, scaled with new divider
+# UnicodePlot.stemplot(fifty_floats, scale: 1, divider: "😄")
+#
+# # Back to back stem-plot
+# UnicodePlot.stemplot(eighty_ints, another_eighty_ints)
+# @example Examples using Strings
+# # Generate some strings
+# words_1 = %w[apple junk ant age bee bar baz dog egg a]
+# words_2 = %w[ape flan can cat juice elf gnome child fruit]
+#
+# # Single sided stem-plot
+# UnicodePlot.stemplot(words_1)
+#
+# # Back to back stem-plot
+# UnicodePlot.stemplot(words_1, words_2)
+#
+# # Scaled stem plot using scale=100 (two letters for the stem) and trimmed stems
+# UnicodePlot.stemplot(words_1, scale: 100, trim: true)
+#
+# # Above, but changing the string_padchar
+# UnicodePlot.stemplot(words_1, scale: 100, trim: true, string_padchar: '?')
+#
+# source://unicode_plot//lib/unicode_plot/stemplot.rb#81
+class UnicodePlot::Stemplot
+ # Use {factory} method -- should not be directly called.
+ #
+ # @return [Stemplot] a new instance of Stemplot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#84
+ def initialize(*_args, **_kw); end
+
+ # Insert a stem and leaf
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#104
+ def insert(stem, leaf); end
+
+ # Returns a list of leaves for a given stem
+ #
+ # @param stem [Object] The stem
+ # @return [Array] Unsorted list of leaves
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#118
+ def leaves(stem); end
+
+ # Determines largest length of any stem
+ #
+ # @return [Integer] Length value
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#124
+ def max_stem_length; end
+
+ # Returns an unsorted list of stems
+ #
+ # @return [Array] Unsorted list of stems
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#111
+ def raw_stems; end
+
+ # Returns a sorted list of stems
+ #
+ # @param all [Boolean] Return all stems if true, otherwise only return stems if a leaf exists for a stem
+ # @return [Array] Sorted list of stems
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#131
+ def stems(all: T.unsafe(nil)); end
+
+ class << self
+ # Factory method to create a Stemplot, creates either a NumericStemplot
+ # or StringStemplot depending on input.
+ #
+ # @param vector [Array] An array of elements to stem-leaf plot
+ # @return [NumericStemplot] If all elements are Numeric
+ # @return [StringStemplot] If any elements are not Numeric
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#94
+ def factory(vector, **kw); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/stemplot.rb#184
+class UnicodePlot::StringStemplot < ::UnicodePlot::Stemplot
+ # @raise [ArgumentError]
+ # @return [StringStemplot] a new instance of StringStemplot
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#186
+ def initialize(vector, scale: T.unsafe(nil), string_padchar: T.unsafe(nil), **_kw); end
+
+ # Function prototype to provide same interface as {NumericStemplot}.
+ # This function does not do anything.
+ #
+ # @return [false]
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#203
+ def print_key(_scale, _divider); end
+
+ class << self
+ # Used when we have stems from a back-to-back stemplot and a combined list of stems is given
+ #
+ # @param stems [Array] Concatenated list of stems from two plots
+ # @param all [Boolean] Return all stems if true, otherwise only return stems if a leaf exists for a stem
+ # @return [Array] Sorted list of stems
+ #
+ # source://unicode_plot//lib/unicode_plot/stemplot.rb#212
+ def sorted_stem_list(stems, all: T.unsafe(nil)); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/styled_printer.rb#2
+module UnicodePlot::StyledPrinter
+ # source://unicode_plot//lib/unicode_plot/styled_printer.rb#82
+ def print_color(out, color, *args); end
+
+ # source://unicode_plot//lib/unicode_plot/styled_printer.rb#60
+ def print_styled(out, *args, bold: T.unsafe(nil), color: T.unsafe(nil)); end
+end
+
+# source://unicode_plot//lib/unicode_plot/styled_printer.rb#58
+UnicodePlot::StyledPrinter::COLOR_DECODE = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/styled_printer.rb#47
+UnicodePlot::StyledPrinter::COLOR_ENCODE = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/styled_printer.rb#36
+UnicodePlot::StyledPrinter::DISABLE_TEXT_STYLE = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/styled_printer.rb#3
+UnicodePlot::StyledPrinter::TEXT_COLORS = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/utils.rb#2
+module UnicodePlot::Utils
+ private
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#59
+ def ceil_neg_log10(x); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#5
+ def extend_limits(values, limits); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#29
+ def float_round_log10(x, m); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#22
+ def plotting_range_narrow(xmin, xmax); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#49
+ def round_down_subtick(x, m); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#39
+ def round_up_subtick(x, m); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#70
+ def roundable?(x); end
+
+ class << self
+ # source://unicode_plot//lib/unicode_plot/utils.rb#59
+ def ceil_neg_log10(x); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#5
+ def extend_limits(values, limits); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#29
+ def float_round_log10(x, m); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#22
+ def plotting_range_narrow(xmin, xmax); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#49
+ def round_down_subtick(x, m); end
+
+ # source://unicode_plot//lib/unicode_plot/utils.rb#39
+ def round_up_subtick(x, m); end
+
+ # @return [Boolean]
+ #
+ # source://unicode_plot//lib/unicode_plot/utils.rb#70
+ def roundable?(x); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/utils.rb#68
+UnicodePlot::Utils::INT64_MAX = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/utils.rb#67
+UnicodePlot::Utils::INT64_MIN = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/version.rb#2
+UnicodePlot::VERSION = T.let(T.unsafe(nil), String)
+
+# source://unicode_plot//lib/unicode_plot/value_transformer.rb#2
+module UnicodePlot::ValueTransformer
+ # source://unicode_plot//lib/unicode_plot/value_transformer.rb#12
+ def transform_values(func, values); end
+
+ private
+
+ # source://unicode_plot//lib/unicode_plot/value_transformer.rb#30
+ def transform_name(func, basename = T.unsafe(nil)); end
+
+ class << self
+ # source://unicode_plot//lib/unicode_plot/value_transformer.rb#30
+ def transform_name(func, basename = T.unsafe(nil)); end
+ end
+end
+
+# source://unicode_plot//lib/unicode_plot/value_transformer.rb#3
+UnicodePlot::ValueTransformer::PREDEFINED_TRANSFORM_FUNCTIONS = T.let(T.unsafe(nil), Hash)
+
+# source://unicode_plot//lib/unicode_plot/version.rb#4
+module UnicodePlot::Version; end
+
+# source://unicode_plot//lib/unicode_plot/version.rb#6
+UnicodePlot::Version::MAJOR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/version.rb#6
+UnicodePlot::Version::MICRO = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/version.rb#6
+UnicodePlot::Version::MINOR = T.let(T.unsafe(nil), Integer)
+
+# source://unicode_plot//lib/unicode_plot/version.rb#7
+UnicodePlot::Version::STRING = T.let(T.unsafe(nil), String)
+
+# source://unicode_plot//lib/unicode_plot/version.rb#5
+UnicodePlot::Version::TAG = T.let(T.unsafe(nil), T.untyped)
diff --git a/sorbet/rbi/todo.rbi b/sorbet/rbi/todo.rbi
index 6f7be3dc..5f430ec9 100644
--- a/sorbet/rbi/todo.rbi
+++ b/sorbet/rbi/todo.rbi
@@ -4,6 +4,7 @@
# typed: false
+module ::MakeMakefile; end
module ::Spring; end
module SyntaxTree::Haml; end
module SyntaxTree::Haml::Format::Formatter; end
diff --git a/sorbet/tapioca/require.rb b/sorbet/tapioca/require.rb
index 9078c1c2..f7b832ea 100644
--- a/sorbet/tapioca/require.rb
+++ b/sorbet/tapioca/require.rb
@@ -28,6 +28,7 @@ require "sorbet-runtime"
require "syntax_tree"
require "timeout"
require "xdiff/extension"
+require "rake/dsl_definition"
require "prometheus_exporter/client"
require "prometheus_exporter/metric"