# typed: strict

class ReduxApplicationRecord < ActiveRecord::Base
  extend T::Sig
  include HasPrometheusClient
  include HasColorLogger

  # hack to make sorbet recognize the `@after_save_deferred_jobs` instance variable
  sig { params(attributes: T.untyped).void }
  def initialize(attributes = nil)
    @after_save_deferred_jobs =
      T.let(
        nil,
        T.nilable(T::Array[[DeferredJob, T.nilable(Scraper::JobBase)]]),
      )
    super(attributes)
  end

  self.abstract_class = true

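  # Lifecycle callbacks report each event to the Prometheus counter defined below.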
  after_initialize { observe(:initialize) }
  after_create { observe(:create) }
  after_update { observe(:update) }
  after_destroy { observe(:destroy) }

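  # Large array/JSON attributes whose values are truncated in console inspect output.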
  TRUNCATE_FIELDS = %w[
    json_attributes
    tags_array
    flags_array
    pools_array
    sources_array
    artists_array
  ].freeze

  # clean up the json_attributes field in console output
  sig { params(attr_name: T.any(Symbol, String)).returns(T.untyped) }
  def attribute_for_inspect(attr_name)
    if attr_name.to_s.ends_with?("_backup")
      return "(backup value)"
    elsif TRUNCATE_FIELDS.include?(attr_name.to_s)
      str_value = read_attribute(attr_name).inspect
      str_value = "#{str_value[0, 50]}..." if str_value.length > 50
      return str_value
    end
    super
  end

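  # Drop the `json_attributes-` prefixed attributes from inspect output entirely.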
  sig { returns(T::Array[String]) }
  def attributes_for_inspect
    super.reject { |attr_name| attr_name.start_with?("json_attributes-") }
  end

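  # Records a job to enqueue once this record has been saved, together with the
  # scraper job (if any) that triggered the change.
  # Example call (hypothetical job class): enqueue_job_after_save(Scraper::FooJob, { post_id: id })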
  sig do
    params(
      job_class: T.class_of(Scraper::JobBase),
      params: T::Hash[Symbol, T.untyped],
      set_args: T::Hash[Symbol, T.untyped],
    ).void
  end
  def enqueue_job_after_save(job_class, params, set_args = {})
    @after_save_deferred_jobs ||= []
    @after_save_deferred_jobs << [
      DeferredJob.new(job_class:, params:, set_args:),
      Scraper::JobBase.current_scraper_job,
    ]
  end

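  # After save, enqueue all deferred jobs in one GoodJob bulk insert, tagging each
  # with the scraper job that caused it.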
  after_save do
    # T.bind(self, ReduxApplicationRecord)
    @after_save_deferred_jobs ||=
      T.let([], T.nilable(T::Array[[DeferredJob, T.nilable(Scraper::JobBase)]]))

    GoodJob::Bulk.enqueue do
      @after_save_deferred_jobs.each do |deferred_job, current_job|
        job_params = deferred_job.params

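        # Propagate provenance from the scraper job that deferred this one.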
        if current_job
          job_params.merge!(
            caused_by_entry: current_job.causing_log_entry,
            caused_by_job_id: current_job.job_id,
          )
        end

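        # perform_later calls inside GoodJob::Bulk.enqueue are collected and written in one batch.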
        job =
          deferred_job
            .job_class
            .set(deferred_job.set_args)
            .perform_later(job_params)

        if job
          logger.info(
            "[class: #{self.class.name}][id: #{id}][enqueued job: #{deferred_job.job_class.name}][job_id: #{job.job_id}]",
          )
        end

        Scraper::Metrics::JobBaseMetrics.observe_job_enqueued(
          source_class: self.class,
          enqueued_class: deferred_job.job_class,
        )
      end
    end
  end

  private

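  # Prometheus counter tracking ActiveRecord lifecycle events, labelled by callback and model class.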
  ACTIVE_RECORD_COUNTER =
    T.let(
      PrometheusExporter::Client.default.register(
        :counter,
        "active_record_lifecycle",
        "active record lifecycle statistics",
      ),
      PrometheusExporter::Client::RemoteMetric,
    )

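  # Increment the lifecycle counter for the given callback on this model class.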
  sig { params(action: Symbol).void }
  def observe(action)
    ACTIVE_RECORD_COUNTER.increment(
      { method: action, class_name: self.class.name },
    )
  end
end