# typed: false
require "rails_helper"

describe HasBulkEnqueueJobs, no_parallel_rspec: true do
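  # Minimal job used by these specs to exercise enqueue deduplication,
  # defer_job, and enqueue_job_after_save.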
  class TestJob < Scraper::JobBase
    ignore_signature_args :test_arg_ignored
    include HasColorLogger

    def self.http_factory_method
      :get_test_http_client
    end

    def perform(args)
      raise("no arguments: #{args.inspect}") if args.empty?

      if (post = args[:post])
        post.description = args[:description]
        post.save!
      elsif args[:defer_enqueue_job]
        defer_job(TestJob2, { job_arg: args[:defer_enqueue_job] })
      elsif args[:defer_after_save]
        new_post = Domain::Post::FaPost.new(fa_id: 12_345)
        new_post.enqueue_job_after_save(
          TestJob2,
          { job_arg: args[:defer_after_save] },
        )
        new_post.save!
      end
    end
  end

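  # Secondary job enqueued by TestJob via defer_job / enqueue_job_after_save.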
  class TestJob2 < Scraper::JobBase
    include HasColorLogger

    def self.http_factory_method
      :get_test_http_client
    end

    def perform(args)
    end
  end

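  # Plain (non-job) class that mixes in HasBulkEnqueueJobs so bulk_enqueue_jobs
  # can be exercised directly.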
  let(:test_class) do
    Class.new do
      include HasBulkEnqueueJobs

      def config_object_id
        Scraper::JobBase.good_job_concurrency_config.object_id
      end
    end
  end

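  # good_job_concurrency_config should be thread-local: the same object within
  # a thread, a different object in another thread.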
it "has correct thread-local access to config" do
|
|
get_config_obj_id =
|
|
proc { Scraper::JobBase.good_job_concurrency_config.object_id }
|
|
|
|
id1 = get_config_obj_id.call
|
|
# id stays consistent
|
|
expect(get_config_obj_id.call).to eq(id1)
|
|
|
|
# id changes in a different thread
|
|
id2 = nil
|
|
Thread.new { id2 = get_config_obj_id.call }.join
|
|
expect(id2).to_not be_nil
|
|
expect(id2).to_not eq(id1)
|
|
end
|
|
|
|
it "enqueues jobs correctly" do
|
|
inst = test_class.new
|
|
expect(SpecUtil.enqueued_jobs).to eq([])
|
|
inst.bulk_enqueue_jobs { Domain::Fa::Job::BrowsePageJob.perform_later({}) }
|
|
expect(SpecUtil.enqueued_jobs.length).to eq(1)
|
|
expect(SpecUtil.enqueued_jobs[0][:good_job].concurrency_key).to start_with(
|
|
"Domain::Fa::Job::BrowsePageJob",
|
|
)
|
|
|
|
inst.bulk_enqueue_jobs { Domain::Fa::Job::BrowsePageJob.perform_later({}) }
|
|
expect(SpecUtil.enqueued_jobs.length).to eq(1)
|
|
end
|
|
|
|
it "does not enqueue duplicate jobs" do
|
|
post_1_first_load = create(:domain_post_fa_post)
|
|
caused_by_entry = create(:http_log_entry)
|
|
|
|
job_id =
|
|
TestJob.perform_later(
|
|
{ post: post_1_first_load, caused_by_entry: },
|
|
).job_id
|
|
expect(SpecUtil.enqueued_job_args(include_job_id: true)).to eq(
|
|
[{ post: post_1_first_load, caused_by_entry:, job_id: }],
|
|
)
|
|
|
|
# should not enqueue another job with same post and caused_by_entry
|
|
post_1_second_load = Domain::Post::FaPost.find(post_1_first_load.id)
|
|
TestJob.perform_later({ post: post_1_second_load, caused_by_entry: })
|
|
expect(SpecUtil.enqueued_job_args(include_job_id: true)).to eq(
|
|
[{ post: post_1_first_load, caused_by_entry:, job_id: }],
|
|
)
|
|
|
|
# lack of caused_by_entry should not change the concurrency key
|
|
TestJob.perform_later({ post: post_1_first_load })
|
|
expect(SpecUtil.enqueued_job_args(include_job_id: true)).to eq(
|
|
[{ post: post_1_first_load, caused_by_entry:, job_id: }],
|
|
)
|
|
|
|
# different post should cause a new job to be enqueued
|
|
post_2 = create(:domain_post_fa_post)
|
|
job_id_2 = TestJob.perform_later({ post: post_2, caused_by_entry: }).job_id
|
|
expect(SpecUtil.enqueued_job_args(include_job_id: true)).to eq(
|
|
[
|
|
{ post: post_1_first_load, caused_by_entry:, job_id: },
|
|
{ post: post_2, caused_by_entry:, job_id: job_id_2 },
|
|
],
|
|
)
|
|
end
|
|
|
|
it "correctly performs jobs" do
|
|
post = create(:domain_post_fa_post)
|
|
job = TestJob.perform_later({ post:, description: "the post changed" })
|
|
|
|
expect(SpecUtil.enqueued_job_args(include_job_id: true)).to eq(
|
|
[{ post:, description: "the post changed", job_id: job.job_id }],
|
|
)
|
|
expect { SpecUtil.perform_jobs }.to change { post.reload.description }.from(
|
|
"Test description",
|
|
).to("the post changed")
|
|
|
|
expect(SpecUtil.enqueued_job_args()).to eq([])
|
|
end
|
|
|
|
it "defer_job propagates caused_by_entry and caused_by_job_id" do
|
|
caused_by_entry = create(:http_log_entry)
|
|
job =
|
|
TestJob.perform_later(defer_enqueue_job: "deferred job", caused_by_entry:)
|
|
|
|
SpecUtil.perform_jobs(limit: 1)
|
|
|
|
expect(SpecUtil.enqueued_job_args(include_caused_by_job: true)).to eq(
|
|
[
|
|
{
|
|
job_arg: "deferred job",
|
|
caused_by_entry:,
|
|
caused_by_job_id: job.job_id,
|
|
},
|
|
],
|
|
)
|
|
end
|
|
|
|
it "enqueue_job_after_save propagates caused_by_entry and caused_by_job_id" do
|
|
caused_by_entry = create(:http_log_entry)
|
|
job =
|
|
TestJob.perform_later(
|
|
defer_after_save: "deferred after save",
|
|
caused_by_entry:,
|
|
)
|
|
|
|
SpecUtil.perform_jobs(limit: 1)
|
|
|
|
expect(SpecUtil.enqueued_job_args(include_caused_by_job: true)).to eq(
|
|
[
|
|
{
|
|
job_arg: "deferred after save",
|
|
caused_by_entry:,
|
|
caused_by_job_id: job.job_id,
|
|
},
|
|
],
|
|
)
|
|
end
|
|
|
|
it "works with no enqueued jobs" do
|
|
inst = test_class.new
|
|
inst.bulk_enqueue_jobs {}
|
|
end
|
|
end
|