separate out blob file migrator
Rakefile (16 lines changed)
@@ -222,13 +222,6 @@ task fix_buggy_fa_posts: :environment do
      full_res_img.scheme = "https" if full_res_img.scheme.blank?
      matches = full_res_img.to_s == post.file_url_str
    end
-
-   # if post_file_url_str != file_url_str
-   #   post.file = nil
-   #   post.save!
-   #   puts "url mismatch: #{post_file_url_str} != #{file_url_str}"
-   # end
-   # binding.pry
  end
end

@@ -451,3 +444,12 @@ task run_fa_user_avatar_jobs: :environment do
    puts "perform avatar job for #{avatar.user.url_name} - #{avatar.state.bold}"
  end
end
+
+task sample_migrated_favs: :environment do
+  new_user = Domain::User::FaUser.where.not(migrated_user_favs_at: nil).last
+  old_user = Domain::Fa::User.find_by(url_name: new_user.url_name)
+
+  puts "user: #{new_user.url_name}"
+  puts "old fav count: #{old_user.fav_posts.count}"
+  puts "new fav count: #{new_user.faved_posts.count}"
+end

TODO.md (1 line changed)
@@ -19,3 +19,4 @@
 - [ ] backfill descriptions on inkbunny posts
 - [ ] store deep update json on inkbunny posts
 - [ ] limit number of users, or paginate for "users who favorited this post" page
+- [ ] manual good job runner does not indicate if the job threw an exception - check return value of #perform, maybe?

@@ -74,7 +74,7 @@ class Domain::E621::Job::ScanUserFavsJob < Domain::E621::Job::Base
        .keys
        .each_slice(1000) do |e621_post_id_slice|
        e621_id_to_post_id.merge!(
-         Domain::E621::Post
+         Domain::Post::E621Post
            .where(e621_id: e621_post_id_slice)
            .pluck(:e621_id, :id)
            .to_h,

@@ -133,11 +133,12 @@ class Domain::Inkbunny::Job::UpdatePostsJob < Domain::Inkbunny::Job::Base

    if submission_json["user_icon_url_large"]
      user = T.must(post.creator)
      user.save!
      logger.tagged(make_arg_tag(user)) do
        avatar = user.avatar
        avatar_url_str = submission_json["user_icon_url_large"]
        if !avatar || avatar.url_str != avatar_url_str
-         avatar = user.build_avatar
+         avatar = user.avatars.build
          avatar.url_str = avatar_url_str
          user.deep_update_log_entry = log_entry
          logger.info "avatar url changed, enqueuing avatar download"

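Side note on the user.build_avatar to user.avatars.build change above: that is the usual call-site difference between a has_one and a has_many association. The model diff is not part of this commit view, so the sketch below is an assumption about the association change, with illustrative class names:

  # hypothetical before: one avatar per user
  class User < ApplicationRecord
    has_one :avatar
  end
  user.build_avatar # prepares the single associated record

  # hypothetical after: avatars kept as a collection/history
  class User < ApplicationRecord
    has_many :avatars
  end
  user.avatars.build # appends a new record instead of replacing
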
app/lib/domain/blob_file/migrate_blob_entry_to_blob_file.rb (new file, 74 lines)
@@ -0,0 +1,74 @@
# typed: strict
class Domain::BlobFile::MigrateBlobEntryToBlobFile
  extend T::Sig

  ZERO_SHA256 = T.let("00" * 32, String)

  sig { params(log_sink: T.any(IO, StringIO)).void }
  def initialize(log_sink: $stderr)
    @num_migrated = T.let(0, Integer)
    @num_processed = T.let(0, Integer)
    @start_time = T.let(Time.current, ActiveSupport::TimeWithZone)
    @log_sink = log_sink
  end

  sig { params(batch_size: Integer, start_sha256: String).void }
  def run(batch_size: 16, start_sha256: ZERO_SHA256)
    start_sha256_bin = HexUtil.hex2bin(start_sha256)
    BlobEntry.in_batches(
      of: batch_size,
      start: start_sha256_bin,
      order: :asc,
      use_ranges: true,
    ) do |batch|
      batch_migrated = insert_blob_entries_batch(batch)
      @num_migrated += batch_migrated
      @num_processed += batch.size
      rate = batch_migrated.to_f / (Time.current - @start_time)
      last = batch.last&.sha256
      last_hex = last ? HexUtil.bin2hex(last) : "nil"
      @log_sink.puts(
        [
          "[migrated: #{n2d(@num_migrated)}]",
          "[processed: #{n2d(@num_processed)}]",
          "[rate: #{rate.round(1).to_s.rjust(5)}/second]",
          "[last: '#{last_hex}']",
        ].join(" "),
      )
      @start_time = Time.current
    end
  end

  private

  sig { params(batch: T.untyped).returns(Integer) }
  def insert_blob_entries_batch(batch)
    num_migrated = 0

    blob_entry_sha256s = batch.pluck(:sha256)
    blob_file_sha256s =
      BlobFile.where(sha256: blob_entry_sha256s).pluck(:sha256)
    missing_sha256s = blob_entry_sha256s - blob_file_sha256s

    BlobFile.transaction do
      BlobEntry
        .where(sha256: missing_sha256s)
        .each do |blob_entry|
          blob_file = BlobFile.initialize_from_blob_entry(blob_entry)
          sha256_hex = HexUtil.bin2hex(T.must(blob_file.sha256))
          begin
            blob_file.save!
            num_migrated += 1
          rescue ActiveRecord::RecordInvalid => e
            @log_sink.puts "error saving blob file #{sha256_hex}: #{e}"
          end
        end
    end
    num_migrated
  end

  sig { params(n: Integer).returns(String) }
  def n2d(n)
    ActiveSupport::NumberHelper.number_to_delimited(n).rjust(8)
  end
end
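Because the migrator takes its log sink through the constructor and exposes a keyset-style start_sha256 checkpoint, it can be driven from a Rails console as well as from the blob_file rake task further down in this commit. A minimal usage sketch (the checkpoint value here is illustrative; in practice it would come from a logged "[last: '...']" value):

  migrator = Domain::BlobFile::MigrateBlobEntryToBlobFile.new(log_sink: $stdout)
  migrator.run(batch_size: 64)
  # resume an interrupted run from a known checkpoint:
  migrator.run(batch_size: 64, start_sha256: "3f" * 32)
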
@@ -237,24 +237,109 @@ class Domain::MigrateToDomain
  sig { params(only_user: T.nilable(String)).void }
  def migrate_fa_users_favs(only_user: nil)
    logger.info "migrating fa users favs"
    if only_user
      query = Domain::User::FaUser.where(url_name: only_user)
    else
      query = Domain::User::FaUser.where(migrated_user_favs_at: nil)
    end
    pb =
      ProgressBar.create(
        throttle_rate: 0.2,
        total: query.count,
        format: "%t: %c/%C %B %p%% %a %e",
        output: @pb_sink,
      )
    query.find_in_batches(batch_size: 5) do |batch|
      ReduxApplicationRecord.transaction do
        batch.each { |user| migrate_fa_user_favs(user) }
      end
      pb.progress = [pb.progress + batch.size, pb.total].min
    end

    ReduxApplicationRecord.connection.execute(<<~SQL)
      -- Map old user IDs to new user IDs:
      DROP TABLE IF EXISTS user_map;
      CREATE TEMP TABLE user_map TABLESPACE mirai AS
      SELECT old_users.id AS old_user_id, new_users.id AS new_user_id
      FROM domain_fa_users old_users
      JOIN domain_users new_users
        ON new_users.json_attributes->>'url_name' = old_users.url_name
      WHERE new_users.type = 'Domain::User::FaUser'
        AND new_users.json_attributes->>'migrated_user_favs_at' IS NULL;
      CREATE INDEX idx_user_map_old_user_id ON user_map(old_user_id, new_user_id) TABLESPACE mirai;
      CREATE INDEX idx_user_map_new_user_id ON user_map(new_user_id, old_user_id) TABLESPACE mirai;
      ANALYZE user_map;

      -- Map old post IDs to new post IDs:
      DROP TABLE IF EXISTS post_map;
      CREATE TEMP TABLE post_map TABLESPACE mirai AS
      SELECT old_posts.id AS old_post_id, new_posts.id AS new_post_id
      FROM domain_fa_posts old_posts
      JOIN domain_posts new_posts
        ON (new_posts.json_attributes->>'fa_id')::integer = old_posts.fa_id
      WHERE new_posts.type = 'Domain::Post::FaPost';
      CREATE INDEX idx_post_map_old_post_id ON post_map(old_post_id, new_post_id) TABLESPACE mirai;
      CREATE INDEX idx_post_map_new_post_id ON post_map(new_post_id, old_post_id) TABLESPACE mirai;
      ANALYZE post_map;

      DO $$
      DECLARE
        v_user_ids bigint[];
        v_batch_size integer := 10; -- Adjust batch size as needed
        v_total_count integer;
        v_processed_count integer := 0;
        v_progress numeric;
        v_batch bigint[];
      BEGIN
        RAISE NOTICE 'Counting users...';

        -- Fetch all distinct user_ids into an array
        SELECT array_agg(domain_fa_users.id ORDER BY domain_fa_users.id)
        INTO v_user_ids
        FROM domain_fa_users
        INNER JOIN user_map um ON domain_fa_users.id = um.old_user_id
        INNER JOIN domain_users du ON um.new_user_id = du.id
        WHERE du.type = 'Domain::User::FaUser'
          AND du.json_attributes->>'migrated_user_favs_at' IS NULL;

        IF v_user_ids IS NULL THEN
          RAISE NOTICE 'No users found to process';
          RETURN;
        END IF;

        -- Get total user count for progress tracking
        v_total_count := array_length(v_user_ids, 1);
        RAISE NOTICE 'Total users to process: %', v_total_count;

        -- Loop over user IDs in batches
        FOR i IN 1..v_total_count BY v_batch_size LOOP
          -- Extract the current batch of users
          v_batch := v_user_ids[i:LEAST(i + v_batch_size - 1, v_total_count)];

          -- Insert batch for the current set of users
          INSERT INTO domain_user_post_favs (user_id, post_id)
          SELECT um.new_user_id, pm.new_post_id
          FROM domain_fa_favs old_favs
          JOIN post_map pm ON old_favs.post_id = pm.old_post_id
          JOIN user_map um ON old_favs.user_id = um.old_user_id
          WHERE old_favs.user_id = ANY(v_batch)
          ON CONFLICT (user_id, post_id) DO NOTHING;

          UPDATE domain_users
          SET json_attributes = jsonb_set(json_attributes, '{migrated_user_favs_at}', to_jsonb(now()))
          FROM user_map um
          WHERE domain_users.id = um.new_user_id
            AND um.old_user_id = ANY(v_batch)
            AND domain_users.type = 'Domain::User::FaUser';

          -- Update progress tracking
          v_processed_count := LEAST(i + v_batch_size - 1, v_total_count);
          v_progress := (v_processed_count::numeric / v_total_count::numeric) * 100;

          -- Log progress
          RAISE NOTICE 'Processed users % of % (Progress: % %%)',
            v_processed_count, v_total_count, ROUND(v_progress, 2);

          -- COMMIT;
        END LOOP;
      END $$;
    SQL

    # previous attempt that does not batch:
    # INSERT INTO domain_user_post_favs (user_id, post_id)
    # SELECT um.new_user_id, pm.new_post_id
    # FROM domain_fa_favs old_favs
    # JOIN post_map pm ON old_favs.post_id = pm.old_post_id
    # JOIN user_map um ON old_favs.user_id = um.old_user_id
    # ON CONFLICT (user_id, post_id) DO NOTHING;

    # ReduxApplicationRecord.connection.execute(<<~SQL)
    #   UPDATE domain_users
    #   SET json_attributes = jsonb_set(json_attributes, '{migrated_user_favs_at}', to_jsonb(now()))
    #   WHERE domain_users.type = 'Domain::User::FaUser'
    # SQL
  end

  sig { params(only_user: T.nilable(String)).void }
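Since user_map and post_map are session-scoped temp tables, they can be sanity-checked on the same connection before the DO block runs. A hedged, illustrative check (not part of the commit) that counts old favs whose post has no new-schema counterpart and would therefore be skipped by the INSERT ... ON CONFLICT:

  unmapped = ReduxApplicationRecord.connection.select_value(<<~SQL)
    SELECT COUNT(*)
    FROM domain_fa_favs old_favs
    LEFT JOIN post_map pm ON old_favs.post_id = pm.old_post_id
    WHERE pm.old_post_id IS NULL
  SQL
  logger.info "old favs without a migrated post: #{unmapped}"
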
@@ -771,7 +856,7 @@ class Domain::MigrateToDomain
  sig { params(user: Domain::User::FaUser).void }
  def migrate_fa_user_followed_users(user)
    user_url_name = user.url_name
-   old_user = Domain::Fa::User.find_by!(url_name: user_url_name)
+   old_user = Domain::Fa::User.find_by(url_name: user_url_name) || return
    followed_user_url_names = old_user.follows.pluck(:url_name)
    new_user_ids =
      Domain::User::FaUser.where(url_name: followed_user_url_names).pluck(:id)

@@ -11,11 +11,13 @@ class Arel::Visitors::ToSql
    join_name = o.relation.table_alias || o.relation.name
    ar_table = o.relation.instance_variable_get("@klass")
    if ar_table &&
-        attribute_def =
-          AttrJsonRecordAliases::ImplHelper.get_json_attr_def(
-            ar_table,
-            o.name,
-          )
+        (
+          attribute_def =
+            AttrJsonRecordAliases::ImplHelper.get_json_attr_def(
+              ar_table,
+              o.name,
+            )
+        )
      attr_type =
        T.cast(
          attribute_def.type.type,

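The added parentheses are Ruby's conventional "safe assignment in condition" style: the bare form already parses as ar_table && (attribute_def = ...), but it is easy to misread as a comparison, and linters such as RuboCop's Lint/AssignmentInCondition (with its default AllowSafeAssignment setting) accept only the parenthesized form. A standalone illustration with a made-up helper:

  # easy to misread as `ar_table && attribute_def == get_def(ar_table)`:
  if ar_table && attribute_def = get_def(ar_table)
    use(attribute_def)
  end

  # explicit form, as adopted in this hunk:
  if ar_table && (attribute_def = get_def(ar_table))
    use(attribute_def)
  end
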
@@ -3,6 +3,7 @@ class Domain::PostGroupJoin < ReduxApplicationRecord
  extend T::Helpers
+ include AttrJsonRecordAliases
  self.table_name = "domain_post_group_joins"
  self.primary_key = %i[post_id group_id]
  abstract!

  belongs_to :post, class_name: "::Domain::Post"

@@ -11,7 +11,7 @@ default: &default

redux_prod: &redux_prod
  adapter: postgresql
- host: 10.166.33.171
+ host: db
  port: 5432
  database: redux_prod
  username: scraper_redux

@@ -20,6 +20,7 @@ redux_prod: &redux_prod

redux_staging: &redux_staging
  <<: *redux_prod
+ host: 10.200.0.2
  username: scraper_redux_staging
  password: q6Jf8mXEUkAxdyHq1tUtCTPa1raX1QAT

@@ -12,6 +12,10 @@ namespace :blob_file do
    RubyProf.start if profile

    def migrate_impl(batch_size, start_at)
+     def n2d(n)
+       ActiveSupport::NumberHelper.number_to_delimited(n).rjust(8)
+     end
+
      num_migrated = 0
      num_processed = 0
      start_time = Time.now

@@ -26,8 +30,8 @@ namespace :blob_file do
      num_processed += batch.size
      rate = batch_migrated.to_f / (Time.now - start_time)
      puts [
-       "[migrated: #{ActiveSupport::NumberHelper.number_to_delimited(num_migrated).rjust(8)}]",
-       "[processed: #{ActiveSupport::NumberHelper.number_to_delimited(num_processed).rjust(8)}]",
+       "[migrated: #{n2d(num_migrated)}]",
+       "[processed: #{n2d(num_processed)}]",
        "[rate: #{rate.round(1).to_s.rjust(5)}/second]",
        "[last: '#{HexUtil.bin2hex(batch.last.sha256)}']",
      ].join(" ")

@@ -7,7 +7,7 @@ namespace :e621 do
  desc "download files for posts missing files"
  task download_missing_files: %i[environment set_logger_stdout] do
    relation =
-     Domain::E621::Post
+     Domain::Post::E621Post
        .where(file: nil, state: :ok)
        .where.not(file_url_str: nil)
    puts "will download #{relation.count} posts"

@@ -19,7 +19,7 @@ namespace :e621 do
  desc "scan e621 user favs"
  task scan_user_favs: :environment do
    while user =
-     Domain::E621::User
+     Domain::User::E621User
        .where(scanned_favs_at: nil)
        .where(num_other_favs_cached: ..200)
        .order("RANDOM()")

@@ -32,9 +32,9 @@ namespace :e621 do
  task scan_user_favs_descending: %i[environment set_logger_stdout] do
    user_query =
      lambda do
-       Domain::E621::User
+       Domain::User::E621User
          .where(scanned_favs_status: nil)
-         .or(Domain::E621::User.where.not(scanned_favs_status: "error"))
+         .or(Domain::User::E621User.where.not(scanned_favs_status: "error"))
          .where(scanned_favs_at: nil)
          .where.not(num_other_favs_cached: nil)
          .order(num_other_favs_cached: :desc)

@@ -0,0 +1,33 @@
# typed: false
require "rails_helper"

RSpec.describe Domain::BlobFile::MigrateBlobEntryToBlobFile do
  let(:log_sink) { StringIO.new }
  let(:migrator) { described_class.new(log_sink:) }

  describe "#run" do
    context "when migrating a single BlobEntry" do
      it "skips already migrated entries" do
        # First migration
        migrator.run

        # Second migration attempt
        expect { migrator.run }.not_to change(BlobFile, :count)
      end
    end

    context "when migrating multiple BlobEntries" do
      let!(:blob_entries) { 3.times.map { |i| create(:blob_entry) } }

      it "migrates all entries in batches" do
        expect { migrator.run }.to change(BlobFile, :count).by(3)
        blob_entries.each do |blob_entry|
          blob_file = BlobFile.find_by(sha256: blob_entry.sha256)
          expect(blob_file).to be_present
          expect(blob_file.content_bytes).to eq(blob_entry.contents)
          expect(File.exist?(blob_file.absolute_file_path)).to be true
        end
      end
    end
  end
end
@@ -429,57 +429,189 @@ RSpec.describe Domain::MigrateToDomain do
  end

  describe "#migrate_fa_users_favs" do
-   let!(:old_user) do
-     Domain::Fa::User.create!(url_name: "testuser", name: "Test_User")
+   let!(:old_users) do
+     [
+       Domain::Fa::User.create!(id: 45, url_name: "user1", name: "User 1"),
+       Domain::Fa::User.create!(id: 46, url_name: "user2", name: "User 2"),
+       Domain::Fa::User.create!(id: 47, url_name: "user3", name: "User 3"),
+       Domain::Fa::User.create!(id: 48, url_name: "user4", name: "User 4"), # User with no favs
+     ]
    end

-   let!(:new_user) do
-     Domain::User::FaUser.create!(url_name: "testuser", name: "Test_User")
+   let!(:new_users) do
+     [
+       Domain::User::FaUser.create!(id: 67, url_name: "user1", name: "User 1"),
+       Domain::User::FaUser.create!(id: 68, url_name: "user2", name: "User 2"),
+       Domain::User::FaUser.create!(id: 69, url_name: "user3", name: "User 3"),
+       Domain::User::FaUser.create!(id: 70, url_name: "user4", name: "User 4"),
+     ]
    end

    let!(:old_posts) do
-     3.times.map do |i|
+     [
        Domain::Fa::Post.create!(
-         fa_id: i + 1,
+         id: 100,
+         fa_id: 1,
          state: "ok",
-         title: "Test Post #{i}",
+         title: "Post 1",
          category: "artwork",
          posted_at: Time.current,
          scanned_at: Time.current,
-       )
-     end
+       ),
+       Domain::Fa::Post.create!(
+         id: 101,
+         fa_id: 2,
+         state: "ok",
+         title: "Post 2",
+         category: "artwork",
+         posted_at: Time.current,
+         scanned_at: Time.current,
+       ),
+       Domain::Fa::Post.create!(
+         id: 102,
+         fa_id: 3,
+         state: "ok",
+         title: "Post 3",
+         category: "artwork",
+         posted_at: Time.current,
+         scanned_at: Time.current,
+       ),
+       Domain::Fa::Post.create!(
+         id: 103,
+         fa_id: 4,
+         state: "ok",
+         title: "Post 4",
+         category: "artwork",
+         posted_at: Time.current,
+         scanned_at: Time.current,
+       ), # Post with no favs
+     ]
    end

    let!(:new_posts) do
-     old_posts.map do |old_post|
+     [
        Domain::Post::FaPost.create!(
-         fa_id: old_post.fa_id,
+         id: 200,
+         fa_id: 1,
          state: "ok",
-         title: old_post.title,
-       )
-     end
+         title: "Post 1",
+       ),
+       Domain::Post::FaPost.create!(
+         id: 201,
+         fa_id: 2,
+         state: "ok",
+         title: "Post 2",
+       ),
+       Domain::Post::FaPost.create!(
+         id: 202,
+         fa_id: 3,
+         state: "ok",
+         title: "Post 3",
+       ),
+       Domain::Post::FaPost.create!(
+         id: 203,
+         fa_id: 4,
+         state: "ok",
+         title: "Post 4",
+       ),
+     ]
    end

    before do
-     # Add favs to the old user
-     old_user.fav_posts = old_posts
-     old_user.save!
+     # Set up overlapping favorites:
+     # user1: posts 1, 2
+     # user2: posts 2, 3
+     # user3: posts 1, 3
+     # user4: no favs
+     # post4: no favs
+     old_users[0].fav_posts = [old_posts[0], old_posts[1]]
+     old_users[1].fav_posts = [old_posts[1], old_posts[2]]
+     old_users[2].fav_posts = [old_posts[0], old_posts[2]]
    end

-   it "migrates user favs correctly" do
+   it "migrates overlapping user favs correctly", quiet: false do
      expect { migrator.migrate_fa_users_favs }.to change(
        Domain::UserPostFav,
        :count,
-     ).by(3)
+     ).by(6) # Total number of unique user-post fav relationships

-     new_user.reload
-     expect(new_user.faved_posts.count).to eq(3)
-     expect(new_user.faved_posts.pluck(:fa_id)).to contain_exactly(1, 2, 3)
-     expect(new_user.migrated_user_favs_at).to be_present
+     # Verify user1's favs
+     new_users[0].reload
+     expect(new_users[0].faved_posts.pluck(:fa_id)).to contain_exactly(1, 2)
+     expect(new_users[0].migrated_user_favs_at).to be_present
+
+     # Verify user2's favs
+     new_users[1].reload
+     expect(new_users[1].faved_posts.pluck(:fa_id)).to contain_exactly(2, 3)
+     expect(new_users[1].migrated_user_favs_at).to be_present
+
+     # Verify user3's favs
+     new_users[2].reload
+     expect(new_users[2].faved_posts.pluck(:fa_id)).to contain_exactly(1, 3)
+     expect(new_users[2].migrated_user_favs_at).to be_present
+
+     # Verify user4 has no favs
+     new_users[3].reload
+     expect(new_users[3].faved_posts.count).to eq(0)
+     expect(new_users[3].migrated_user_favs_at).to be_present
+
+     # Verify post fav counts
+     expect(new_posts[0].faving_users.count).to eq(2) # Post 1 faved by user1 and user3
+     expect(new_posts[1].faving_users.count).to eq(2) # Post 2 faved by user1 and user2
+     expect(new_posts[2].faving_users.count).to eq(2) # Post 3 faved by user2 and user3
+     expect(new_posts[3].faving_users.count).to eq(0) # Post 4 has no favs
    end

+   it "handles partial migration when some new posts are missing" do
+     # Delete two new posts to simulate partial migration
+     new_posts[0].destroy # Post with fa_id 1
+     new_posts[2].destroy # Post with fa_id 3
+
+     expect { migrator.migrate_fa_users_favs }.to change(
+       Domain::UserPostFav,
+       :count,
+     ).by(2) # Only the favs for post 2 should be migrated
+
+     # Verify only favs for existing posts were migrated
+     new_users[0].reload
+     expect(new_users[0].faved_posts.pluck(:fa_id)).to contain_exactly(2)
+     expect(new_users[0].migrated_user_favs_at).to be_present # Should be set even for partial migration
+
+     new_users[1].reload
+     expect(new_users[1].faved_posts.pluck(:fa_id)).to contain_exactly(2)
+     expect(new_users[1].migrated_user_favs_at).to be_present
+
+     new_users[2].reload
+     expect(new_users[2].faved_posts.count).to eq(0)
+     expect(new_users[2].migrated_user_favs_at).to be_present
+   end
+
+   it "handles partial migration when some new users are missing" do
+     # Delete two new users to simulate partial migration
+     new_users[0].destroy # user1
+     new_users[2].destroy # user3
+
+     expect { migrator.migrate_fa_users_favs }.to change(
+       Domain::UserPostFav,
+       :count,
+     ).by(2) # Only user2's favs should be migrated
+
+     # Verify remaining user's favs were migrated
+     new_users[1].reload
+     expect(new_users[1].faved_posts.pluck(:fa_id)).to contain_exactly(2, 3)
+     expect(new_users[1].migrated_user_favs_at).to be_present
+
+     # Verify user4 still has no favs
+     new_users[3].reload
+     expect(new_users[3].faved_posts.count).to eq(0)
+     expect(new_users[3].migrated_user_favs_at).to be_present
+   end
+
    it "skips users that have already been migrated" do
-     new_user.update!(migrated_user_favs_at: Time.current)
+     # Mark all users as migrated
+     new_users.each do |user|
+       user.update!(migrated_user_favs_at: Time.current)
+     end

      expect { migrator.migrate_fa_users_favs }.not_to change(
        Domain::UserPostFav,
@@ -487,17 +619,45 @@ RSpec.describe Domain::MigrateToDomain do
      )
    end

-   it "handles missing posts gracefully" do
-     # Delete one of the new posts to simulate a missing post
-     new_posts.first.destroy
+   it "skips individual users that have already been migrated" do
+     # Mark only user1 and user3 as migrated
+     migration_time = 1.day.ago
+     new_users[0].update!(migrated_user_favs_at: migration_time)
+     new_users[2].update!(migrated_user_favs_at: migration_time)
+
+     # Only user2's favs (2 favs) and user4 (0 favs) should be migrated
      expect { migrator.migrate_fa_users_favs }.to change(
        Domain::UserPostFav,
        :count,
-     ).by(2)
+     ).by(2) # Only user2's favs should be migrated

-     expect(new_user.faved_posts.count).to eq(2)
-     expect(new_user.migrated_user_favs_at).to be_nil
+     # Verify user1's favs weren't migrated (should be empty)
+     new_users[0].reload
+     expect(new_users[0].faved_posts.count).to eq(0)
+     expect(new_users[0].migrated_user_favs_at).to be_within(1.second).of(
+       migration_time,
+     )
+
+     # Verify user2's favs were migrated
+     new_users[1].reload
+     expect(new_users[1].faved_posts.pluck(:fa_id)).to contain_exactly(2, 3)
+     expect(new_users[1].migrated_user_favs_at).to be_within(1.second).of(
+       Time.current,
+     )
+
+     # Verify user3's favs weren't migrated (should be empty)
+     new_users[2].reload
+     expect(new_users[2].faved_posts.count).to eq(0)
+     expect(new_users[2].migrated_user_favs_at).to be_within(1.second).of(
+       migration_time,
+     )
+
+     # Verify user4 was marked as migrated but still has no favs
+     new_users[3].reload
+     expect(new_users[3].faved_posts.count).to eq(0)
+     expect(new_users[3].migrated_user_favs_at).to be_within(1.second).of(
+       Time.current,
+     )
    end
  end

@@ -2,14 +2,6 @@
 require "rails_helper"

 RSpec.describe BlobFile, type: :model do
-  before(:all) do
-    # safeguard against running this test in a non-test environment
-    root_dir =
-      File.absolute_path(Rails.application.config_for("blob_file_location"))
-    expect(root_dir).to match(%r{^#{Rails.root}/tmp})
-    FileUtils.rm_rf(root_dir)
-  end
-
   describe "basic functionality" do
     it "can be built and saved" do
       content_bytes = SecureRandom.alphanumeric(1024)

spec/models/domain/post/inkbunny_post_spec.rb (new file, 19 lines)
@@ -0,0 +1,19 @@
# typed: false
require "rails_helper"

RSpec.describe Domain::Post::InkbunnyPost, type: :model do
  it "has a valid factory" do
    expect(build(:domain_post_inkbunny_post)).to be_valid
  end

  it "can include pools" do
    post = create(:domain_post_inkbunny_post)
    pool = post.pools.create!(ib_id: 1, name: "Test Pool")

    found_post =
      Domain::Post::InkbunnyPost.includes(:pools).find_by(ib_id: post.ib_id)

    expect(found_post).to eq(post)
    expect(found_post.pools).to eq([pool])
  end
end
@@ -43,6 +43,17 @@ RSpec.configure do |config|
     end
   end

+  config.before(:all) do
+    # safeguard against running this test in a non-test environment
+    root_dir =
+      File.absolute_path(Rails.application.config_for("blob_file_location"))
+    if root_dir.match?(%r{^#{Rails.root}/tmp})
+      FileUtils.rm_rf(root_dir)
+    else
+      raise "blob_file_location is not in the tmp directory"
+    end
+  end
+
   # rspec-expectations config goes here. You can use an alternate
   # assertion/expectation library such as wrong or the stdlib/minitest
   # assertions if you prefer.