Commit fdf19bfc authored by Kassio Borges

BulkImports: Reduce memory consumption when importing Epics

Avoid creating a new pipeline object when the imported dataset has more
than one page. Instead, call `BulkImports#run` recursively for each
page while there are more pages to process (sketched below).

MR: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/51595
parent 24590717
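
To make the change concrete, here is a minimal runnable sketch of the two approaches. These are illustrative classes only, not GitLab's: FakeContext stands in for the real context whose `entity.has_next_page?(:epics)` drives the recursion.

FakeContext = Struct.new(:pages_left) do
  def has_next_page?
    pages_left.positive?
  end
end

# Before: `after_run` was a class-level block, so `self` inside it was the
# pipeline class and `self.new` allocated a fresh pipeline for every page.
class OldPipeline
  OBJECTS = []

  def run(context)
    OBJECTS << object_id        # record which object handled this page
    context.pages_left -= 1
    self.class.new.run(context) if context.has_next_page?
  end
end

# After: `after_run` is an instance method that re-enters #run on the
# same object, so pagination allocates no extra pipeline objects.
class NewPipeline
  OBJECTS = []

  def run(context)
    OBJECTS << object_id
    context.pages_left -= 1
    after_run(context)
  end

  def after_run(context)
    run(context) if context.has_next_page?
  end
end

OldPipeline.new.run(FakeContext.new(3))
NewPipeline.new.run(FakeContext.new(3))
puts OldPipeline::OBJECTS.uniq.size # => 3 (a new pipeline per page)
puts NewPipeline::OBJECTS.uniq.size # => 1 (one instance for all pages)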
@@ -16,9 +16,9 @@ module EE
           loader EE::BulkImports::Groups::Loaders::EpicsLoader
 
-          after_run do |context|
+          def after_run(context)
             if context.entity.has_next_page?(:epics)
-              self.new.run(context)
+              run(context)
             end
           end
         end

@@ -26,15 +26,13 @@ RSpec.describe EE::BulkImports::Groups::Pipelines::EpicsPipeline do
   subject { described_class.new }
 
   it 'imports group epics into destination group' do
-    page1 = extractor_data(has_next_page: true, cursor: 'nextPageCursor')
-    page2 = extractor_data(has_next_page: false)
+    first_page = extractor_data(has_next_page: true, cursor: 'nextPageCursor')
+    last_page = extractor_data(has_next_page: false)
 
     allow_next_instance_of(BulkImports::Common::Extractors::GraphqlExtractor) do |extractor|
-      if entity.has_next_page?(:epics)
-        allow(extractor).to receive(:extract).and_return(page2)
-      else
-        allow(extractor).to receive(:extract).and_return(page1)
-      end
+      allow(extractor)
+        .to receive(:extract)
+        .and_return(first_page, last_page)
     end
 
     expect { subject.run(context) }.to change(::Epic, :count).by(2)
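
The reworked stub relies on standard RSpec behaviour: when `and_return` is given several values, successive calls return them in order, and the last value repeats once the list is exhausted. A standalone illustration, using plain RSpec doubles rather than GitLab's helpers:

RSpec.describe 'multi-value and_return' do
  it 'returns the stubbed pages in order' do
    extractor = double('extractor')
    allow(extractor).to receive(:extract).and_return(:first_page, :last_page)

    expect(extractor.extract).to eq(:first_page) # first call
    expect(extractor.extract).to eq(:last_page)  # second call
    expect(extractor.extract).to eq(:last_page)  # last value repeats
  end
end

This is why the `if entity.has_next_page?` branching in the old spec could be dropped: the stub itself hands back the paginated responses in sequence.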
@@ -24,7 +24,7 @@ module BulkImports
         end
       end
 
-      after_run.call(context) if after_run.present?
+      after_run(context) if respond_to?(:after_run)
     rescue MarkedAsFailedError
      log_skip(context)
    end
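
For context, a hedged sketch of what this dispatch change amounts to (a simplified module, not the real BulkImports runner): the hook moves from a stored block invoked with `#call` to an ordinary method that the runner invokes only when the including class defines it.

module RunnerSketch
  def run(context)
    # ... extract / transform / load ...
    after_run(context) if respond_to?(:after_run)
  end
end

class WithHook
  include RunnerSketch

  def after_run(_context)
    puts 'after_run hook invoked'
  end
end

class WithoutHook
  include RunnerSketch
end

WithHook.new.run(nil)    # prints the hook message
WithoutHook.new.run(nil) # no hook defined; run simply finishes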