Skip to content

Commit

Permalink
Remove DatabaseBackup
Browse files Browse the repository at this point in the history
  • Loading branch information
NickLaMuro committed Aug 31, 2021
1 parent b3aa135 commit 266f794
Show file tree
Hide file tree
Showing 23 changed files with 872 additions and 439 deletions.
496 changes: 496 additions & 0 deletions :q

Large diffs are not rendered by default.

99 changes: 0 additions & 99 deletions app/models/database_backup.rb

This file was deleted.

24 changes: 15 additions & 9 deletions app/models/miq_region.rb
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ class MiqRegion < ApplicationRecord
has_many :metric_rollups, :as => :resource # Destroy will be handled by purger
has_many :vim_performance_states, :as => :resource # Destroy will be handled by purger

virtual_has_many :database_backups, :class_name => "DatabaseBackup"
virtual_has_many :ext_management_systems, :class_name => "ExtManagementSystem"
virtual_has_many :hosts, :class_name => "Host"
virtual_has_many :storages, :class_name => "Storage"
Expand Down Expand Up @@ -34,10 +33,6 @@ class MiqRegion < ApplicationRecord

PERF_ROLLUP_CHILDREN = [:ext_management_systems, :storages]

# Regional scope helper: all DatabaseBackup rows whose ids fall in this
# region's id range.
# NOTE(review): the surrounding diff removes this method along with the
# DatabaseBackup model itself.
def database_backups
DatabaseBackup.in_region(region_number)
end

# Regional scope helper backing the virtual_has_many declaration above:
# all ExtManagementSystems whose ids fall in this region's id range.
def ext_management_systems
ExtManagementSystem.in_region(region_number)
end
Expand Down Expand Up @@ -144,13 +139,24 @@ def self.global_replication_type?
end

# Reports this region's pglogical replication role: :remote, :global or :none.
# NOTE(review): this span is a rendered diff hunk — the single delegation
# line below is the REMOVED implementation and the if/elsif chain is the
# ADDED one; only one of the two bodies exists in the real file.
def self.replication_type
MiqPglogical.new.replication_type
if remote_replication_type?
:remote
elsif global_replication_type?
:global
else
:none
end
end

# Switches this region's replication role, tearing down the previous
# configuration before setting up the new one. Returns desired_type.
# NOTE(review): rendered diff hunk — the first guard and the
# "MiqPglogical.new.replication_type =" assignment are the REMOVED
# implementation; the current_type-based body below is the ADDED one.
def self.replication_type=(desired_type)
return desired_type if desired_type == replication_type

MiqPglogical.new.replication_type = desired_type
# No-op when the region is already in the requested mode.
current_type = replication_type
return desired_type if desired_type == current_type

# Tear down whichever role is currently active...
MiqPglogical.new.destroy_provider if current_type == :remote
PglogicalSubscription.delete_all if current_type == :global
# ...then configure the new one; only :remote needs a provider.
MiqPglogical.new.configure_provider if desired_type == :remote
# Do nothing to add a global
desired_type
end

def ems_clouds
Expand Down
58 changes: 2 additions & 56 deletions app/models/miq_schedule.rb
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ class MiqSchedule < ApplicationRecord

validates :name, :uniqueness_when_changed => {:scope => [:userid, :resource_type]}
validates :name, :description, :resource_type, :run_at, :presence => true
validate :validate_run_at, :validate_file_depot
validate :validate_run_at

before_save :set_start_time_and_prod_default

Expand Down Expand Up @@ -41,7 +41,7 @@ class MiqSchedule < ApplicationRecord

SYSTEM_SCHEDULE_CLASSES = %w(MiqReport MiqAlert MiqWidget).freeze
VALID_INTERVAL_UNITS = %w(minutely hourly daily weekly monthly once).freeze
ALLOWED_CLASS_METHOD_ACTIONS = %w(db_backup db_gc automation_request).freeze
ALLOWED_CLASS_METHOD_ACTIONS = %w(automation_request).freeze
IMPORT_CLASS_NAMES = %w[MiqSchedule].freeze

default_value_for :userid, "system"
Expand Down Expand Up @@ -247,37 +247,10 @@ def action_automation_request(_klass, _at)
AutomationRequest.create_from_scheduled_task(user, filter[:uri_parts], parameters)
end

# Queues a database backup as an MiqTask + MiqQueue message.
# klass is the class whose .backup method is queued (DatabaseBackup at the
# call sites visible here); the scheduled-at time is unused.
# NOTE(review): deleted by this commit together with the DatabaseBackup model.
def action_db_backup(klass, _at)
# Persist the acting user and depot/schedule ids into sched_action so the
# queued worker can reconstruct the context.
self.sched_action ||= {}
self.sched_action[:options] ||= {}
self.sched_action[:options][:userid] = userid
opts = self.sched_action[:options]
opts[:file_depot_id] = file_depot.id
opts[:miq_schedule_id] = id
# Backups can be long-running, so the message timeout comes from settings.
queue_opts = {:class_name => klass.name, :method_name => "backup", :args => [opts], :role => "database_operations",
:msg_timeout => ::Settings.task.active_task_timeout.to_i_with_method}
task_opts = {:action => "Database backup", :userid => self.sched_action[:options][:userid]}
MiqTask.generic_action_with_callback(task_opts, queue_opts)
end

# Queues a database garbage-collection run (klass.gc) as an MiqTask +
# MiqQueue message on the database_operations role; the scheduled-at time
# is unused. NOTE(review): deleted by this commit.
def action_db_gc(klass, _at)
# Record the acting user in sched_action so the queued job runs as them.
self.sched_action ||= {}
self.sched_action[:options] ||= {}
self.sched_action[:options][:userid] = userid
opts = self.sched_action[:options]
queue_opts = {:class_name => klass.name, :method_name => "gc", :args => [opts], :role => "database_operations"}
task_opts = {:action => "Database GC", :userid => self.sched_action[:options][:userid]}
MiqTask.generic_action_with_callback(task_opts, queue_opts)
end

# Manual (non-scheduled) entry point: fires the automation-request action
# immediately, with no target class/time context.
def run_automation_request
action_automation_request(AutomationRequest, nil)
end

# Manual (non-scheduled) entry point for a database backup.
# NOTE(review): deleted by this commit together with action_db_backup and
# the DatabaseBackup model.
def run_adhoc_db_backup
action_db_backup(DatabaseBackup, nil)
end

def action_evaluate_alert(obj, _at)
MiqAlert.evaluate_queue(obj)
_log.info("Action [#{name}] has been run for target type: [#{obj.class}] with name: [#{obj.name}]")
Expand Down Expand Up @@ -325,33 +298,6 @@ def validate_run_at
end
end

# ActiveModel validation for db_backup schedules: the attached FileDepot
# must have a URI and, for non-NFS depots, credentials.
# NOTE(review): deleted by this commit along with the db_backup action.
def validate_file_depot # TODO: Do we need this if the validations are on the FileDepot classes?
if self.sched_action.kind_of?(Hash) && self.sched_action[:method] == "db_backup" && file_depot
# NFS depots are unauthenticated, so only non-NFS URIs need credentials.
errors.add(:file_depot, "is missing credentials") if !file_depot.uri.to_s.starts_with?("nfs") && file_depot.missing_credentials?
errors.add(:file_depot, "is missing uri") if file_depot.uri.blank?
end
end

# Builds/updates this schedule's FileDepot from UI params and either saves
# it or verifies its credentials.
# params — assumed to carry :uri_prefix, :uri, :name, :save, :username,
# :password plus provider-specific keys (:swift_api_port, :aws_region,
# :openstack_region, :keystone_api_version, :v3_domain_ident,
# :security_protocol) — TODO confirm against the UI caller.
# NOTE(review): deleted by this commit.
def verify_file_depot(params) # TODO: This logic belongs in the UI, not sure where
depot_class = FileDepot.supported_protocols[params[:uri_prefix]]
# Reuse the existing depot when its type matches, otherwise build a new one.
depot = file_depot.class.name == depot_class ? file_depot : build_file_depot(:type => depot_class)
depot.name = params[:name]
uri = params[:uri]
api_port = params[:swift_api_port]
depot.aws_region = params[:aws_region]
depot.openstack_region = params[:openstack_region]
depot.keystone_api_version = params[:keystone_api_version]
depot.v3_domain_ident = params[:v3_domain_ident]
depot.security_protocol = params[:security_protocol]
# Swift depots need the API port folded into the URI.
depot.uri = api_port.blank? ? uri : depot.merged_uri(URI(uri), api_port)
if params[:save]
file_depot.save!
file_depot.update_authentication(:default => {:userid => params[:username], :password => params[:password]}) if (params[:username] || params[:password]) && depot.class.requires_credentials?
elsif depot.class.requires_credentials?
# Dry-run: check the supplied credentials without persisting anything.
depot.verify_credentials(nil, params)
end
end

def next_interval_time
unless self.valid? || errors[:run_at].blank?
_log.warn("Invalid schedule [#{id}] [#{name}]: #{Array.wrap(errors[:run_at]).join(", ")}")
Expand Down
103 changes: 103 additions & 0 deletions config/initializers/puma-launcher.siginfo.patch.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
# Overwrites the default behavior of puma's thread lock to print the logger
# from ActionDispatch::DebugLocks
#
# Only load if puma is active
#
if defined?(Puma)
Puma::Launcher

module Puma
class Launcher
private

# Dumps every live thread's lock state against the Rails load interlock
# (ActiveSupport::Dependencies.interlock), mimicking the report produced
# by ActionDispatch::DebugLocks but printed to stdout from puma's
# SIGINFO handler.
def log_thread_status
raw_threads = Thread.list
# Reach into ShareLock internals (private ivars) to get per-thread state.
# NOTE(review): fragile — depends on ActiveSupport's private layout.
interlock_share = ActiveSupport::Dependencies.interlock.instance_variable_get(:@lock)
sleeping = interlock_share.instance_variable_get(:@sleeping)
sharing = interlock_share.instance_variable_get(:@sharing)
waiting = interlock_share.instance_variable_get(:@waiting)
exclusive_thread = interlock_share.instance_variable_get(:@exclusive_thread)

# Build one info hash per thread keyed by the Thread object itself.
threads = raw_threads.each_with_index.inject({}) do |data, (thread, index)|
purpose, compatible = waiting[thread]

data[thread] = {
:index => index,
:thread => thread,
:backtrace => thread.backtrace,
:sharing => sharing[thread],
:exclusive => exclusive_thread == thread,
:purpose => purpose,
:compatible => compatible,
:waiting => !!waiting[thread],
:sleeper => sleeping[thread]
}

data
end

threads.each do |thread, info|
# Summarize this thread's hold on the interlock.
if info[:exclusive]
lock_state = +"Exclusive"
elsif info[:sharing] > 0
lock_state = +"Sharing"
lock_state << " x#{info[:sharing]}" if info[:sharing] > 1
else
lock_state = +"No lock"
end

if info[:waiting]
lock_state << " (yielded share)"
end

thread_header = "0x#{thread.__id__.to_s(16)} "
# NOTE(review): reads the thread-local 'label' — presumably set by puma
# when it names its worker threads; confirm it is populated.
thread_header << "'#{info[:thread]['label']}' "
thread_header << "#{thread.status || 'dead'} "

puts +"Thread #{info[:index]} [#{thread_header}] #{lock_state}"

# Sleeping threads: say what they wait for and who blocks them.
if info[:sleeper]
print " Waiting in #{info[:sleeper]}"
print " to #{info[:purpose].to_s.inspect}" unless info[:purpose].nil?
puts

if info[:compatible]
compat = info[:compatible].map { |c| c == false ? "share" : c.to_s.inspect }
puts " may be pre-empted for: #{compat.join(', ')}"
end

blockers = threads.values.select { |binfo| blocked_by?(info, binfo, threads.values) }
puts " blocked by: #{blockers.map { |i| i[:index] }.join(', ')}" if blockers.any?
end

# Regardless of state, report which other threads this one is blocking.
blockees = threads.values.select { |binfo| blocked_by?(binfo, info, threads.values) }
puts " blocking: #{blockees.map { |i| i[:index] }.join(', ')}" if blockees.any?

puts "#{info[:backtrace].join("\n")}\n" if info[:backtrace]
puts "\n\n---\n\n\n"
end
end

# Decides whether the +victim+ thread-info hash is being held up by
# +blocker+, following the same rules as ActionDispatch::DebugLocks.
# Each hash carries :sleeper, :exclusive, :sharing, :waiting, :compatible
# and :purpose as gathered by log_thread_status.
def blocked_by?(victim, blocker, all_threads)
  # A thread can never be its own blocker.
  return false if victim.equal?(blocker)

  sleeper = victim[:sleeper]

  if sleeper == :start_sharing
    # Waiting for a share: an exclusive holder blocks, as does any waiter
    # whose compatibility list does not allow new shares.
    blocker[:exclusive] ||
      (!victim[:waiting] && blocker[:compatible] && !blocker[:compatible].include?(false))
  elsif sleeper == :start_exclusive
    # Waiting to go exclusive: any sharer or exclusive holder blocks,
    # unless the blocker explicitly tolerates this victim's purpose.
    blocker[:sharing] > 0 ||
      blocker[:exclusive] ||
      (blocker[:compatible] && !blocker[:compatible].include?(victim[:purpose]))
  elsif sleeper == :yield_shares
    # Temporarily yielded its share: only an exclusive holder blocks.
    blocker[:exclusive]
  elsif sleeper == :stop_exclusive
    # Releasing exclusivity: blocked while an exclusive holder exists, or
    # while every thread still tolerates the blocker's purpose.
    blocker[:exclusive] ||
      victim[:compatible] &&
        victim[:compatible].include?(blocker[:purpose]) &&
        all_threads.all? { |other| !other[:compatible] || blocker.equal?(other) || other[:compatible].include?(blocker[:purpose]) }
  end
end
end
end
end
1 change: 0 additions & 1 deletion config/replication_exclude_tables.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
- conditions_miq_policies
- custom_buttons
- customization_specs
- database_backups
- event_logs
- external_urls
- file_depots
Expand Down
Binary file added db.dump
Binary file not shown.
4 changes: 0 additions & 4 deletions db/fixtures/miq_event_definition_events.yml
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,6 @@
description: Server High DB Disk Usage
event_type: Default
set_type: evm_operations
- name: evm_server_db_backup_low_space
description: Server Database Backup Insufficient Space
event_type: Default
set_type: evm_operations
- name: evm_worker_start
description: Worker Started
event_type: Default
Expand Down
Binary file added db/radar.sqlite3
Binary file not shown.
21 changes: 21 additions & 0 deletions diagram_of_embedded_ansible_workflow
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@

[1].execute =>
[1].launch_ansible_job =>
[2].create_job (.create_stack) =>
[2].raw_create_stack =>
[3].run =>
[4][5].create_job =>
[4][6].signal =>
... =>
[5].execute
[5].launch_runner
[7].run_async


[1]: ServiceAnsiblePlaybook (app/models/service_ansible_playbook.rb)
[2]: ManageIQ::Providers::EmbeddedAnsible::AutomationManager::Job (app/models/manageiq/providers/embedded_ansible/automation_manager/job.rb)
[3]: ManageIQ::Providers::EmbeddedAnsible::AutomationManager::Playbook (app/models/manageiq/providers/embedded_ansible/automation_manager/playbook.rb)
[4]: ManageIQ::Providers::AnsiblePlaybookWorkflow (app/models/manageiq/providers/ansible_playbook_workflow.rb)
[5]: ManageIQ::Providers::AnsibleRunnerWorkflow (app/models/manageiq/providers/ansible_runner_workflow.rb)
[6]: Job (app/models/job.rb)
[7]: Ansible::Runner (lib/ansible/runner.rb)
16 changes: 16 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Single-container GitLab EE (omnibus image) for local development/testing.
web:
image: 'gitlab/gitlab-ee:latest'
restart: always
hostname: 'gitlab.example.com'
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'https://gitlab.example.com'
# Add any other gitlab.rb configuration here, each on its own line
# host:container — HTTP, HTTPS, and SSH for git-over-ssh
ports:
- '80:80'
- '443:443'
- '22:22'
# $GITLAB_HOME must be exported in the shell before `docker-compose up`
volumes:
- '$GITLAB_HOME/config:/etc/gitlab'
- '$GITLAB_HOME/logs:/var/log/gitlab'
- '$GITLAB_HOME/data:/var/opt/gitlab'
Loading

0 comments on commit 266f794

Please sign in to comment.