diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..09f42e1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+tmp/*
+log/*
+dump/*
+!tmp/.keep
+!log/.keep
+!dump/.keep
+*.gem
diff --git a/.travis.yml b/.travis.yml
index c5b14c3..f4e6886 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,15 +1,9 @@
rvm: 2.7.2
cache: bundler
-import:
- - travis-ci/build-configs:db-setup.yml
-
services:
- redis
-before_install:
- - gem install bundler
-
env:
global:
- PATH=/snap/bin:$PATH
@@ -17,4 +11,26 @@ env:
jobs:
include:
- stage: "testing time"
- script: bundle exec rspec -e test
\ No newline at end of file
+ script: bundle exec rspec --tag ~slow
+
+dist: xenial
+
+before_install:
+ - gem install bundler
+ - sudo apt-get install -yq --no-install-suggests --no-install-recommends postgresql-common
+ - sudo service postgresql stop
+ - sudo apt install -yq --no-install-suggests --no-install-recommends postgresql-11 postgresql-client-11
+ - sed -e 's/^port.*/port = 5432/' /etc/postgresql/11/main/postgresql.conf > postgresql.conf
+ - sudo chown postgres postgresql.conf
+ - sudo mv postgresql.conf /etc/postgresql/11/main
+ - sudo cp /etc/postgresql/{10,11}/main/pg_hba.conf
+ - sudo service postgresql start 11
+
+before_script:
+ - psql --version
+ - psql -c 'CREATE DATABASE travis_test;' -U postgres
+ - psql -t -c "SELECT 1 FROM pg_roles WHERE rolname='travis'" -U postgres | grep 1 || psql -c 'CREATE ROLE travis SUPERUSER LOGIN CREATEDB;' -U postgres
+ - psql -f db/schema.sql -v ON_ERROR_STOP=1 travis_test
+ - psql -c 'CREATE DATABASE travis_test_destination;' -U postgres
+ - psql -t -c "SELECT 1 FROM pg_roles WHERE rolname='travis'" -U postgres | grep 1 || psql -c 'CREATE ROLE travis SUPERUSER LOGIN CREATEDB;' -U postgres
+ - psql -f db/schema.sql -v ON_ERROR_STOP=1 travis_test_destination
\ No newline at end of file
diff --git a/Gemfile b/Gemfile
index ecbc13b..65903ed 100644
--- a/Gemfile
+++ b/Gemfile
@@ -3,29 +3,4 @@
source 'https://rubygems.org'
git_source(:github) { |repo| "https://github.com/#{repo}.git" }
-ruby '2.7.2'
-
-gem 'activerecord'
-gem 'google-cloud-storage', '~> 1.8', require: false
-gem 'pg'
-gem 'pry'
-gem 'rails', '~> 6.1.3.1'
-gem 'redis'
-
-gem 'bootsnap', require: false
-
-group :development, :test do
- gem 'brakeman'
- gem 'byebug', platforms: %i[mri mingw x64_mingw]
- gem 'factory_bot'
- gem 'rspec-rails'
- gem 'listen'
-end
-
-group :development do
- gem 'rubocop', '~> 0.75.1', require: false
- gem 'rubocop-rspec'
-end
-
-# Windows does not include zoneinfo files, so bundle the tzinfo-data gem
-gem 'tzinfo-data', platforms: [:mingw, :mswin, :x64_mingw, :jruby]
+gemspec
\ No newline at end of file
diff --git a/Gemfile.lock b/Gemfile.lock
deleted file mode 100644
index 5b6034d..0000000
--- a/Gemfile.lock
+++ /dev/null
@@ -1,280 +0,0 @@
-GEM
- remote: https://rubygems.org/
- specs:
- actioncable (6.1.3.1)
- actionpack (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- nio4r (~> 2.0)
- websocket-driver (>= 0.6.1)
- actionmailbox (6.1.3.1)
- actionpack (= 6.1.3.1)
- activejob (= 6.1.3.1)
- activerecord (= 6.1.3.1)
- activestorage (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- mail (>= 2.7.1)
- actionmailer (6.1.3.1)
- actionpack (= 6.1.3.1)
- actionview (= 6.1.3.1)
- activejob (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- mail (~> 2.5, >= 2.5.4)
- rails-dom-testing (~> 2.0)
- actionpack (6.1.3.1)
- actionview (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- rack (~> 2.0, >= 2.0.9)
- rack-test (>= 0.6.3)
- rails-dom-testing (~> 2.0)
- rails-html-sanitizer (~> 1.0, >= 1.2.0)
- actiontext (6.1.3.1)
- actionpack (= 6.1.3.1)
- activerecord (= 6.1.3.1)
- activestorage (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- nokogiri (>= 1.8.5)
- actionview (6.1.3.1)
- activesupport (= 6.1.3.1)
- builder (~> 3.1)
- erubi (~> 1.4)
- rails-dom-testing (~> 2.0)
- rails-html-sanitizer (~> 1.1, >= 1.2.0)
- activejob (6.1.3.1)
- activesupport (= 6.1.3.1)
- globalid (>= 0.3.6)
- activemodel (6.1.3.1)
- activesupport (= 6.1.3.1)
- activerecord (6.1.3.1)
- activemodel (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- activestorage (6.1.3.1)
- actionpack (= 6.1.3.1)
- activejob (= 6.1.3.1)
- activerecord (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- marcel (~> 1.0.0)
- mini_mime (~> 1.0.2)
- activesupport (6.1.3.1)
- concurrent-ruby (~> 1.0, >= 1.0.2)
- i18n (>= 1.6, < 2)
- minitest (>= 5.1)
- tzinfo (~> 2.0)
- zeitwerk (~> 2.3)
- addressable (2.7.0)
- public_suffix (>= 2.0.2, < 5.0)
- ast (2.4.2)
- bootsnap (1.7.3)
- msgpack (~> 1.0)
- brakeman (5.0.0)
- builder (3.2.4)
- byebug (11.1.3)
- coderay (1.1.3)
- concurrent-ruby (1.1.8)
- crass (1.0.6)
- declarative (0.0.20)
- declarative-option (0.1.0)
- diff-lcs (1.4.4)
- digest-crc (0.6.3)
- rake (>= 12.0.0, < 14.0.0)
- erubi (1.10.0)
- factory_bot (6.1.0)
- activesupport (>= 5.0.0)
- faraday (1.3.0)
- faraday-net_http (~> 1.0)
- multipart-post (>= 1.2, < 3)
- ruby2_keywords
- faraday-net_http (1.0.1)
- ffi (1.15.0)
- globalid (0.4.2)
- activesupport (>= 4.2.0)
- google-apis-core (0.3.0)
- addressable (~> 2.5, >= 2.5.1)
- googleauth (~> 0.14)
- httpclient (>= 2.8.1, < 3.0)
- mini_mime (~> 1.0)
- representable (~> 3.0)
- retriable (>= 2.0, < 4.0)
- rexml
- signet (~> 0.14)
- webrick
- google-apis-iamcredentials_v1 (0.2.0)
- google-apis-core (~> 0.1)
- google-apis-storage_v1 (0.3.0)
- google-apis-core (~> 0.1)
- google-cloud-core (1.6.0)
- google-cloud-env (~> 1.0)
- google-cloud-errors (~> 1.0)
- google-cloud-env (1.5.0)
- faraday (>= 0.17.3, < 2.0)
- google-cloud-errors (1.1.0)
- google-cloud-storage (1.31.0)
- addressable (~> 2.5)
- digest-crc (~> 0.4)
- google-apis-iamcredentials_v1 (~> 0.1)
- google-apis-storage_v1 (~> 0.1)
- google-cloud-core (~> 1.2)
- googleauth (~> 0.9)
- mini_mime (~> 1.0)
- googleauth (0.16.0)
- faraday (>= 0.17.3, < 2.0)
- jwt (>= 1.4, < 3.0)
- memoist (~> 0.16)
- multi_json (~> 1.11)
- os (>= 0.9, < 2.0)
- signet (~> 0.14)
- httpclient (2.8.3)
- i18n (1.8.9)
- concurrent-ruby (~> 1.0)
- jaro_winkler (1.5.4)
- jwt (2.2.2)
- listen (3.5.0)
- rb-fsevent (~> 0.10, >= 0.10.3)
- rb-inotify (~> 0.9, >= 0.9.10)
- loofah (2.9.0)
- crass (~> 1.0.2)
- nokogiri (>= 1.5.9)
- mail (2.7.1)
- mini_mime (>= 0.1.1)
- marcel (1.0.0)
- memoist (0.16.2)
- method_source (1.0.0)
- mini_mime (1.0.3)
- mini_portile2 (2.5.0)
- minitest (5.14.4)
- msgpack (1.4.2)
- multi_json (1.15.0)
- multipart-post (2.1.1)
- nio4r (2.5.7)
- nokogiri (1.11.2)
- mini_portile2 (~> 2.5.0)
- racc (~> 1.4)
- nokogiri (1.11.2-x86_64-darwin)
- racc (~> 1.4)
- os (1.1.1)
- parallel (1.20.1)
- parser (3.0.0.0)
- ast (~> 2.4.1)
- pg (1.2.3)
- pry (0.14.0)
- coderay (~> 1.1)
- method_source (~> 1.0)
- public_suffix (4.0.6)
- racc (1.5.2)
- rack (2.2.3)
- rack-test (1.1.0)
- rack (>= 1.0, < 3)
- rails (6.1.3.1)
- actioncable (= 6.1.3.1)
- actionmailbox (= 6.1.3.1)
- actionmailer (= 6.1.3.1)
- actionpack (= 6.1.3.1)
- actiontext (= 6.1.3.1)
- actionview (= 6.1.3.1)
- activejob (= 6.1.3.1)
- activemodel (= 6.1.3.1)
- activerecord (= 6.1.3.1)
- activestorage (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- bundler (>= 1.15.0)
- railties (= 6.1.3.1)
- sprockets-rails (>= 2.0.0)
- rails-dom-testing (2.0.3)
- activesupport (>= 4.2.0)
- nokogiri (>= 1.6)
- rails-html-sanitizer (1.3.0)
- loofah (~> 2.3)
- railties (6.1.3.1)
- actionpack (= 6.1.3.1)
- activesupport (= 6.1.3.1)
- method_source
- rake (>= 0.8.7)
- thor (~> 1.0)
- rainbow (3.0.0)
- rake (13.0.3)
- rb-fsevent (0.10.4)
- rb-inotify (0.10.1)
- ffi (~> 1.0)
- redis (4.2.5)
- representable (3.0.4)
- declarative (< 0.1.0)
- declarative-option (< 0.2.0)
- uber (< 0.2.0)
- retriable (3.1.2)
- rexml (3.2.4)
- rspec-core (3.10.1)
- rspec-support (~> 3.10.0)
- rspec-expectations (3.10.1)
- diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.10.0)
- rspec-mocks (3.10.2)
- diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.10.0)
- rspec-rails (5.0.1)
- actionpack (>= 5.2)
- activesupport (>= 5.2)
- railties (>= 5.2)
- rspec-core (~> 3.10)
- rspec-expectations (~> 3.10)
- rspec-mocks (~> 3.10)
- rspec-support (~> 3.10)
- rspec-support (3.10.2)
- rubocop (0.75.1)
- jaro_winkler (~> 1.5.1)
- parallel (~> 1.10)
- parser (>= 2.6)
- rainbow (>= 2.2.2, < 4.0)
- ruby-progressbar (~> 1.7)
- unicode-display_width (>= 1.4.0, < 1.7)
- rubocop-rspec (1.41.0)
- rubocop (>= 0.68.1)
- ruby-progressbar (1.11.0)
- ruby2_keywords (0.0.4)
- signet (0.15.0)
- addressable (~> 2.3)
- faraday (>= 0.17.3, < 2.0)
- jwt (>= 1.5, < 3.0)
- multi_json (~> 1.10)
- sprockets (4.0.2)
- concurrent-ruby (~> 1.0)
- rack (> 1, < 3)
- sprockets-rails (3.2.2)
- actionpack (>= 4.0)
- activesupport (>= 4.0)
- sprockets (>= 3.0.0)
- thor (1.1.0)
- tzinfo (2.0.4)
- concurrent-ruby (~> 1.0)
- uber (0.1.0)
- unicode-display_width (1.6.1)
- webrick (1.7.0)
- websocket-driver (0.7.3)
- websocket-extensions (>= 0.1.0)
- websocket-extensions (0.1.5)
- zeitwerk (2.4.2)
-
-PLATFORMS
- ruby
- x86_64-darwin-20
-
-DEPENDENCIES
- activerecord
- bootsnap
- brakeman
- byebug
- factory_bot
- google-cloud-storage (~> 1.8)
- listen
- pg
- pry
- rails (~> 6.1.3.1)
- redis
- rspec-rails
- rubocop (~> 0.75.1)
- rubocop-rspec
- tzinfo-data
-
-RUBY VERSION
- ruby 2.7.2p137
-
-BUNDLED WITH
- 2.2.7
diff --git a/README.md b/README.md
index 0ebd57d..36f5868 100644
--- a/README.md
+++ b/README.md
@@ -1,37 +1,111 @@
# README
-*travis-backup* is a cron application, which export builds and it's correspoonding jobs
-to json files and sends them to GCE.
+*travis-backup* is an application that helps with housekeeping and backup for the Travis CI database v2.2 and with migration to the v3.0 database. By default it removes requests and builds with their corresponding jobs and logs, as long as they are older than the given threshold (and backs them up to files, if this option is active). It can also be run in special modes: `move_logs`, for moving logs from one database to another, and `remove_orphans`, for deleting all orphaned data.
-* Ruby version
+### Installation and run
-2.7.2
+You can install the gem using
+
+`gem install travis-backup`
+
+Next you can run it like:
+
+```
+travis_backup 'postgres://user:pass@localhost:5432/my_db' --threshold 6
+```
+
+All arguments:
+
+```
+ first argument, no flag # database url
+ -b, --backup # when not present, removes data without saving it to file
+ -d, --dry_run # only prints to the console what data will be backed up and deleted
+ -l, --limit LIMIT # builds limit for one backup file
+ -t, --threshold MONTHS # number of months from now - data younger than this time won't be backed up
+ -f, --files_location PATH # path of the folder in which backup files will be placed
+ -u, --user_id ID # run only for given user
+ -o, --org_id ID # run only for given organization
+ -r, --repo_id ID # run only for given repository
+ --move_logs # run in move logs mode - move all logs to database at destination_db_url URL
+ --destination_db_url URL # URL for moving logs to
+ --remove_orphans # run in remove orphans mode
+```
+
+Or inside your app:
+
+```
+require 'travis-backup'
+
+backup = Backup.new(
+ if_backup: true,
+ limit: 500,
+ threshold: 12,
+ files_location: './my_folder/dump',
+ database_url: 'postgresql://postgres:pass@localhost:5432/my_db'
+)
+backup.run
+```
+
+You can also run backup only for given user, organisation or repository:
+
+```
+backup.run(user_id: 1)
+# or
+backup.run(org_id: 1)
+# or
+backup.run(repo_id: 1)
+```
-* System dependencies
+#### Special modes
-* Configuration
+Using the `--move_logs` flag you can move all logs to the database at the `destination_db_url` URL (which is required in this case). When you run the gem in this mode no files are created and no other tables are touched.
-`config/settinigs.yml` or env vars like:
-`BACKUP_LIMIT`
-`BACKUP_DELAY`
-`BACKUP_HOUSEKEEPING_PERIOD`
-`LOGS_URL`
-`DATABASE_URL`
-`GCE_PROJECT`
-`GCE_CREDENTIALS`
-`GCE_BUCKET`
-`REDIS_URL`
+Using the `--remove_orphans` flag you can remove all orphaned data from tables. When you run the gem in this mode no files are created.
-* How to run the test suite
+Using the `--dry_run` flag you can check which data would be removed by the gem, without actually removing it. Instead, reports will be printed to standard output. This flag can also be combined with `--move_logs` or `--remove_orphans`.
-`bundle exec rspec`
+### Configuration options
-* How to run appication
+Apart from command line arguments, one of the ways you can configure your export is a file `config/settings.yml` that you can place in your app's main directory. The gem expects properties in the following format:
-`bundle exec bin/run_backup`
+```
+backup:
+ if_backup: true # when false, removes data without saving it to file
+ dry_run: false # when true, only prints to the console what data would be backed up and deleted
+ limit: 1000 # builds limit for one backup file
+ threshold: 6 # number of months from now - data younger than this time won't be backed up
+ files_location: './dump' # path of the folder in which backup files will be placed
+ user_id: 1 # run only for given user
+ org_id: 1 # run only for given organization
+ repo_id: 1 # run only for given repository
+ move_logs: false # run in move logs mode - move all logs to database at destination_db_url URL
+ remove_orphans: false # run in remove orphans mode
+```
-It's also possibe to run console
+You can also set these properties using env vars corresponding to them: `IF_BACKUP`, `BACKUP_DRY_RUN`, `BACKUP_LIMIT`, `BACKUP_THRESHOLD`, `BACKUP_FILES_LOCATION`, `BACKUP_USER_ID`, `BACKUP_ORG_ID`, `BACKUP_REPO_ID`, `BACKUP_MOVE_LOGS`, `BACKUP_REMOVE_ORPHANS`.
-`bundle exec bin/console`
-and then run export for single user/organization
-`Backup.new.export(owner_id)`
\ No newline at end of file
+You should also specify your database url. You can do this the standard way in `config/database.yml` file, setting the `database_url` hash argument while creating `Backup` instance or using the `DATABASE_URL` env var. Your database should be consistent with the Travis 2.2 database schema.
+
+For `move_logs` mode you also need to specify a destination database. You can set it in the `config/database.yml` file, in the `destination` subsection, by setting the `destination_db_url` hash argument while creating the `Backup` instance, or by using the `BACKUP_DESTINATION_DB_URL` env var. Your destination database should be consistent with the Travis 3.0 database schema.
+
+### How to run the test suite
+
+You can run the tests after cloning this repository. Next you should call
+
+```
+bundle install
+```
+
+and
+
+```
+bundle exec rspec
+```
+
+To make the tests work properly you should also provide database connection strings for empty test databases. You can set them as `DATABASE_URL` and `BACKUP_DESTINATION_DB_URL` environment variables or in `config/database.yml`.
+
+**Warning: these databases will be cleaned during tests, so ensure that they include no important data.**
+
+### Ruby version
+
+2.7.2
diff --git a/Rakefile b/Rakefile
index 43ed54a..e85148f 100644
--- a/Rakefile
+++ b/Rakefile
@@ -2,7 +2,7 @@ namespace :backup do
desc 'Backup all daily outdated build/job'
task :cron do
$: << 'lib'
- require 'backup'
+ require 'travis-backup'
Backup.new.run
end
diff --git a/app/assets/config/manifest.js b/app/assets/config/manifest.js
deleted file mode 100644
index 5918193..0000000
--- a/app/assets/config/manifest.js
+++ /dev/null
@@ -1,2 +0,0 @@
-//= link_tree ../images
-//= link_directory ../stylesheets .css
diff --git a/app/assets/stylesheets/application.css b/app/assets/stylesheets/application.css
deleted file mode 100644
index d05ea0f..0000000
--- a/app/assets/stylesheets/application.css
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * This is a manifest file that'll be compiled into application.css, which will include all the files
- * listed below.
- *
- * Any CSS and SCSS file within this directory, lib/assets/stylesheets, or any plugin's
- * vendor/assets/stylesheets directory can be referenced here using a relative path.
- *
- * You're free to add application-wide styles to this file and they'll appear at the bottom of the
- * compiled file so the styles you add here take precedence over styles defined in any other CSS/SCSS
- * files in this directory. Styles in this file should be added after the last require_* statement.
- * It is generally better to create a new file per style scope.
- *
- *= require_tree .
- *= require_self
- */
diff --git a/app/channels/application_cable/channel.rb b/app/channels/application_cable/channel.rb
deleted file mode 100644
index d672697..0000000
--- a/app/channels/application_cable/channel.rb
+++ /dev/null
@@ -1,4 +0,0 @@
-module ApplicationCable
- class Channel < ActionCable::Channel::Base
- end
-end
diff --git a/app/channels/application_cable/connection.rb b/app/channels/application_cable/connection.rb
deleted file mode 100644
index 0ff5442..0000000
--- a/app/channels/application_cable/connection.rb
+++ /dev/null
@@ -1,4 +0,0 @@
-module ApplicationCable
- class Connection < ActionCable::Connection::Base
- end
-end
diff --git a/app/controllers/application_controller.rb b/app/controllers/application_controller.rb
deleted file mode 100644
index 09705d1..0000000
--- a/app/controllers/application_controller.rb
+++ /dev/null
@@ -1,2 +0,0 @@
-class ApplicationController < ActionController::Base
-end
diff --git a/app/controllers/concerns/.keep b/app/controllers/concerns/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/app/helpers/application_helper.rb b/app/helpers/application_helper.rb
deleted file mode 100644
index de6be79..0000000
--- a/app/helpers/application_helper.rb
+++ /dev/null
@@ -1,2 +0,0 @@
-module ApplicationHelper
-end
diff --git a/app/javascript/channels/consumer.js b/app/javascript/channels/consumer.js
deleted file mode 100644
index 8ec3aad..0000000
--- a/app/javascript/channels/consumer.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Action Cable provides the framework to deal with WebSockets in Rails.
-// You can generate new channels where WebSocket features live using the `bin/rails generate channel` command.
-
-import { createConsumer } from "@rails/actioncable"
-
-export default createConsumer()
diff --git a/app/javascript/channels/index.js b/app/javascript/channels/index.js
deleted file mode 100644
index 0cfcf74..0000000
--- a/app/javascript/channels/index.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Load all the channels within this directory and all subdirectories.
-// Channel files must be named *_channel.js.
-
-const channels = require.context('.', true, /_channel\.js$/)
-channels.keys().forEach(channels)
diff --git a/app/javascript/packs/application.js b/app/javascript/packs/application.js
deleted file mode 100644
index f710851..0000000
--- a/app/javascript/packs/application.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// This file is automatically compiled by Webpack, along with any other files
-// present in this directory. You're encouraged to place your actual application logic in
-// a relevant structure within app/javascript and only use these pack files to reference
-// that code so it'll be compiled.
-
-import Rails from "@rails/ujs"
-import Turbolinks from "turbolinks"
-import * as ActiveStorage from "@rails/activestorage"
-import "channels"
-
-Rails.start()
-Turbolinks.start()
-ActiveStorage.start()
diff --git a/app/jobs/application_job.rb b/app/jobs/application_job.rb
deleted file mode 100644
index d394c3d..0000000
--- a/app/jobs/application_job.rb
+++ /dev/null
@@ -1,7 +0,0 @@
-class ApplicationJob < ActiveJob::Base
- # Automatically retry jobs that encountered a deadlock
- # retry_on ActiveRecord::Deadlocked
-
- # Most jobs are safe to ignore if the underlying records are no longer available
- # discard_on ActiveJob::DeserializationError
-end
diff --git a/app/mailers/application_mailer.rb b/app/mailers/application_mailer.rb
deleted file mode 100644
index 286b223..0000000
--- a/app/mailers/application_mailer.rb
+++ /dev/null
@@ -1,4 +0,0 @@
-class ApplicationMailer < ActionMailer::Base
- default from: 'from@example.com'
- layout 'mailer'
-end
diff --git a/app/models/application_record.rb b/app/models/application_record.rb
deleted file mode 100644
index 10a4cba..0000000
--- a/app/models/application_record.rb
+++ /dev/null
@@ -1,3 +0,0 @@
-class ApplicationRecord < ActiveRecord::Base
- self.abstract_class = true
-end
diff --git a/app/models/concerns/.keep b/app/models/concerns/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/app/views/layouts/application.html.erb b/app/views/layouts/application.html.erb
deleted file mode 100644
index 39fd3c2..0000000
--- a/app/views/layouts/application.html.erb
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
- TravisBackup
-
- <%= csrf_meta_tags %>
- <%= csp_meta_tag %>
-
- <%= stylesheet_link_tag 'application', media: 'all', 'data-turbolinks-track': 'reload' %>
- <%= javascript_pack_tag 'application', 'data-turbolinks-track': 'reload' %>
-
-
-
- <%= yield %>
-
-
diff --git a/app/views/layouts/mailer.html.erb b/app/views/layouts/mailer.html.erb
deleted file mode 100644
index cbd34d2..0000000
--- a/app/views/layouts/mailer.html.erb
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-
-
-
-
-
-
- <%= yield %>
-
-
diff --git a/app/views/layouts/mailer.text.erb b/app/views/layouts/mailer.text.erb
deleted file mode 100644
index 37f0bdd..0000000
--- a/app/views/layouts/mailer.text.erb
+++ /dev/null
@@ -1 +0,0 @@
-<%= yield %>
diff --git a/bin/run_backup b/bin/travis_backup
similarity index 67%
rename from bin/run_backup
rename to bin/travis_backup
index 344f0f5..35d6d55 100755
--- a/bin/run_backup
+++ b/bin/travis_backup
@@ -2,6 +2,6 @@
$: << 'lib'
-require 'backup'
+require 'travis-backup'
Backup.new.run
diff --git a/config/database.yml b/config/database.yml
index d9a9f06..1fd47de 100644
--- a/config/database.yml
+++ b/config/database.yml
@@ -18,3 +18,6 @@ test:
<<: *default
url: 'postgresql://localhost/travis_test'
eager_load: false
+ destination:
+ url: 'postgresql://localhost/travis_test_destination'
+ eager_load: false
diff --git a/config/environments/production.rb b/config/environments/production.rb
index be36373..a0a0af1 100644
--- a/config/environments/production.rb
+++ b/config/environments/production.rb
@@ -99,11 +99,11 @@
# Inserts middleware to perform automatic connection switching.
# The `database_selector` hash is used to pass options to the DatabaseSelector
- # middleware. The `delay` is used to determine how long to wait after a write
+ # middleware. The `threshold` is used to determine how long to wait after a write
# to send a subsequent read to the primary.
#
# The `database_resolver` class is used by the middleware to determine which
- # database is appropriate to use based on the time delay.
+ # database is appropriate to use based on the time threshold.
#
# The `database_resolver_context` class is used by the middleware to set
# timestamps for the last write to the primary. The resolver uses the context
@@ -114,7 +114,7 @@
# DatabaseSelector middleware is designed as such you can define your own
# strategy for connection switching and pass that into the middleware through
# these configuration options.
- # config.active_record.database_selector = { delay: 2.seconds }
+ # config.active_record.database_selector = { threshold: 2.seconds }
# config.active_record.database_resolver = ActiveRecord::Middleware::DatabaseSelector::Resolver
# config.active_record.database_resolver_context = ActiveRecord::Middleware::DatabaseSelector::Resolver::Session
end
diff --git a/config/settings.yml b/config/settings.yml
index 0a00e4c..61e3a29 100644
--- a/config/settings.yml
+++ b/config/settings.yml
@@ -1,18 +1,7 @@
backup:
- # builds limit in file
+ # when false, removes data without saving it to file
+ if_backup: true
+ # builds limit for one backup file
limit: 1000
- # delay in months
- delay: 6
- # how long (in days) expoorted files should be kept in storage
- housekeeping_period: 150
- # logs URL (the same as via UI)
- logs_url: https://api.travis-ci.org/v3/job
-
-# GCE Settings
-gce:
- project: travis-ci-prod-services-1
- credentials: config/secrets/travis-ci-prod-services-travis-backup.json
- bucket: travis-backup-staging
-
-redis:
- url: redis://127.0.0.1:6379
\ No newline at end of file
+ # path of the folder in which backup files will be placed
+ files_location: './dump'
\ No newline at end of file
diff --git a/db/schema.sql b/db/schema.sql
new file mode 100644
index 0000000..aa9ca08
--- /dev/null
+++ b/db/schema.sql
@@ -0,0 +1,3502 @@
+DROP SCHEMA IF EXISTS public CASCADE;
+DROP SCHEMA IF EXISTS sqitch CASCADE;
+CREATE SCHEMA public;
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+--
+-- Name: sqitch; Type: SCHEMA; Schema: -; Owner: postgres
+--
+
+CREATE SCHEMA sqitch;
+
+
+ALTER SCHEMA sqitch OWNER TO postgres;
+
+--
+-- Name: SCHEMA sqitch; Type: COMMENT; Schema: -; Owner: postgres
+--
+
+COMMENT ON SCHEMA sqitch IS 'Sqitch database deployment metadata v1.1.';
+
+
+--
+-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner:
+--
+
+CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
+
+
+--
+-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner:
+--
+
+COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+
+
+--
+-- Name: pg_trgm; Type: EXTENSION; Schema: -; Owner:
+--
+
+CREATE EXTENSION IF NOT EXISTS pg_trgm WITH SCHEMA public;
+
+
+--
+-- Name: EXTENSION pg_trgm; Type: COMMENT; Schema: -; Owner:
+--
+
+COMMENT ON EXTENSION pg_trgm IS 'text similarity measurement and index searching based on trigrams';
+
+
+--
+-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner:
+--
+
+CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public;
+
+
+--
+-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner:
+--
+
+COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions';
+
+
+SET search_path = public, pg_catalog;
+
+--
+-- Name: source_type; Type: TYPE; Schema: public; Owner: postgres
+--
+
+CREATE TYPE source_type AS ENUM (
+ 'manual',
+ 'stripe',
+ 'github',
+ 'unknown'
+);
+
+
+ALTER TYPE public.source_type OWNER TO postgres;
+
+--
+-- Name: set_updated_at(); Type: FUNCTION; Schema: public; Owner: postgres
+--
+
+CREATE FUNCTION set_updated_at() RETURNS trigger
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+ IF TG_OP = 'INSERT' OR
+ (TG_OP = 'UPDATE' AND NEW.* IS DISTINCT FROM OLD.*) THEN
+ NEW.updated_at := statement_timestamp();
+ END IF;
+ RETURN NEW;
+ END;
+ $$;
+
+
+ALTER FUNCTION public.set_updated_at() OWNER TO postgres;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: abuses; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE abuses (
+ id integer NOT NULL,
+ owner_id integer,
+ owner_type character varying,
+ request_id integer,
+ level integer NOT NULL,
+ reason character varying NOT NULL,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.abuses OWNER TO postgres;
+
+--
+-- Name: abuses_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE abuses_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.abuses_id_seq OWNER TO postgres;
+
+--
+-- Name: abuses_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE abuses_id_seq OWNED BY abuses.id;
+
+
+--
+-- Name: annotation_providers; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE annotation_providers (
+ id integer NOT NULL,
+ name character varying,
+ api_username character varying,
+ api_key character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.annotation_providers OWNER TO postgres;
+
+--
+-- Name: annotation_providers_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE annotation_providers_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.annotation_providers_id_seq OWNER TO postgres;
+
+--
+-- Name: annotation_providers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE annotation_providers_id_seq OWNED BY annotation_providers.id;
+
+
+--
+-- Name: annotations; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE annotations (
+ id integer NOT NULL,
+ job_id integer NOT NULL,
+ url character varying,
+ description text NOT NULL,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ annotation_provider_id integer NOT NULL,
+ status character varying
+);
+
+
+ALTER TABLE public.annotations OWNER TO postgres;
+
+--
+-- Name: annotations_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE annotations_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.annotations_id_seq OWNER TO postgres;
+
+--
+-- Name: annotations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE annotations_id_seq OWNED BY annotations.id;
+
+
+--
+-- Name: beta_features; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE beta_features (
+ id integer NOT NULL,
+ name character varying,
+ description text,
+ feedback_url character varying,
+ staff_only boolean,
+ default_enabled boolean,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone
+);
+
+
+ALTER TABLE public.beta_features OWNER TO postgres;
+
+--
+-- Name: beta_features_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE beta_features_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.beta_features_id_seq OWNER TO postgres;
+
+--
+-- Name: beta_features_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE beta_features_id_seq OWNED BY beta_features.id;
+
+
+--
+-- Name: branches; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE branches (
+ id integer NOT NULL,
+ repository_id integer NOT NULL,
+ last_build_id integer,
+ name character varying NOT NULL,
+ exists_on_github boolean DEFAULT true NOT NULL,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.branches OWNER TO postgres;
+
+--
+-- Name: branches_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE branches_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.branches_id_seq OWNER TO postgres;
+
+--
+-- Name: branches_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE branches_id_seq OWNED BY branches.id;
+
+
+--
+-- Name: broadcasts; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE broadcasts (
+ id integer NOT NULL,
+ recipient_id integer,
+ recipient_type character varying,
+ kind character varying,
+ message character varying,
+ expired boolean,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ category character varying
+);
+
+
+ALTER TABLE public.broadcasts OWNER TO postgres;
+
+--
+-- Name: broadcasts_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE broadcasts_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.broadcasts_id_seq OWNER TO postgres;
+
+--
+-- Name: broadcasts_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE broadcasts_id_seq OWNED BY broadcasts.id;
+
+
+--
+-- Name: shared_builds_tasks_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+-- NOTE(review): this sequence feeds the DEFAULT id of both the builds and the
+-- jobs tables, so builds and jobs draw from a single, non-overlapping id
+-- space. The per-table builds_id_seq/jobs_id_seq are also created in this
+-- dump but are not wired up as column defaults.
+CREATE SEQUENCE shared_builds_tasks_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+ALTER TABLE public.shared_builds_tasks_seq OWNER TO postgres;
+
+--
+-- Name: builds; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+-- NOTE(review): builds.id is bigint and defaults to the shared sequence above
+-- (not builds_id_seq).
+CREATE TABLE builds (
+    id bigint DEFAULT nextval('shared_builds_tasks_seq'::regclass) NOT NULL,
+    repository_id integer,
+    number character varying,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    config text,
+    commit_id integer,
+    request_id integer,
+    state character varying,
+    duration integer,
+    owner_id integer,
+    owner_type character varying,
+    event_type character varying,
+    previous_state character varying,
+    pull_request_title text,
+    pull_request_number integer,
+    branch character varying,
+    canceled_at timestamp without time zone,
+    cached_matrix_ids integer[],
+    received_at timestamp without time zone,
+    private boolean,
+    pull_request_id integer,
+    branch_id integer,
+    tag_id integer,
+    sender_id integer,
+    sender_type character varying
+);
+
+
+ALTER TABLE public.builds OWNER TO postgres;
+
+--
+-- Name: builds_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE builds_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.builds_id_seq OWNER TO postgres;
+
+--
+-- Name: builds_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE builds_id_seq OWNED BY builds.id;
+
+
+--
+-- Name: commits; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE commits (
+ id integer NOT NULL,
+ repository_id integer,
+ commit character varying,
+ ref character varying,
+ branch character varying,
+ message text,
+ compare_url character varying,
+ committed_at timestamp without time zone,
+ committer_name character varying,
+ committer_email character varying,
+ author_name character varying,
+ author_email character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ branch_id integer,
+ tag_id integer
+);
+
+
+ALTER TABLE public.commits OWNER TO postgres;
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE commits_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.commits_id_seq OWNER TO postgres;
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE commits_id_seq OWNED BY commits.id;
+
+
+--
+-- Name: coupons; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE coupons (
+ id integer NOT NULL,
+ percent_off integer,
+ coupon_id character varying,
+ redeem_by timestamp without time zone,
+ amount_off integer,
+ duration character varying,
+ duration_in_months integer,
+ max_redemptions integer,
+ redemptions integer
+);
+
+
+ALTER TABLE public.coupons OWNER TO postgres;
+
+--
+-- Name: coupons_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE coupons_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.coupons_id_seq OWNER TO postgres;
+
+--
+-- Name: coupons_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE coupons_id_seq OWNED BY coupons.id;
+
+
+--
+-- Name: crons; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE crons (
+ id integer NOT NULL,
+ branch_id integer,
+ "interval" character varying NOT NULL,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ next_run timestamp without time zone,
+ last_run timestamp without time zone,
+ dont_run_if_recent_build_exists boolean DEFAULT false
+);
+
+
+ALTER TABLE public.crons OWNER TO postgres;
+
+--
+-- Name: crons_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE crons_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.crons_id_seq OWNER TO postgres;
+
+--
+-- Name: crons_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE crons_id_seq OWNED BY crons.id;
+
+
+--
+-- Name: emails; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE emails (
+ id integer NOT NULL,
+ user_id integer,
+ email character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.emails OWNER TO postgres;
+
+--
+-- Name: emails_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE emails_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.emails_id_seq OWNER TO postgres;
+
+--
+-- Name: emails_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE emails_id_seq OWNED BY emails.id;
+
+
+--
+-- Name: invoices; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE invoices (
+ id integer NOT NULL,
+ object text,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone,
+ subscription_id integer,
+ invoice_id character varying,
+ stripe_id character varying,
+ cc_last_digits character varying
+);
+
+
+ALTER TABLE public.invoices OWNER TO postgres;
+
+--
+-- Name: invoices_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE invoices_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.invoices_id_seq OWNER TO postgres;
+
+--
+-- Name: invoices_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE invoices_id_seq OWNED BY invoices.id;
+
+
+--
+-- Name: jobs; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+-- NOTE(review): like builds, jobs takes its bigint id from
+-- shared_builds_tasks_seq, so build ids and job ids never collide.
+CREATE TABLE jobs (
+    id bigint DEFAULT nextval('shared_builds_tasks_seq'::regclass) NOT NULL,
+    repository_id integer,
+    commit_id integer,
+    source_id integer,
+    source_type character varying,
+    queue character varying,
+    type character varying,
+    state character varying,
+    number character varying,
+    config text,
+    worker character varying,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    tags text,
+    allow_failure boolean DEFAULT false,
+    owner_id integer,
+    owner_type character varying,
+    result integer,
+    queued_at timestamp without time zone,
+    canceled_at timestamp without time zone,
+    received_at timestamp without time zone,
+    debug_options text,
+    private boolean,
+    stage_number character varying,
+    stage_id integer
+);
+
+
+ALTER TABLE public.jobs OWNER TO postgres;
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE jobs_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.jobs_id_seq OWNER TO postgres;
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE jobs_id_seq OWNED BY jobs.id;
+
+
+--
+-- Name: log_parts; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+-- NOTE(review): the storage parameters below make autovacuum extremely
+-- aggressive for this table (threshold 0, scale factor 0.001 = vacuum at
+-- ~0.1% dead tuples) — presumably because log parts are inserted and purged
+-- at high volume. Confirm the churn assumption before relaxing these.
+CREATE TABLE log_parts (
+    id bigint NOT NULL,
+    log_id integer NOT NULL,
+    content text,
+    number integer,
+    final boolean,
+    created_at timestamp without time zone DEFAULT '2000-01-01 00:00:00'::timestamp without time zone NOT NULL
+)
+WITH (autovacuum_vacuum_threshold='0', autovacuum_vacuum_scale_factor='0.001');
+
+
+ALTER TABLE public.log_parts OWNER TO postgres;
+
+--
+-- Name: log_parts_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE log_parts_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.log_parts_id_seq OWNER TO postgres;
+
+--
+-- Name: log_parts_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE log_parts_id_seq OWNED BY log_parts.id;
+
+
+--
+-- Name: logs; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE logs (
+ id integer NOT NULL,
+ job_id integer,
+ content text,
+ removed_by integer,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone,
+ aggregated_at timestamp without time zone,
+ archived_at timestamp without time zone,
+ purged_at timestamp without time zone,
+ removed_at timestamp without time zone,
+ archiving boolean,
+ archive_verified boolean
+);
+
+
+ALTER TABLE public.logs OWNER TO postgres;
+
+--
+-- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE logs_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.logs_id_seq OWNER TO postgres;
+
+--
+-- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE logs_id_seq OWNED BY logs.id;
+
+
+--
+-- Name: memberships; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE memberships (
+ id integer NOT NULL,
+ organization_id integer,
+ user_id integer,
+ role character varying
+);
+
+
+ALTER TABLE public.memberships OWNER TO postgres;
+
+--
+-- Name: memberships_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE memberships_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.memberships_id_seq OWNER TO postgres;
+
+--
+-- Name: memberships_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE memberships_id_seq OWNED BY memberships.id;
+
+
+--
+-- Name: messages; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE messages (
+ id integer NOT NULL,
+ subject_id integer,
+ subject_type character varying,
+ level character varying,
+ key character varying,
+ code character varying,
+ args json,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.messages OWNER TO postgres;
+
+--
+-- Name: messages_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE messages_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.messages_id_seq OWNER TO postgres;
+
+--
+-- Name: messages_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE messages_id_seq OWNED BY messages.id;
+
+
+--
+-- Name: organizations; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE organizations (
+ id integer NOT NULL,
+ name character varying,
+ login character varying,
+ github_id integer,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ avatar_url character varying,
+ location character varying,
+ email character varying,
+ company character varying,
+ homepage character varying,
+ billing_admin_only boolean
+);
+
+
+ALTER TABLE public.organizations OWNER TO postgres;
+
+--
+-- Name: organizations_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE organizations_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.organizations_id_seq OWNER TO postgres;
+
+--
+-- Name: organizations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE organizations_id_seq OWNED BY organizations.id;
+
+
+--
+-- Name: owner_groups; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE owner_groups (
+ id integer NOT NULL,
+ uuid character varying,
+ owner_id integer,
+ owner_type character varying,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone
+);
+
+
+ALTER TABLE public.owner_groups OWNER TO postgres;
+
+--
+-- Name: owner_groups_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE owner_groups_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.owner_groups_id_seq OWNER TO postgres;
+
+--
+-- Name: owner_groups_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE owner_groups_id_seq OWNED BY owner_groups.id;
+
+
+--
+-- Name: permissions; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE permissions (
+ id integer NOT NULL,
+ user_id integer,
+ repository_id integer,
+ admin boolean DEFAULT false,
+ push boolean DEFAULT false,
+ pull boolean DEFAULT false
+);
+
+
+ALTER TABLE public.permissions OWNER TO postgres;
+
+--
+-- Name: permissions_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE permissions_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.permissions_id_seq OWNER TO postgres;
+
+--
+-- Name: permissions_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE permissions_id_seq OWNED BY permissions.id;
+
+
+--
+-- Name: pull_requests; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE pull_requests (
+ id integer NOT NULL,
+ repository_id integer,
+ number integer,
+ title character varying,
+ state character varying,
+ head_repo_github_id integer,
+ head_repo_slug character varying,
+ head_ref character varying,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone
+);
+
+
+ALTER TABLE public.pull_requests OWNER TO postgres;
+
+--
+-- Name: pull_requests_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE pull_requests_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.pull_requests_id_seq OWNER TO postgres;
+
+--
+-- Name: pull_requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE pull_requests_id_seq OWNED BY pull_requests.id;
+
+
+--
+-- Name: queueable_jobs; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE queueable_jobs (
+ id integer NOT NULL,
+ job_id integer
+);
+
+
+ALTER TABLE public.queueable_jobs OWNER TO postgres;
+
+--
+-- Name: queueable_jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE queueable_jobs_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.queueable_jobs_id_seq OWNER TO postgres;
+
+--
+-- Name: queueable_jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE queueable_jobs_id_seq OWNED BY queueable_jobs.id;
+
+
+--
+-- Name: repositories; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE repositories (
+ id integer NOT NULL,
+ name character varying,
+ url character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ last_build_id integer,
+ last_build_number character varying,
+ last_build_started_at timestamp without time zone,
+ last_build_finished_at timestamp without time zone,
+ owner_name character varying,
+ owner_email text,
+ active boolean,
+ description text,
+ last_build_duration integer,
+ owner_id integer,
+ owner_type character varying,
+ private boolean DEFAULT false,
+ last_build_state character varying,
+ github_id integer,
+ default_branch character varying,
+ github_language character varying,
+ settings json,
+ next_build_number integer,
+ invalidated_at timestamp without time zone,
+ current_build_id bigint
+);
+
+
+ALTER TABLE public.repositories OWNER TO postgres;
+
+--
+-- Name: repositories_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE repositories_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.repositories_id_seq OWNER TO postgres;
+
+--
+-- Name: repositories_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE repositories_id_seq OWNED BY repositories.id;
+
+
+--
+-- Name: requests; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE requests (
+ id integer NOT NULL,
+ repository_id integer,
+ commit_id integer,
+ state character varying,
+ source character varying,
+ payload text,
+ token character varying,
+ config text,
+ started_at timestamp without time zone,
+ finished_at timestamp without time zone,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ event_type character varying,
+ comments_url character varying,
+ base_commit character varying,
+ head_commit character varying,
+ owner_id integer,
+ owner_type character varying,
+ result character varying,
+ message character varying,
+ private boolean,
+ pull_request_id integer,
+ branch_id integer,
+ tag_id integer,
+ sender_id integer,
+ sender_type character varying
+);
+
+
+ALTER TABLE public.requests OWNER TO postgres;
+
+--
+-- Name: requests_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE requests_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.requests_id_seq OWNER TO postgres;
+
+--
+-- Name: requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE requests_id_seq OWNED BY requests.id;
+
+
+--
+-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE schema_migrations (
+ version character varying NOT NULL
+);
+
+
+ALTER TABLE public.schema_migrations OWNER TO postgres;
+
+--
+-- Name: ssl_keys; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE ssl_keys (
+ id integer NOT NULL,
+ repository_id integer,
+ public_key text,
+ private_key text,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.ssl_keys OWNER TO postgres;
+
+--
+-- Name: ssl_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE ssl_keys_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.ssl_keys_id_seq OWNER TO postgres;
+
+--
+-- Name: ssl_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE ssl_keys_id_seq OWNED BY ssl_keys.id;
+
+
+--
+-- Name: stages; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE stages (
+ id integer NOT NULL,
+ build_id integer,
+ number integer,
+ name character varying,
+ state character varying,
+ started_at timestamp without time zone,
+ finished_at timestamp without time zone
+);
+
+
+ALTER TABLE public.stages OWNER TO postgres;
+
+--
+-- Name: stages_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE stages_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.stages_id_seq OWNER TO postgres;
+
+--
+-- Name: stages_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE stages_id_seq OWNED BY stages.id;
+
+
+--
+-- Name: stars; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE stars (
+ id integer NOT NULL,
+ repository_id integer,
+ user_id integer,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.stars OWNER TO postgres;
+
+--
+-- Name: stars_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE stars_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.stars_id_seq OWNER TO postgres;
+
+--
+-- Name: stars_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE stars_id_seq OWNED BY stars.id;
+
+
+--
+-- Name: stripe_events; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE stripe_events (
+ id integer NOT NULL,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone,
+ event_object text,
+ event_type character varying,
+ date timestamp without time zone,
+ event_id character varying
+);
+
+
+ALTER TABLE public.stripe_events OWNER TO postgres;
+
+--
+-- Name: stripe_events_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE stripe_events_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.stripe_events_id_seq OWNER TO postgres;
+
+--
+-- Name: stripe_events_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE stripe_events_id_seq OWNED BY stripe_events.id;
+
+
+--
+-- Name: subscriptions; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+-- NOTE(review): the source column uses the custom type source_type (defined
+-- earlier in this dump, outside this excerpt) and defaults to 'unknown'; it is
+-- the only NOT NULL business column besides id here.
+CREATE TABLE subscriptions (
+    id integer NOT NULL,
+    cc_token character varying,
+    valid_to timestamp without time zone,
+    owner_id integer,
+    owner_type character varying,
+    first_name character varying,
+    last_name character varying,
+    company character varying,
+    zip_code character varying,
+    address character varying,
+    address2 character varying,
+    city character varying,
+    state character varying,
+    country character varying,
+    vat_id character varying,
+    customer_id character varying,
+    created_at timestamp without time zone,
+    updated_at timestamp without time zone,
+    cc_owner character varying,
+    cc_last_digits character varying,
+    cc_expiration_date character varying,
+    billing_email character varying,
+    selected_plan character varying,
+    coupon character varying,
+    contact_id integer,
+    canceled_at timestamp without time zone,
+    canceled_by_id integer,
+    status character varying,
+    source source_type DEFAULT 'unknown'::source_type NOT NULL,
+    concurrency integer
+);
+
+
+ALTER TABLE public.subscriptions OWNER TO postgres;
+
+--
+-- Name: subscriptions_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE subscriptions_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.subscriptions_id_seq OWNER TO postgres;
+
+--
+-- Name: subscriptions_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE subscriptions_id_seq OWNED BY subscriptions.id;
+
+
+--
+-- Name: tags; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE tags (
+ id integer NOT NULL,
+ repository_id integer,
+ name character varying,
+ last_build_id integer,
+ exists_on_github boolean,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone
+);
+
+
+ALTER TABLE public.tags OWNER TO postgres;
+
+--
+-- Name: tags_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE tags_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.tags_id_seq OWNER TO postgres;
+
+--
+-- Name: tags_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE tags_id_seq OWNED BY tags.id;
+
+
+--
+-- Name: tokens; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE tokens (
+ id integer NOT NULL,
+ user_id integer,
+ token character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.tokens OWNER TO postgres;
+
+--
+-- Name: tokens_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE tokens_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.tokens_id_seq OWNER TO postgres;
+
+--
+-- Name: tokens_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE tokens_id_seq OWNED BY tokens.id;
+
+
+--
+-- Name: trial_allowances; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE trial_allowances (
+ id integer NOT NULL,
+ trial_id integer,
+ creator_id integer,
+ creator_type character varying,
+ builds_allowed integer,
+ builds_remaining integer,
+ created_at timestamp without time zone,
+ updated_at timestamp without time zone
+);
+
+
+ALTER TABLE public.trial_allowances OWNER TO postgres;
+
+--
+-- Name: trial_allowances_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE trial_allowances_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.trial_allowances_id_seq OWNER TO postgres;
+
+--
+-- Name: trial_allowances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE trial_allowances_id_seq OWNED BY trial_allowances.id;
+
+
+--
+-- Name: trials; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE trials (
+ id integer NOT NULL,
+ owner_id integer,
+ owner_type character varying,
+ chartmogul_customer_uuids text[] DEFAULT '{}'::text[],
+ status character varying DEFAULT 'new'::character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.trials OWNER TO postgres;
+
+--
+-- Name: trials_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE trials_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.trials_id_seq OWNER TO postgres;
+
+--
+-- Name: trials_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE trials_id_seq OWNED BY trials.id;
+
+
+--
+-- Name: urls; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE urls (
+ id integer NOT NULL,
+ url character varying,
+ code character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+ALTER TABLE public.urls OWNER TO postgres;
+
+--
+-- Name: urls_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE urls_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.urls_id_seq OWNER TO postgres;
+
+--
+-- Name: urls_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE urls_id_seq OWNED BY urls.id;
+
+
+--
+-- Name: user_beta_features; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE user_beta_features (
+ id integer NOT NULL,
+ user_id integer,
+ beta_feature_id integer,
+ enabled boolean,
+ last_deactivated_at timestamp without time zone,
+ last_activated_at timestamp without time zone
+);
+
+
+ALTER TABLE public.user_beta_features OWNER TO postgres;
+
+--
+-- Name: user_beta_features_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE user_beta_features_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.user_beta_features_id_seq OWNER TO postgres;
+
+--
+-- Name: user_beta_features_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE user_beta_features_id_seq OWNED BY user_beta_features.id;
+
+
+--
+-- Name: users; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE users (
+ id integer NOT NULL,
+ name character varying,
+ login character varying,
+ email character varying,
+ created_at timestamp without time zone NOT NULL,
+ updated_at timestamp without time zone NOT NULL,
+ is_admin boolean DEFAULT false,
+ github_id integer,
+ github_oauth_token character varying,
+ gravatar_id character varying,
+ locale character varying,
+ is_syncing boolean,
+ synced_at timestamp without time zone,
+ github_scopes text,
+ education boolean,
+ first_logged_in_at timestamp without time zone,
+ avatar_url character varying,
+ suspended boolean DEFAULT false,
+ suspended_at timestamp without time zone
+);
+
+
+ALTER TABLE public.users OWNER TO postgres;
+
+--
+-- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
+--
+
+CREATE SEQUENCE users_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+ALTER TABLE public.users_id_seq OWNER TO postgres;
+
+--
+-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
+--
+
+ALTER SEQUENCE users_id_seq OWNED BY users.id;
+
+
+-- NOTE(review): everything from here on is created in the sqitch schema —
+-- Sqitch's own deployment-tracking tables (changes, dependencies, events) —
+-- not the application's public schema.
+SET search_path = sqitch, pg_catalog;
+
+--
+-- Name: changes; Type: TABLE; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE changes (
+ change_id text NOT NULL,
+ script_hash text,
+ change text NOT NULL,
+ project text NOT NULL,
+ note text DEFAULT ''::text NOT NULL,
+ committed_at timestamp with time zone DEFAULT clock_timestamp() NOT NULL,
+ committer_name text NOT NULL,
+ committer_email text NOT NULL,
+ planned_at timestamp with time zone NOT NULL,
+ planner_name text NOT NULL,
+ planner_email text NOT NULL
+);
+
+
+ALTER TABLE sqitch.changes OWNER TO postgres;
+
+--
+-- Name: TABLE changes; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON TABLE changes IS 'Tracks the changes currently deployed to the database.';
+
+
+--
+-- Name: COLUMN changes.change_id; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.change_id IS 'Change primary key.';
+
+
+--
+-- Name: COLUMN changes.script_hash; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.script_hash IS 'Deploy script SHA-1 hash.';
+
+
+--
+-- Name: COLUMN changes.change; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.change IS 'Name of a deployed change.';
+
+
+--
+-- Name: COLUMN changes.project; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.project IS 'Name of the Sqitch project to which the change belongs.';
+
+
+--
+-- Name: COLUMN changes.note; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.note IS 'Description of the change.';
+
+
+--
+-- Name: COLUMN changes.committed_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.committed_at IS 'Date the change was deployed.';
+
+
+--
+-- Name: COLUMN changes.committer_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.committer_name IS 'Name of the user who deployed the change.';
+
+
+--
+-- Name: COLUMN changes.committer_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.committer_email IS 'Email address of the user who deployed the change.';
+
+
+--
+-- Name: COLUMN changes.planned_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.planned_at IS 'Date the change was added to the plan.';
+
+
+--
+-- Name: COLUMN changes.planner_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.planner_name IS 'Name of the user who planed the change.';
+
+
+--
+-- Name: COLUMN changes.planner_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN changes.planner_email IS 'Email address of the user who planned the change.';
+
+
+--
+-- Name: dependencies; Type: TABLE; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE dependencies (
+ change_id text NOT NULL,
+ type text NOT NULL,
+ dependency text NOT NULL,
+ dependency_id text,
+ CONSTRAINT dependencies_check CHECK ((((type = 'require'::text) AND (dependency_id IS NOT NULL)) OR ((type = 'conflict'::text) AND (dependency_id IS NULL))))
+);
+
+
+ALTER TABLE sqitch.dependencies OWNER TO postgres;
+
+--
+-- Name: TABLE dependencies; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON TABLE dependencies IS 'Tracks the currently satisfied dependencies.';
+
+
+--
+-- Name: COLUMN dependencies.change_id; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN dependencies.change_id IS 'ID of the depending change.';
+
+
+--
+-- Name: COLUMN dependencies.type; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN dependencies.type IS 'Type of dependency.';
+
+
+--
+-- Name: COLUMN dependencies.dependency; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN dependencies.dependency IS 'Dependency name.';
+
+
+--
+-- Name: COLUMN dependencies.dependency_id; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN dependencies.dependency_id IS 'Change ID the dependency resolves to.';
+
+
+--
+-- Name: events; Type: TABLE; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE events (
+ event text NOT NULL,
+ change_id text NOT NULL,
+ change text NOT NULL,
+ project text NOT NULL,
+ note text DEFAULT ''::text NOT NULL,
+ requires text[] DEFAULT '{}'::text[] NOT NULL,
+ conflicts text[] DEFAULT '{}'::text[] NOT NULL,
+ tags text[] DEFAULT '{}'::text[] NOT NULL,
+ committed_at timestamp with time zone DEFAULT clock_timestamp() NOT NULL,
+ committer_name text NOT NULL,
+ committer_email text NOT NULL,
+ planned_at timestamp with time zone NOT NULL,
+ planner_name text NOT NULL,
+ planner_email text NOT NULL,
+ CONSTRAINT events_event_check CHECK ((event = ANY (ARRAY['deploy'::text, 'revert'::text, 'fail'::text, 'merge'::text])))
+);
+
+
+ALTER TABLE sqitch.events OWNER TO postgres;
+
+--
+-- Name: TABLE events; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON TABLE events IS 'Contains full history of all deployment events.';
+
+
+--
+-- Name: COLUMN events.event; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.event IS 'Type of event.';
+
+
+--
+-- Name: COLUMN events.change_id; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.change_id IS 'Change ID.';
+
+
+--
+-- Name: COLUMN events.change; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.change IS 'Change name.';
+
+
+--
+-- Name: COLUMN events.project; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.project IS 'Name of the Sqitch project to which the change belongs.';
+
+
+--
+-- Name: COLUMN events.note; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.note IS 'Description of the change.';
+
+
+--
+-- Name: COLUMN events.requires; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.requires IS 'Array of the names of required changes.';
+
+
+--
+-- Name: COLUMN events.conflicts; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.conflicts IS 'Array of the names of conflicting changes.';
+
+
+--
+-- Name: COLUMN events.tags; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.tags IS 'Tags associated with the change.';
+
+
+--
+-- Name: COLUMN events.committed_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.committed_at IS 'Date the event was committed.';
+
+
+--
+-- Name: COLUMN events.committer_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.committer_name IS 'Name of the user who committed the event.';
+
+
+--
+-- Name: COLUMN events.committer_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.committer_email IS 'Email address of the user who committed the event.';
+
+
+--
+-- Name: COLUMN events.planned_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.planned_at IS 'Date the event was added to the plan.';
+
+
+--
+-- Name: COLUMN events.planner_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.planner_name IS 'Name of the user who planned the change.';
+
+
+--
+-- Name: COLUMN events.planner_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN events.planner_email IS 'Email address of the user who planned the change.';
+
+
+--
+-- Name: projects; Type: TABLE; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE projects (
+ project text NOT NULL,
+ uri text,
+ created_at timestamp with time zone DEFAULT clock_timestamp() NOT NULL,
+ creator_name text NOT NULL,
+ creator_email text NOT NULL
+);
+
+
+ALTER TABLE sqitch.projects OWNER TO postgres;
+
+--
+-- Name: TABLE projects; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON TABLE projects IS 'Sqitch projects deployed to this database.';
+
+
+--
+-- Name: COLUMN projects.project; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN projects.project IS 'Unique Name of a project.';
+
+
+--
+-- Name: COLUMN projects.uri; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN projects.uri IS 'Optional project URI';
+
+
+--
+-- Name: COLUMN projects.created_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN projects.created_at IS 'Date the project was added to the database.';
+
+
+--
+-- Name: COLUMN projects.creator_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN projects.creator_name IS 'Name of the user who added the project.';
+
+
+--
+-- Name: COLUMN projects.creator_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN projects.creator_email IS 'Email address of the user who added the project.';
+
+
+--
+-- Name: releases; Type: TABLE; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE releases (
+ version real NOT NULL,
+ installed_at timestamp with time zone DEFAULT clock_timestamp() NOT NULL,
+ installer_name text NOT NULL,
+ installer_email text NOT NULL
+);
+
+
+ALTER TABLE sqitch.releases OWNER TO postgres;
+
+--
+-- Name: TABLE releases; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON TABLE releases IS 'Sqitch registry releases.';
+
+
+--
+-- Name: COLUMN releases.version; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN releases.version IS 'Version of the Sqitch registry.';
+
+
+--
+-- Name: COLUMN releases.installed_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN releases.installed_at IS 'Date the registry release was installed.';
+
+
+--
+-- Name: COLUMN releases.installer_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN releases.installer_name IS 'Name of the user who installed the registry release.';
+
+
+--
+-- Name: COLUMN releases.installer_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN releases.installer_email IS 'Email address of the user who installed the registry release.';
+
+
+--
+-- Name: tags; Type: TABLE; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+CREATE TABLE tags (
+ tag_id text NOT NULL,
+ tag text NOT NULL,
+ project text NOT NULL,
+ change_id text NOT NULL,
+ note text DEFAULT ''::text NOT NULL,
+ committed_at timestamp with time zone DEFAULT clock_timestamp() NOT NULL,
+ committer_name text NOT NULL,
+ committer_email text NOT NULL,
+ planned_at timestamp with time zone NOT NULL,
+ planner_name text NOT NULL,
+ planner_email text NOT NULL
+);
+
+
+ALTER TABLE sqitch.tags OWNER TO postgres;
+
+--
+-- Name: TABLE tags; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON TABLE tags IS 'Tracks the tags currently applied to the database.';
+
+
+--
+-- Name: COLUMN tags.tag_id; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.tag_id IS 'Tag primary key.';
+
+
+--
+-- Name: COLUMN tags.tag; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.tag IS 'Project-unique tag name.';
+
+
+--
+-- Name: COLUMN tags.project; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.project IS 'Name of the Sqitch project to which the tag belongs.';
+
+
+--
+-- Name: COLUMN tags.change_id; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.change_id IS 'ID of last change deployed before the tag was applied.';
+
+
+--
+-- Name: COLUMN tags.note; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.note IS 'Description of the tag.';
+
+
+--
+-- Name: COLUMN tags.committed_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.committed_at IS 'Date the tag was applied to the database.';
+
+
+--
+-- Name: COLUMN tags.committer_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.committer_name IS 'Name of the user who applied the tag.';
+
+
+--
+-- Name: COLUMN tags.committer_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.committer_email IS 'Email address of the user who applied the tag.';
+
+
+--
+-- Name: COLUMN tags.planned_at; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.planned_at IS 'Date the tag was added to the plan.';
+
+
+--
+-- Name: COLUMN tags.planner_name; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.planner_name IS 'Name of the user who planned the tag.';
+
+
+--
+-- Name: COLUMN tags.planner_email; Type: COMMENT; Schema: sqitch; Owner: postgres
+--
+
+COMMENT ON COLUMN tags.planner_email IS 'Email address of the user who planned the tag.';
+
+
+SET search_path = public, pg_catalog;
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY abuses ALTER COLUMN id SET DEFAULT nextval('abuses_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY annotation_providers ALTER COLUMN id SET DEFAULT nextval('annotation_providers_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY annotations ALTER COLUMN id SET DEFAULT nextval('annotations_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY beta_features ALTER COLUMN id SET DEFAULT nextval('beta_features_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY branches ALTER COLUMN id SET DEFAULT nextval('branches_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY broadcasts ALTER COLUMN id SET DEFAULT nextval('broadcasts_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY commits ALTER COLUMN id SET DEFAULT nextval('commits_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY coupons ALTER COLUMN id SET DEFAULT nextval('coupons_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY crons ALTER COLUMN id SET DEFAULT nextval('crons_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY emails ALTER COLUMN id SET DEFAULT nextval('emails_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY invoices ALTER COLUMN id SET DEFAULT nextval('invoices_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY log_parts ALTER COLUMN id SET DEFAULT nextval('log_parts_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY logs ALTER COLUMN id SET DEFAULT nextval('logs_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY memberships ALTER COLUMN id SET DEFAULT nextval('memberships_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY messages ALTER COLUMN id SET DEFAULT nextval('messages_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY organizations ALTER COLUMN id SET DEFAULT nextval('organizations_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY owner_groups ALTER COLUMN id SET DEFAULT nextval('owner_groups_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY permissions ALTER COLUMN id SET DEFAULT nextval('permissions_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY pull_requests ALTER COLUMN id SET DEFAULT nextval('pull_requests_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY queueable_jobs ALTER COLUMN id SET DEFAULT nextval('queueable_jobs_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY repositories ALTER COLUMN id SET DEFAULT nextval('repositories_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY requests ALTER COLUMN id SET DEFAULT nextval('requests_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY ssl_keys ALTER COLUMN id SET DEFAULT nextval('ssl_keys_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY stages ALTER COLUMN id SET DEFAULT nextval('stages_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY stars ALTER COLUMN id SET DEFAULT nextval('stars_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY stripe_events ALTER COLUMN id SET DEFAULT nextval('stripe_events_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY subscriptions ALTER COLUMN id SET DEFAULT nextval('subscriptions_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY tags ALTER COLUMN id SET DEFAULT nextval('tags_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY tokens ALTER COLUMN id SET DEFAULT nextval('tokens_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY trial_allowances ALTER COLUMN id SET DEFAULT nextval('trial_allowances_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY trials ALTER COLUMN id SET DEFAULT nextval('trials_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY urls ALTER COLUMN id SET DEFAULT nextval('urls_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY user_beta_features ALTER COLUMN id SET DEFAULT nextval('user_beta_features_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY users ALTER COLUMN id SET DEFAULT nextval('users_id_seq'::regclass);
+
+--
+-- Name: abuses_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('abuses_id_seq', 1, false);
+
+--
+-- Name: annotation_providers_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('annotation_providers_id_seq', 1, false);
+
+--
+-- Name: annotations_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('annotations_id_seq', 1, false);
+
+--
+-- Name: beta_features_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('beta_features_id_seq', 1, false);
+
+--
+-- Name: branches_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('branches_id_seq', 72, true);
+
+--
+-- Name: broadcasts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('broadcasts_id_seq', 1, false);
+
+--
+-- Name: builds_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('builds_id_seq', 1, false);
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('commits_id_seq', 210, true);
+
+--
+-- Name: coupons_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('coupons_id_seq', 1, false);
+
+--
+-- Name: crons_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('crons_id_seq', 1, false);
+
+--
+-- Name: emails_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('emails_id_seq', 8, true);
+
+--
+-- Name: invoices_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('invoices_id_seq', 1, false);
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('jobs_id_seq', 1, true);
+
+--
+-- Name: log_parts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('log_parts_id_seq', 7609, true);
+
+--
+-- Name: ssl_keys_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('ssl_keys_id_seq', 30, true);
+
+
+--
+-- Name: stages_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('stages_id_seq', 19, true);
+
+--
+-- Name: stars_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('stars_id_seq', 1, false);
+
+--
+-- Name: stripe_events_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('stripe_events_id_seq', 1, false);
+
+--
+-- Name: subscriptions_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('subscriptions_id_seq', 1, false);
+
+--
+-- Name: tags_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('tags_id_seq', 1, false);
+
+--
+-- Name: tokens_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('tokens_id_seq', 8, true);
+
+--
+-- Name: trial_allowances_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('trial_allowances_id_seq', 1, false);
+
+--
+-- Name: trials_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('trials_id_seq', 1, false);
+
+--
+-- Name: urls_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('urls_id_seq', 1, false);
+
+--
+-- Name: user_beta_features_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('user_beta_features_id_seq', 1, false);
+
+--
+-- Name: users_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
+--
+
+SELECT pg_catalog.setval('users_id_seq', 8, true);
+
+
+SET search_path = sqitch, pg_catalog;
+
+
+SET search_path = public, pg_catalog;
+
+--
+-- Name: abuses_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY abuses
+ ADD CONSTRAINT abuses_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: annotation_providers_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY annotation_providers
+ ADD CONSTRAINT annotation_providers_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: annotations_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY annotations
+ ADD CONSTRAINT annotations_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: beta_features_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY beta_features
+ ADD CONSTRAINT beta_features_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: branches_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY branches
+ ADD CONSTRAINT branches_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: broadcasts_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY broadcasts
+ ADD CONSTRAINT broadcasts_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: builds_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY builds
+ ADD CONSTRAINT builds_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: commits_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY commits
+ ADD CONSTRAINT commits_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: coupons_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY coupons
+ ADD CONSTRAINT coupons_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: crons_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY crons
+ ADD CONSTRAINT crons_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: emails_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY emails
+ ADD CONSTRAINT emails_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: invoices_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY invoices
+ ADD CONSTRAINT invoices_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY jobs
+ ADD CONSTRAINT jobs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: log_parts_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY log_parts
+ ADD CONSTRAINT log_parts_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: logs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY logs
+ ADD CONSTRAINT logs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: memberships_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY memberships
+ ADD CONSTRAINT memberships_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: messages_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY messages
+ ADD CONSTRAINT messages_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: organizations_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY organizations
+ ADD CONSTRAINT organizations_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: owner_groups_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY owner_groups
+ ADD CONSTRAINT owner_groups_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: permissions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY permissions
+ ADD CONSTRAINT permissions_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pull_requests_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY pull_requests
+ ADD CONSTRAINT pull_requests_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: queueable_jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY queueable_jobs
+ ADD CONSTRAINT queueable_jobs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: repositories_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY repositories
+ ADD CONSTRAINT repositories_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: requests_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY requests
+ ADD CONSTRAINT requests_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: ssl_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY ssl_keys
+ ADD CONSTRAINT ssl_keys_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: stages_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY stages
+ ADD CONSTRAINT stages_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: stars_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY stars
+ ADD CONSTRAINT stars_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: stripe_events_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY stripe_events
+ ADD CONSTRAINT stripe_events_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: subscriptions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY subscriptions
+ ADD CONSTRAINT subscriptions_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: tags_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY tags
+ ADD CONSTRAINT tags_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: tokens_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY tokens
+ ADD CONSTRAINT tokens_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: trial_allowances_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY trial_allowances
+ ADD CONSTRAINT trial_allowances_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: trials_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY trials
+ ADD CONSTRAINT trials_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: urls_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY urls
+ ADD CONSTRAINT urls_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: user_beta_features_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY user_beta_features
+ ADD CONSTRAINT user_beta_features_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: users_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY users
+ ADD CONSTRAINT users_pkey PRIMARY KEY (id);
+
+
+SET search_path = sqitch, pg_catalog;
+
+--
+-- Name: changes_pkey; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY changes
+ ADD CONSTRAINT changes_pkey PRIMARY KEY (change_id);
+
+
+--
+-- Name: changes_project_script_hash_key; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY changes
+ ADD CONSTRAINT changes_project_script_hash_key UNIQUE (project, script_hash);
+
+
+--
+-- Name: dependencies_pkey; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY dependencies
+ ADD CONSTRAINT dependencies_pkey PRIMARY KEY (change_id, dependency);
+
+
+--
+-- Name: events_pkey; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY events
+ ADD CONSTRAINT events_pkey PRIMARY KEY (change_id, committed_at);
+
+
+--
+-- Name: projects_pkey; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY projects
+ ADD CONSTRAINT projects_pkey PRIMARY KEY (project);
+
+
+--
+-- Name: projects_uri_key; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY projects
+ ADD CONSTRAINT projects_uri_key UNIQUE (uri);
+
+
+--
+-- Name: releases_pkey; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY releases
+ ADD CONSTRAINT releases_pkey PRIMARY KEY (version);
+
+
+--
+-- Name: tags_pkey; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY tags
+ ADD CONSTRAINT tags_pkey PRIMARY KEY (tag_id);
+
+
+--
+-- Name: tags_project_tag_key; Type: CONSTRAINT; Schema: sqitch; Owner: postgres; Tablespace:
+--
+
+ALTER TABLE ONLY tags
+ ADD CONSTRAINT tags_project_tag_key UNIQUE (project, tag);
+
+
+SET search_path = public, pg_catalog;
+
+--
+-- Name: index_abuses_on_owner; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_abuses_on_owner ON abuses USING btree (owner_id);
+
+
+--
+-- Name: index_abuses_on_owner_id_and_owner_type_and_level; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_abuses_on_owner_id_and_owner_type_and_level ON abuses USING btree (owner_id, owner_type, level);
+
+
+--
+-- Name: index_annotations_on_job_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_annotations_on_job_id ON annotations USING btree (job_id);
+
+
+--
+-- Name: index_branches_on_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_branches_on_repository_id ON branches USING btree (repository_id);
+
+
+--
+-- Name: index_branches_on_repository_id_and_name; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_branches_on_repository_id_and_name ON branches USING btree (repository_id, name);
+
+
+--
+-- Name: index_broadcasts_on_recipient_id_and_recipient_type; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_broadcasts_on_recipient_id_and_recipient_type ON broadcasts USING btree (recipient_id, recipient_type);
+
+
+--
+-- Name: index_builds_on_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_repository_id ON builds USING btree (repository_id);
+
+
+--
+-- Name: index_builds_on_repository_id_and_branch_and_event_type; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_repository_id_and_branch_and_event_type ON builds USING btree (repository_id, branch, event_type) WHERE ((state)::text = ANY ((ARRAY['created'::character varying, 'queued'::character varying, 'received'::character varying])::text[]));
+
+
+--
+-- Name: index_builds_on_repository_id_and_branch_and_event_type_and_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_repository_id_and_branch_and_event_type_and_id ON builds USING btree (repository_id, branch, event_type, id);
+
+
+--
+-- Name: index_builds_on_repository_id_and_branch_and_id_desc; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_repository_id_and_branch_and_id_desc ON builds USING btree (repository_id, branch, id DESC);
+
+
+--
+-- Name: index_builds_on_repository_id_and_number; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_repository_id_and_number ON builds USING btree (repository_id, ((number)::integer));
+
+
+--
+-- Name: index_builds_on_repository_id_and_number_and_event_type; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_repository_id_and_number_and_event_type ON builds USING btree (repository_id, number, event_type);
+
+
+--
+-- Name: index_builds_on_request_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_request_id ON builds USING btree (request_id);
+
+
+--
+-- Name: index_builds_on_sender_type_and_sender_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_sender_type_and_sender_id ON builds USING btree (sender_type, sender_id);
+
+
+--
+-- Name: index_builds_on_state; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_builds_on_state ON builds USING btree (state);
+
+
+--
+-- Name: index_emails_on_email; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_emails_on_email ON emails USING btree (email);
+
+
+--
+-- Name: index_emails_on_user_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_emails_on_user_id ON emails USING btree (user_id);
+
+
+--
+-- Name: index_invoices_on_stripe_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_invoices_on_stripe_id ON invoices USING btree (stripe_id);
+
+
+--
+-- Name: index_jobs_on_created_at; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_created_at ON jobs USING btree (created_at);
+
+
+--
+-- Name: index_jobs_on_owner_id_and_owner_type_and_state; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_owner_id_and_owner_type_and_state ON jobs USING btree (owner_id, owner_type, state);
+
+
+--
+-- Name: index_jobs_on_source_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_source_id ON jobs USING btree (source_id);
+
+
+--
+-- Name: index_jobs_on_stage_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_stage_id ON jobs USING btree (stage_id);
+
+
+--
+-- Name: index_jobs_on_state; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_state ON jobs USING btree (state);
+
+
+--
+-- Name: index_jobs_on_type_and_source_id_and_source_type; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_type_and_source_id_and_source_type ON jobs USING btree (type, source_id, source_type);
+
+
+--
+-- Name: index_jobs_on_updated_at; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_updated_at ON jobs USING btree (updated_at);
+
+
+--
+-- Name: index_log_parts_on_created_at; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_log_parts_on_created_at ON log_parts USING btree (created_at);
+
+
+--
+-- Name: index_log_parts_on_log_id_and_number; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_log_parts_on_log_id_and_number ON log_parts USING btree (log_id, number);
+
+
+--
+-- Name: index_logs_on_archive_verified; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_logs_on_archive_verified ON logs USING btree (archive_verified);
+
+
+--
+-- Name: index_logs_on_archived_at; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_logs_on_archived_at ON logs USING btree (archived_at);
+
+
+--
+-- Name: index_logs_on_job_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_logs_on_job_id ON logs USING btree (job_id);
+
+
+--
+-- Name: index_memberships_on_user_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_memberships_on_user_id ON memberships USING btree (user_id);
+
+
+--
+-- Name: index_messages_on_subject_type_and_subject_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_messages_on_subject_type_and_subject_id ON messages USING btree (subject_type, subject_id);
+
+
+--
+-- Name: index_organizations_on_github_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_organizations_on_github_id ON organizations USING btree (github_id);
+
+
+--
+-- Name: index_organizations_on_login; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_organizations_on_login ON organizations USING btree (login);
+
+
+--
+-- Name: index_organizations_on_lower_login; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_organizations_on_lower_login ON organizations USING btree (lower((login)::text));
+
+
+--
+-- Name: index_owner_groups_on_owner_type_and_owner_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_owner_groups_on_owner_type_and_owner_id ON owner_groups USING btree (owner_type, owner_id);
+
+
+--
+-- Name: index_owner_groups_on_uuid; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_owner_groups_on_uuid ON owner_groups USING btree (uuid);
+
+
+--
+-- Name: index_permissions_on_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_permissions_on_repository_id ON permissions USING btree (repository_id);
+
+
+--
+-- Name: index_permissions_on_user_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_permissions_on_user_id ON permissions USING btree (user_id);
+
+
+--
+-- Name: index_permissions_on_user_id_and_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_permissions_on_user_id_and_repository_id ON permissions USING btree (user_id, repository_id);
+
+
+--
+-- Name: index_pull_requests_on_repository_id_and_number; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_pull_requests_on_repository_id_and_number ON pull_requests USING btree (repository_id, number);
+
+
+--
+-- Name: index_queueable_jobs_on_job_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_queueable_jobs_on_job_id ON queueable_jobs USING btree (job_id);
+
+
+--
+-- Name: index_repositories_on_active; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_repositories_on_active ON repositories USING btree (active);
+
+
+--
+-- Name: index_repositories_on_github_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_repositories_on_github_id ON repositories USING btree (github_id);
+
+
+--
+-- Name: index_repositories_on_lower_name; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_repositories_on_lower_name ON repositories USING btree (lower((name)::text));
+
+
+--
+-- Name: index_repositories_on_name; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_repositories_on_name ON repositories USING btree (name);
+
+
+--
+-- Name: index_repositories_on_owner_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_repositories_on_owner_id ON repositories USING btree (owner_id);
+
+
+--
+-- Name: index_repositories_on_owner_name; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_repositories_on_owner_name ON repositories USING btree (owner_name);
+
+
+--
+-- Name: index_repositories_on_slug; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_repositories_on_slug ON repositories USING gin (((((owner_name)::text || '/'::text) || (name)::text)) gin_trgm_ops);
+
+
+--
+-- Name: index_requests_on_commit_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_requests_on_commit_id ON requests USING btree (commit_id);
+
+
+--
+-- Name: index_requests_on_created_at; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_requests_on_created_at ON requests USING btree (created_at);
+
+
+--
+-- Name: index_requests_on_head_commit; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_requests_on_head_commit ON requests USING btree (head_commit);
+
+
+--
+-- Name: index_requests_on_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_requests_on_repository_id ON requests USING btree (repository_id);
+
+
+--
+-- Name: index_requests_on_repository_id_and_id_desc; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_requests_on_repository_id_and_id_desc ON requests USING btree (repository_id, id DESC);
+
+
+--
+-- Name: index_ssl_key_on_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_ssl_key_on_repository_id ON ssl_keys USING btree (repository_id);
+
+
+--
+-- Name: index_stages_on_build_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_stages_on_build_id ON stages USING btree (build_id);
+
+
+--
+-- Name: index_stars_on_user_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_stars_on_user_id ON stars USING btree (user_id);
+
+
+--
+-- Name: index_stars_on_user_id_and_repository_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_stars_on_user_id_and_repository_id ON stars USING btree (user_id, repository_id);
+
+
+--
+-- Name: index_stripe_events_on_date; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_stripe_events_on_date ON stripe_events USING btree (date);
+
+
+--
+-- Name: index_stripe_events_on_event_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_stripe_events_on_event_id ON stripe_events USING btree (event_id);
+
+
+--
+-- Name: index_stripe_events_on_event_type; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_stripe_events_on_event_type ON stripe_events USING btree (event_type);
+
+
+--
+-- Name: index_tags_on_repository_id_and_name; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_tags_on_repository_id_and_name ON tags USING btree (repository_id, name);
+
+
+--
+-- Name: index_tokens_on_token; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_tokens_on_token ON tokens USING btree (token);
+
+
+--
+-- Name: index_tokens_on_user_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_tokens_on_user_id ON tokens USING btree (user_id);
+
+
+--
+-- Name: index_trial_allowances_on_creator_id_and_creator_type; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_trial_allowances_on_creator_id_and_creator_type ON trial_allowances USING btree (creator_id, creator_type);
+
+
+--
+-- Name: index_trial_allowances_on_trial_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_trial_allowances_on_trial_id ON trial_allowances USING btree (trial_id);
+
+
+--
+-- Name: index_trials_on_owner; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_trials_on_owner ON trials USING btree (owner_id, owner_type);
+
+
+--
+-- Name: index_user_beta_features_on_user_id_and_beta_feature_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_user_beta_features_on_user_id_and_beta_feature_id ON user_beta_features USING btree (user_id, beta_feature_id);
+
+
+--
+-- Name: index_users_on_github_id; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_users_on_github_id ON users USING btree (github_id);
+
+
+--
+-- Name: index_users_on_github_oauth_token; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_users_on_github_oauth_token ON users USING btree (github_oauth_token);
+
+
+--
+-- Name: index_users_on_login; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_users_on_login ON users USING btree (login);
+
+
+--
+-- Name: index_users_on_lower_login; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE INDEX index_users_on_lower_login ON users USING btree (lower((login)::text));
+
+
+--
+-- Name: subscriptions_owner; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX subscriptions_owner ON subscriptions USING btree (owner_id, owner_type) WHERE ((status)::text = 'subscribed'::text);
+
+
+--
+-- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
+--
+
+CREATE UNIQUE INDEX unique_schema_migrations ON schema_migrations USING btree (version);
+
+
+--
+-- Name: set_updated_at_on_builds; Type: TRIGGER; Schema: public; Owner: postgres
+--
+
+CREATE TRIGGER set_updated_at_on_builds BEFORE INSERT OR UPDATE ON builds FOR EACH ROW EXECUTE PROCEDURE set_updated_at();
+
+
+--
+-- Name: set_updated_at_on_jobs; Type: TRIGGER; Schema: public; Owner: postgres
+--
+
+CREATE TRIGGER set_updated_at_on_jobs BEFORE INSERT OR UPDATE ON jobs FOR EACH ROW EXECUTE PROCEDURE set_updated_at();
+
+
+--
+-- Name: fk_repositories_current_build_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres
+--
+
+ALTER TABLE ONLY repositories
+ ADD CONSTRAINT fk_repositories_current_build_id FOREIGN KEY (current_build_id) REFERENCES builds(id);
+
+
+SET search_path = sqitch, pg_catalog;
+
+--
+-- Name: changes_project_fkey; Type: FK CONSTRAINT; Schema: sqitch; Owner: postgres
+--
+
+ALTER TABLE ONLY changes
+ ADD CONSTRAINT changes_project_fkey FOREIGN KEY (project) REFERENCES projects(project) ON UPDATE CASCADE;
+
+
+--
+-- Name: dependencies_change_id_fkey; Type: FK CONSTRAINT; Schema: sqitch; Owner: postgres
+--
+
+ALTER TABLE ONLY dependencies
+ ADD CONSTRAINT dependencies_change_id_fkey FOREIGN KEY (change_id) REFERENCES changes(change_id) ON UPDATE CASCADE ON DELETE CASCADE;
+
+
+--
+-- Name: dependencies_dependency_id_fkey; Type: FK CONSTRAINT; Schema: sqitch; Owner: postgres
+--
+
+ALTER TABLE ONLY dependencies
+ ADD CONSTRAINT dependencies_dependency_id_fkey FOREIGN KEY (dependency_id) REFERENCES changes(change_id) ON UPDATE CASCADE;
+
+
+--
+-- Name: events_project_fkey; Type: FK CONSTRAINT; Schema: sqitch; Owner: postgres
+--
+
+ALTER TABLE ONLY events
+ ADD CONSTRAINT events_project_fkey FOREIGN KEY (project) REFERENCES projects(project) ON UPDATE CASCADE;
+
+
+--
+-- Name: tags_change_id_fkey; Type: FK CONSTRAINT; Schema: sqitch; Owner: postgres
+--
+
+ALTER TABLE ONLY tags
+ ADD CONSTRAINT tags_change_id_fkey FOREIGN KEY (change_id) REFERENCES changes(change_id) ON UPDATE CASCADE;
+
+
+--
+-- Name: tags_project_fkey; Type: FK CONSTRAINT; Schema: sqitch; Owner: postgres
+--
+
+ALTER TABLE ONLY tags
+ ADD CONSTRAINT tags_project_fkey FOREIGN KEY (project) REFERENCES projects(project) ON UPDATE CASCADE;
+
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: postgres
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+REVOKE ALL ON SCHEMA public FROM postgres;
+GRANT ALL ON SCHEMA public TO postgres;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+--
+-- PostgreSQL database dump complete
+--
+
diff --git a/app/assets/images/.keep b/dump/.keep
similarity index 100%
rename from app/assets/images/.keep
rename to dump/.keep
diff --git a/lib/backup.rb b/lib/backup.rb
deleted file mode 100644
index 6964cc8..0000000
--- a/lib/backup.rb
+++ /dev/null
@@ -1,132 +0,0 @@
-# frozen_string_literal: true
-
-require 'active_support/core_ext/array'
-require 'active_support/time'
-require 'config'
-require 'google/cloud/storage'
-require 'models/build_backup'
-require 'models/repository'
-require 'redis'
-
-# main travis-backup class
-class Backup
- def initialize
- @config = Config.new
- connect_gce
- connect_db
- connect_redis
- end
-
- def run
- export
- purge
- end
-
- def connect_db
- ActiveRecord::Base.establish_connection(@config.database_url)
- end
-
- def connect_gce
- return unless @config.gce_project && @config.gce_credentials
-
- storage = Google::Cloud::Storage.new(
- project_id: @config.gce_project,
- credentials: @config.gce_credentials
- )
- @bucket = storage.bucket(@config.gce_bucket)
- end
-
- def connect_redis
- @redis = Redis.new(url: @config.redis_url)
- end
-
- def purge
- BuildBackup.where('created_at < ?', @config.housekeeping_period.to_i.days.ago.to_datetime) do |backup|
- purge_backup(backup)
- end
- end
-
- def export(owner_id = nil)
- if owner_id
- Repository.where('owner_id = ?', owner_id).order(:id).each do |repository|
- process_repo(repository)
- end
- else
- Repository.order(:id).each do |repository|
- process_repo(repository)
- end
- end
- end
-
- def process_repo(repository) # rubocop:disable Metrics/AbcSize, Metrics/MethodLength
- repository.builds.where('created_at < ?', @config.delay.to_i.months.ago.to_datetime)
- .in_groups_of(@config.limit.to_i, false).map do |builds|
- if builds.count == @config.limit.to_i
- builds_export = export_builds(builds)
- file_name = "repository_#{repository.id}_builds_#{builds.first.id}-#{builds.last.id}.json"
- pretty_json = JSON.pretty_generate(builds_export)
- if upload(file_name, pretty_json)
- BuildBackup.new(repository_id: repository.id, file_name: file_name).save!
- builds.each(&:destroy)
- end
- builds_export
- end
- end
- end
-
- def purge_backup(backup)
- begin
- @bucket.file(backup.file_name).delete
- rescue
- print "Unable to remove file #{backup.file_name}\n"
- end
- backup.destroy
- end
-
- private
-
- def upload(file_name, content) # rubocop:disable Metrics/MethodLength
- uploaded = false
- begin
- File.open(file_name, 'w') do |file|
- file.write(content)
- file.close
- remote_file = @bucket.create_file(file_name, file_name)
- uploaded = remote_file.name == file_name
- end
- rescue => e
- print "Failed to save #{file_name}, error: #{e.inspect}\n"
- ensure
- File.delete(file_name)
- end
- uploaded
- end
-
- def generate_log_token(job_id)
- token = SecureRandom.urlsafe_base64(16)
- @redis.set("l:#{token}", job_id)
- @redis.expire("l:#{token}", @config.housekeeping_period.to_i * 86400)
- token
- end
-
- def export_builds(builds)
- builds.map do |build|
- build_export = build.attributes
- build_export[:build_config] = build.build_config&.attributes
- build_export[:jobs] = export_jobs(build.jobs)
-
- build_export
- end
- end
-
- def export_jobs(jobs)
- jobs.map do |job|
- job_export = job.attributes
- job_export[:job_config] = job.job_config&.attributes
- job_export[:log_url] = "#{@config.logs_url}/#{job.id}/log.txt"
- job_export[:log_url] += "?log.token=#{generate_log_token(job.id)}" if job.repository&.private?
-
- job_export
- end
- end
-end
diff --git a/lib/backup/move_logs.rb b/lib/backup/move_logs.rb
new file mode 100644
index 0000000..928d84d
--- /dev/null
+++ b/lib/backup/move_logs.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+class Backup
+ class MoveLogs
+ attr_reader :config
+
+ def initialize(config, db_helper, dry_run_reporter=nil)
+ @config = config
+ @dry_run_reporter = dry_run_reporter
+ @db_helper = db_helper
+ end
+
+ def run
+ return run_dry if @config.dry_run
+
+ @db_helper.connect_db(@config.database_url)
+ Log.order(:id).in_batches(of: @config.limit.to_i).map do |logs_batch|
+ process_logs_batch(logs_batch)
+ end
+ end
+
+ def process_logs_batch(logs_batch)
+ log_hashes = logs_batch.as_json
+ @db_helper.connect_db(@config.destination_db_url)
+
+ log_hashes.each do |log_hash|
+ new_log = Log.new(log_hash)
+ new_log.save!
+ end
+
+ @db_helper.connect_db(@config.database_url)
+
+ logs_batch.each(&:destroy)
+
+ GC.start
+ end
+
+ def run_dry
+ ids = Log.order(:id).map(&:id)
+ @dry_run_reporter.add_to_report(:logs, *ids)
+ end
+ end
+end
diff --git a/lib/backup/remove_old.rb b/lib/backup/remove_old.rb
new file mode 100644
index 0000000..a4f4fb6
--- /dev/null
+++ b/lib/backup/remove_old.rb
@@ -0,0 +1,204 @@
+# frozen_string_literal: true
+
+class Backup
+ class RemoveOld
+ attr_reader :config
+
+ def initialize(config, dry_run_reporter=nil)
+ @config = config
+ @dry_run_reporter = dry_run_reporter
+ end
+
+ def dry_run_report
+ @dry_run_reporter.report
+ end
+
+ def run(args={})
+ user_id = args[:user_id] || @config.user_id
+ repo_id = args[:repo_id] || @config.repo_id
+ org_id = args[:org_id] || @config.org_id
+
+ if user_id
+ process_repos_for_owner(user_id, 'User')
+ elsif org_id
+ process_repos_for_owner(org_id, 'Organization')
+ elsif repo_id
+ process_repo_with_id(repo_id)
+ else
+ process_all_repos
+ end
+ end
+
+ def process_repos_for_owner(owner_id, owner_type)
+ Repository.where('owner_id = ? and owner_type = ?', owner_id, owner_type).order(:id).each do |repository|
+ process_repo(repository)
+ end
+ end
+
+ def process_repo_with_id(repo_id)
+ process_repo(Repository.find(repo_id))
+ end
+
+ def process_all_repos
+ Repository.order(:id).each do |repository|
+ process_repo(repository)
+ end
+ end
+
+ def process_repo(repository)
+ process_repo_builds(repository)
+ process_repo_requests(repository)
+ end
+
+ def process_repo_builds(repository) # rubocop:disable Metrics/AbcSize, Metrics/MethodLength
+ threshold = @config.threshold.to_i.months.ago.to_datetime
+ current_build_id = repository.current_build_id || -1
+ repository.builds.where('created_at < ? and id != ?', threshold, current_build_id)
+ .in_batches(of: @config.limit.to_i).map do |builds_batch|
+ if @config.if_backup
+ file_prefix = "repository_#{repository.id}"
+ save_and_destroy_builds_batch(builds_batch, file_prefix)
+ else
+ destroy_builds_batch(builds_batch)
+ end
+ end.compact.reduce(&:&)
+ end
+
+ def process_repo_requests(repository)
+ threshold = @config.threshold.to_i.months.ago.to_datetime
+ repository.requests.where('created_at < ?', threshold)
+ .in_batches(of: @config.limit.to_i).map do |requests_batch|
+ @config.if_backup ? save_and_destroy_requests_batch(requests_batch, repository) : destroy_requests_batch(requests_batch)
+ end.compact
+ end
+
+ private
+
+ def save_and_destroy_builds_batch(builds_batch, file_prefix)
+ builds_export = builds_batch.map(&:attributes)
+
+ dependencies_saved = builds_batch.map do |build|
+ save_build_jobs_and_logs(build, file_prefix)
+ end.reduce(&:&)
+
+ if dependencies_saved
+ file_name = "#{file_prefix}_builds_#{builds_batch.first.id}-#{builds_batch.last.id}.json"
+ pretty_json = JSON.pretty_generate(builds_export)
+ save_file(file_name, pretty_json) ? destroy_builds_batch(builds_batch) : false
+ else
+ false
+ end
+ end
+
+ def save_build_jobs_and_logs(build, file_prefix)
+ build.jobs.in_batches(of: @config.limit.to_i).map do |jobs_batch|
+ file_prefix = "#{file_prefix}_build_#{build.id}"
+ save_jobs_batch(jobs_batch, file_prefix)
+ end.compact.reduce(&:&)
+ end
+
+ def save_jobs_batch(jobs_batch, file_prefix)
+ jobs_export = jobs_batch.map(&:attributes)
+
+ logs_saved = jobs_batch.map do |job|
+ save_job_logs(job, file_prefix)
+ end.reduce(&:&)
+
+ if logs_saved
+ file_name = "#{file_prefix}_jobs_#{jobs_batch.first.id}-#{jobs_batch.last.id}.json"
+ pretty_json = JSON.pretty_generate(jobs_export)
+ save_file(file_name, pretty_json)
+ else
+ false
+ end
+ end
+
+ def save_job_logs(job, file_prefix)
+ job.logs.in_batches(of: @config.limit.to_i).map do |logs_batch|
+ file_prefix = "#{file_prefix}_job_#{job.id}"
+ save_logs_batch(logs_batch, file_prefix)
+ end.compact.reduce(&:&)
+ end
+
+ def save_logs_batch(logs_batch, file_prefix)
+ logs_export = logs_batch.map(&:attributes)
+ file_name = "#{file_prefix}_logs_#{logs_batch.first.id}-#{logs_batch.last.id}.json"
+ pretty_json = JSON.pretty_generate(logs_export)
+ save_file(file_name, pretty_json)
+ end
+
+ def destroy_builds_batch(builds_batch)
+ return destroy_builds_batch_dry(builds_batch) if @config.dry_run
+
+ builds_batch.each(&:destroy)
+ end
+
+ def destroy_builds_batch_dry(builds_batch)
+ @dry_run_reporter.add_to_report(:builds, *builds_batch.map(&:id))
+
+ jobs_ids = builds_batch.map do |build|
+ build.jobs.map(&:id) || []
+ end.flatten
+
+ @dry_run_reporter.add_to_report(:jobs, *jobs_ids)
+
+ logs_ids = builds_batch.map do |build|
+ build.jobs.map do |job|
+ job.logs.map(&:id) || []
+ end.flatten || []
+ end.flatten
+
+ @dry_run_reporter.add_to_report(:logs, *logs_ids)
+ end
+
+ def save_and_destroy_requests_batch(requests_batch, repository)
+ requests_export = export_requests(requests_batch)
+ file_name = "repository_#{repository.id}_requests_#{requests_batch.first.id}-#{requests_batch.last.id}.json"
+ pretty_json = JSON.pretty_generate(requests_export)
+ if save_file(file_name, pretty_json)
+ destroy_requests_batch(requests_batch)
+ end
+ requests_export
+ end
+
+ def destroy_requests_batch(requests_batch)
+ return destroy_requests_batch_dry(requests_batch) if @config.dry_run
+
+ requests_batch.each(&:destroy)
+ end
+
+ def destroy_requests_batch_dry(requests_batch)
+ @dry_run_reporter.add_to_report(:requests, *requests_batch.map(&:id))
+ end
+
+ def save_file(file_name, content) # rubocop:disable Metrics/MethodLength
+ return true if @config.dry_run
+
+ saved = false
+ begin
+ unless File.directory?(@config.files_location)
+ FileUtils.mkdir_p(@config.files_location)
+ end
+
+ File.open(file_path(file_name), 'w') do |file|
+ file.write(content)
+ file.close
+ saved = true
+ end
+ rescue => e
+ print "Failed to save #{file_name}, error: #{e.inspect}\n"
+ end
+ saved
+ end
+
+ def file_path(file_name)
+ "#{@config.files_location}/#{file_name}"
+ end
+
+ def export_requests(requests)
+ requests.map do |request|
+ request.attributes
+ end
+ end
+ end
+end
diff --git a/lib/backup/remove_orphans.rb b/lib/backup/remove_orphans.rb
new file mode 100644
index 0000000..c0c5d78
--- /dev/null
+++ b/lib/backup/remove_orphans.rb
@@ -0,0 +1,146 @@
+# frozen_string_literal: true
+
+class Backup
+ class RemoveOrphans
+ attr_reader :config
+
+ def initialize(config, dry_run_reporter=nil)
+ @config = config
+ @dry_run_reporter = dry_run_reporter
+ end
+
+ def dry_run_report
+ @dry_run_reporter.report
+ end
+
+ def run
+ cases.each do |model_block|
+ model_block[:relations].each do |relation|
+ process_table(
+ main_model: model_block[:main_model],
+ related_model: relation[:related_model],
+ fk_name: relation[:fk_name],
+ method: model_block[:method],
+ dry_run_complement: model_block[:dry_run_complement]
+ )
+ end
+ end
+ end
+
+ def cases
+ [
+ {
+ main_model: Repository,
+ relations: [
+ {related_model: Build, fk_name: 'current_build_id'},
+ {related_model: Build, fk_name: 'last_build_id'}
+ ]
+ }, {
+ main_model: Build,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'},
+ {related_model: Commit, fk_name: 'commit_id'},
+ {related_model: Request, fk_name: 'request_id'},
+ {related_model: PullRequest, fk_name: 'pull_request_id'},
+ {related_model: Branch, fk_name: 'branch_id'},
+ {related_model: Tag, fk_name: 'tag_id'}
+ ],
+ method: :destroy_all,
+ dry_run_complement: -> (ids) { add_builds_dependencies_to_dry_run_report(ids) }
+ }, {
+ main_model: Job,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'},
+ {related_model: Commit, fk_name: 'commit_id'},
+ {related_model: Stage, fk_name: 'stage_id'},
+ ]
+ }, {
+ main_model: Branch,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'},
+ {related_model: Build, fk_name: 'last_build_id'}
+ ]
+ }, {
+ main_model: Tag,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'},
+ {related_model: Build, fk_name: 'last_build_id'}
+ ]
+ }, {
+ main_model: Commit,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'},
+ {related_model: Branch, fk_name: 'branch_id'},
+ {related_model: Tag, fk_name: 'tag_id'}
+ ]
+ }, {
+ main_model: Cron,
+ relations: [
+ {related_model: Branch, fk_name: 'branch_id'}
+ ]
+ }, {
+ main_model: PullRequest,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'}
+ ]
+ }, {
+ main_model: SslKey,
+ relations: [
+ {related_model: Repository, fk_name: 'repository_id'}
+ ]
+ }, {
+ main_model: Request,
+ relations: [
+ {related_model: Commit, fk_name: 'commit_id'},
+ {related_model: PullRequest, fk_name: 'pull_request_id'},
+ {related_model: Branch, fk_name: 'branch_id'},
+ {related_model: Tag, fk_name: 'tag_id'}
+ ]
+ }, {
+ main_model: Stage,
+ relations: [
+ {related_model: Build, fk_name: 'build_id'}
+ ]
+ }
+ ]
+ end
+
+ def add_builds_dependencies_to_dry_run_report(ids_for_delete)
+ repos_for_delete = Repository.where(current_build_id: ids_for_delete)
+ jobs_for_delete = Job.where(source_id: ids_for_delete)
+ @dry_run_reporter.add_to_report(:repositories, *repos_for_delete.map(&:id))
+ @dry_run_reporter.add_to_report(:jobs, *jobs_for_delete.map(&:id))
+ end
+
+ def process_table(args)
+ main_model = args[:main_model]
+ related_model = args[:related_model]
+ fk_name = args[:fk_name]
+ method = args[:method] || :delete_all
+ dry_run_complement = args[:dry_run_complement]
+
+ main_table = main_model.table_name
+ related_table = related_model.table_name
+
+ for_delete = main_model.find_by_sql(%{
+ select a.*
+ from #{main_table} a
+ left join #{related_table} b
+ on a.#{fk_name} = b.id
+ where
+ a.#{fk_name} is not null
+ and b.id is null;
+ })
+
+ ids_for_delete = for_delete.map(&:id)
+
+ if config.dry_run
+ key = main_table.to_sym
+ @dry_run_reporter.add_to_report(key, *ids_for_delete)
+ dry_run_complement.call(ids_for_delete) if dry_run_complement
+ else
+ main_model.where(id: ids_for_delete).send(method)
+ end
+ end
+ end
+end
diff --git a/lib/config.rb b/lib/config.rb
index c8a1247..ead7726 100644
--- a/lib/config.rb
+++ b/lib/config.rb
@@ -1,23 +1,171 @@
# frozen_string_literal: true
+require 'optparse'
-# Config for travis-backup
class Config
- attr_reader :limit, :delay, :housekeeping_period, :database_url, :logs_url, :gce_project,
- :gce_credentials, :gce_bucket, :redis_url
-
- def initialize # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength
- config = YAML.load(File.open('config/settings.yml'))
- connection_details = YAML.load(File.open('config/database.yml'))
-
- @limit = ENV['BACKUP_LIMIT'] || config['backup']['limit']
- @delay = ENV['BACKUP_DELAY'] || config['backup']['delay']
- @logs_url = ENV['LOGS_URL'] || config['backup']['logs_url']
- @housekeeping_period = ENV['BACKUP_HOUSEKEEPING_PERIOD'] || config['backup']['housekeeping_period']
- @database_url = ENV['DATABASE_URL'] || connection_details['development']
- @gce_project = ENV['GCE_PROJECT'] || config['gce']['project']
- credentials = ENV['GCE_CREDENTIALS'] || (File.exist?(config['gce']['credentials']) ? File.read(config['gce']['credentials']) : nil)
- @gce_credentials = credentials ? JSON.parse(credentials) : nil
- @gce_bucket = ENV['GCE_BUCKET'] || config['gce']['bucket']
- @redis = ENV['REDIS_URL'] || config['redis']['url']
+ attr_reader :if_backup,
+ :dry_run,
+ :limit,
+ :threshold,
+ :files_location,
+ :database_url,
+ :user_id,
+ :repo_id,
+ :org_id,
+ :move_logs,
+ :remove_orphans,
+ :destination_db_url
+
+ def initialize(args={}) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength
+ set_values(args)
+ check_values
+ end
+
+ def set_values(args)
+ config = yaml_load('config/settings.yml')
+ connection_details = yaml_load('config/database.yml')
+ argv_opts = argv_options
+ @if_backup = first_not_nil(
+ args[:if_backup],
+ argv_opts[:if_backup],
+ ENV['IF_BACKUP'],
+ config.dig('backup', 'if_backup'),
+ true
+ )
+ @dry_run = first_not_nil(
+ args[:dry_run],
+ argv_opts[:dry_run],
+ ENV['BACKUP_DRY_RUN'],
+ config.dig('backup', 'dry_run'),
+ false
+ )
+ @limit = first_not_nil(
+ args[:limit],
+ argv_opts[:limit],
+ ENV['BACKUP_LIMIT'],
+ config.dig('backup', 'limit'),
+ 1000
+ )
+ @threshold = first_not_nil(
+ args[:threshold],
+ argv_opts[:threshold],
+ ENV['BACKUP_THRESHOLD'],
+ config.dig('backup', 'threshold')
+ )
+ @files_location = first_not_nil(
+ args[:files_location],
+ argv_opts[:files_location],
+ ENV['BACKUP_FILES_LOCATION'],
+ config.dig('backup', 'files_location'),
+ './dump'
+ )
+ @database_url = first_not_nil(
+ args[:database_url],
+ argv_opts[:database_url],
+ ENV['DATABASE_URL'],
+ connection_details.dig(ENV['RAILS_ENV'])
+ )
+ @user_id = first_not_nil(
+ args[:user_id],
+ argv_opts[:user_id],
+ ENV['BACKUP_USER_ID'],
+ config.dig('backup', 'user_id')
+ )
+ @repo_id = first_not_nil(
+ args[:repo_id],
+ argv_opts[:repo_id],
+ ENV['BACKUP_REPO_ID'],
+ config.dig('backup', 'repo_id')
+ )
+ @org_id = first_not_nil(
+ args[:org_id],
+ argv_opts[:org_id],
+ ENV['BACKUP_ORG_ID'],
+ config.dig('backup', 'org_id')
+ )
+ @move_logs = first_not_nil(
+ args[:move_logs],
+ argv_opts[:move_logs],
+ ENV['BACKUP_MOVE_LOGS'],
+ config.dig('backup', 'move_logs'),
+ false
+ )
+ @remove_orphans = first_not_nil(
+ args[:remove_orphans],
+ argv_opts[:remove_orphans],
+ ENV['BACKUP_REMOVE_ORPHANS'],
+ config.dig('backup', 'remove_orphans'),
+ false
+ )
+ @destination_db_url = first_not_nil(
+ args[:destination_db_url],
+ argv_opts[:destination_db_url],
+ ENV['BACKUP_DESTINATION_DB_URL'],
+ connection_details.dig(ENV['RAILS_ENV'], 'destination')
+ )
+ end
+
+ def check_values
+ if !@move_logs && !@remove_orphans && !@threshold
+ message = abort_message("Please provide the threshold argument. Data younger than it will be omitted. " +
+ "Threshold defines number of months from now.")
+ abort message
+ end
+
+ if !@database_url
+ message = abort_message("Please provide proper database URL.")
+ abort message
+ end
+
+ if (@move_logs && !@destination_db_url)
+ abort "\nFor moving logs you need to specify your destination database. Example usage:\n" +
+ "\n $ bin/travis_backup 'postgres://source_url' --move_logs --destination_db_url 'postgres://destination_url'\n" +
+ "\nor using in code:\n" +
+ "\n Backup.new(database_url: 'postgres://source_url', destination_db_url: 'postgres://destination_url', move_logs: true)\n" +
+ "\nYou can also set it using environment variables or configuration files.\n"
+ end
+ end
+
+ def abort_message(intro)
+ "\n#{intro} Example usage:\n"+
+ "\n $ bin/travis_backup 'postgres://my_database_url' --threshold 6\n" +
+ "\nor using in code:\n" +
+ "\n Backup.new(database_url: 'postgres://my_database_url', threshold: 6)\n" +
+ "\nYou can also set it using environment variables or configuration files.\n"
+ end
+
+ def argv_options
+ argv_copy = ARGV.clone
+ options = {}
+ OptionParser.new do |opt|
+ opt.on('-b', '--backup') { |o| options[:if_backup] = o }
+ opt.on('-d', '--dry_run') { |o| options[:dry_run] = o }
+ opt.on('-l', '--limit X') { |o| options[:limit] = o.to_i }
+ opt.on('-t', '--threshold X') { |o| options[:threshold] = o.to_i }
+ opt.on('-f', '--files_location X') { |o| options[:files_location] = o }
+ opt.on('-u', '--user_id X') { |o| options[:user_id] = o.to_i }
+ opt.on('-r', '--repo_id X') { |o| options[:repo_id] = o.to_i }
+ opt.on('-o', '--org_id X') { |o| options[:org_id] = o.to_i }
+ opt.on('--move_logs') { |o| options[:move_logs] = o }
+ opt.on('--remove_orphans') { |o| options[:remove_orphans] = o }
+ opt.on('--destination_db_url X') { |o| options[:destination_db_url] = o }
+ end.parse!
+
+ options[:database_url] = ARGV.shift if ARGV[0]
+ argv_copy.each do |arg|
+ ARGV.push(arg)
+ end
+ options
+ end
+
+ def first_not_nil(*arr)
+ arr.compact.first
+ end
+
+ def yaml_load(url)
+ begin
+ YAML.load(File.open(url))
+ rescue => e
+ {}
+ end
end
end
diff --git a/lib/db_helper.rb b/lib/db_helper.rb
new file mode 100644
index 0000000..aa80d0a
--- /dev/null
+++ b/lib/db_helper.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+class DbHelper
+ def initialize(config)
+ @config = config
+ connect_db
+ end
+
+ def connect_db(config_or_url=@config.database_url)
+ ActiveRecord::Base.establish_connection(config_or_url)
+ end
+
+  def do_in_other_db(config_or_url)
+    saved_config = ActiveRecord::Base.connection_db_config
+    connect_db(config_or_url)
+    yield
+  ensure
+    connect_db(saved_config)
+  end
+
+  def do_without_triggers
+    ActiveRecord::Base.connection.execute('set session_replication_role = replica;')
+    yield
+  ensure
+    ActiveRecord::Base.connection.execute('set session_replication_role = default;')
+  end
+end
\ No newline at end of file
diff --git a/lib/dry_run_reporter.rb b/lib/dry_run_reporter.rb
new file mode 100644
index 0000000..9ae2a86
--- /dev/null
+++ b/lib/dry_run_reporter.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+class DryRunReporter
+ attr_reader :report
+
+ def initialize
+ @report = {}
+ end
+
+ def add_to_report(key, *values)
+ report[key] = [] if report[key].nil?
+ report[key].concat(values)
+ report[key].uniq!
+ end
+
+ def print_report
+ if @report.to_a.map(&:second).flatten.empty?
+ puts 'Dry run active. No data would be removed in normal run.'
+ else
+ puts 'Dry run active. The following data would be removed in normal run:'
+
+ @report.to_a.map(&:first).each do |symbol|
+ print_report_line(symbol)
+ end
+ end
+ end
+
+ private
+
+ def print_report_line(symbol)
+ puts " - #{symbol}: #{@report[symbol].to_json}" if @report[symbol].any?
+ end
+end
diff --git a/lib/models/branch.rb b/lib/models/branch.rb
new file mode 100644
index 0000000..ed099f7
--- /dev/null
+++ b/lib/models/branch.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Branch < Model
+ self.table_name = 'branches'
+end
diff --git a/lib/models/build.rb b/lib/models/build.rb
index 5741cc2..b5aeb35 100644
--- a/lib/models/build.rb
+++ b/lib/models/build.rb
@@ -1,6 +1,5 @@
# frozen_string_literal: true
-require 'models/build_config'
require 'models/job'
require 'models/model'
require 'models/repository'
@@ -8,8 +7,8 @@
# Build model
class Build < Model
belongs_to :repository
- belongs_to :build_config, foreign_key: :config_id, dependent: :delete
- has_many :jobs, -> { order('id') }, foreign_key: :source_id, dependent: :delete_all, class_name: 'Job'
+ has_many :jobs, -> { order('id') }, foreign_key: :source_id, dependent: :destroy, class_name: 'Job'
+ has_one :repo_for_that_this_build_is_current, foreign_key: :current_build_id, dependent: :destroy, class_name: 'Repository'
self.table_name = 'builds'
end
diff --git a/lib/models/build_config.rb b/lib/models/build_config.rb
deleted file mode 100644
index fca1389..0000000
--- a/lib/models/build_config.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# frozen_string_literal: true
-
-require 'models/model'
-
-# BuildConfig model
-class BuildConfig < Model
- self.table_name = 'build_configs'
-end
diff --git a/lib/models/commit.rb b/lib/models/commit.rb
new file mode 100644
index 0000000..0cf4b0d
--- /dev/null
+++ b/lib/models/commit.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Commit < Model
+ self.table_name = 'commits'
+end
diff --git a/lib/models/cron.rb b/lib/models/cron.rb
new file mode 100644
index 0000000..cc8ef31
--- /dev/null
+++ b/lib/models/cron.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Cron < Model
+ self.table_name = 'crons'
+end
diff --git a/lib/models/job.rb b/lib/models/job.rb
index 423f46a..10dbba3 100644
--- a/lib/models/job.rb
+++ b/lib/models/job.rb
@@ -1,15 +1,15 @@
# frozen_string_literal: true
-require 'models/job_config'
require 'models/model'
require 'models/repository'
+require 'models/log'
# Job model
class Job < Model
self.inheritance_column = :_type_disabled
belongs_to :repository
- belongs_to :job_config, foreign_key: :config_id, dependent: :delete
+ has_many :logs, -> { order('id') }, foreign_key: :job_id, dependent: :destroy, class_name: 'Log'
self.table_name = 'jobs'
end
diff --git a/lib/models/job_config.rb b/lib/models/job_config.rb
deleted file mode 100644
index e263013..0000000
--- a/lib/models/job_config.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# frozen_string_literal: true
-
-require 'models/model'
-
-# JobConfig model
-class JobConfig < Model
- self.table_name = 'job_configs'
-end
diff --git a/lib/models/log.rb b/lib/models/log.rb
new file mode 100644
index 0000000..bbb5f4c
--- /dev/null
+++ b/lib/models/log.rb
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Log < Model
+ belongs_to :job
+
+ self.table_name = 'logs'
+end
diff --git a/lib/models/organization.rb b/lib/models/organization.rb
new file mode 100644
index 0000000..edbd92c
--- /dev/null
+++ b/lib/models/organization.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Organization < Model
+ self.table_name = 'organizations'
+end
diff --git a/lib/models/pull_request.rb b/lib/models/pull_request.rb
new file mode 100644
index 0000000..a9b5553
--- /dev/null
+++ b/lib/models/pull_request.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class PullRequest < Model
+ self.table_name = 'pull_requests'
+end
diff --git a/lib/models/repository.rb b/lib/models/repository.rb
index 0bdc048..fc7660f 100644
--- a/lib/models/repository.rb
+++ b/lib/models/repository.rb
@@ -2,11 +2,12 @@
require 'models/model'
require 'models/build'
+require 'models/request'
# Repository model
class Repository < Model
- has_many :builds, -> { order('id') }, foreign_key: :repository_id, dependent: :destroy, class_name: 'Build'
- has_many :build_backup
+ has_many :builds, -> { order('id') }, foreign_key: :repository_id, class_name: 'Build'
+ has_many :requests, -> { order('id') }, foreign_key: :repository_id, dependent: :destroy, class_name: 'Request'
self.table_name = 'repositories'
end
diff --git a/lib/models/build_backup.rb b/lib/models/request.rb
similarity index 59%
rename from lib/models/build_backup.rb
rename to lib/models/request.rb
index e3acad8..4fb1408 100644
--- a/lib/models/build_backup.rb
+++ b/lib/models/request.rb
@@ -3,9 +3,8 @@
require 'models/model'
require 'models/repository'
-# Build model
-class BuildBackup < Model
+class Request < Model
belongs_to :repository
- self.table_name = 'build_backups'
+ self.table_name = 'requests'
end
diff --git a/lib/models/ssl_key.rb b/lib/models/ssl_key.rb
new file mode 100644
index 0000000..0e0c7bb
--- /dev/null
+++ b/lib/models/ssl_key.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class SslKey < Model
+ self.table_name = 'ssl_keys'
+end
diff --git a/lib/models/stage.rb b/lib/models/stage.rb
new file mode 100644
index 0000000..38a675e
--- /dev/null
+++ b/lib/models/stage.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Stage < Model
+ self.table_name = 'stages'
+end
diff --git a/lib/models/tag.rb b/lib/models/tag.rb
new file mode 100644
index 0000000..9d6f9e7
--- /dev/null
+++ b/lib/models/tag.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class Tag < Model
+ self.table_name = 'tags'
+end
diff --git a/lib/models/user.rb b/lib/models/user.rb
new file mode 100644
index 0000000..380619f
--- /dev/null
+++ b/lib/models/user.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'models/model'
+
+class User < Model
+ self.table_name = 'users'
+end
diff --git a/lib/travis-backup.rb b/lib/travis-backup.rb
new file mode 100644
index 0000000..b0306d6
--- /dev/null
+++ b/lib/travis-backup.rb
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+require 'active_support/core_ext/array'
+require 'active_support/time'
+require 'config'
+require 'db_helper'
+require 'dry_run_reporter'
+require 'models/repository'
+require 'models/log'
+require 'models/branch'
+require 'models/tag'
+require 'models/commit'
+require 'models/cron'
+require 'models/pull_request'
+require 'models/ssl_key'
+require 'models/request'
+require 'models/stage'
+require 'backup/move_logs'
+require 'backup/remove_orphans'
+require 'backup/remove_old'
+
+# main travis-backup class
+class Backup
+ attr_accessor :config
+
+ def initialize(config_args={})
+ @config = Config.new(config_args)
+ @db_helper = DbHelper.new(@config)
+
+ if @config.dry_run
+ @dry_run_reporter = DryRunReporter.new
+ end
+ end
+
+ def dry_run_report
+ @dry_run_reporter.report
+ end
+
+ def run(args={})
+ if @config.move_logs
+ Backup::MoveLogs.new(@config, @db_helper, @dry_run_reporter).run
+ elsif @config.remove_orphans
+ Backup::RemoveOrphans.new(@config, @dry_run_reporter).run
+ else
+ Backup::RemoveOld.new(@config, @dry_run_reporter).run(args)
+ end
+
+ @dry_run_reporter.print_report if @config.dry_run
+ end
+end
diff --git a/spec/backup/move_logs_spec.rb b/spec/backup/move_logs_spec.rb
new file mode 100644
index 0000000..3ebf053
--- /dev/null
+++ b/spec/backup/move_logs_spec.rb
@@ -0,0 +1,107 @@
+$: << 'lib'
+require 'uri'
+require 'travis-backup'
+require 'models/build'
+require 'models/job'
+require 'models/organization'
+require 'models/user'
+require 'support/factories'
+require 'support/expected_files'
+require 'support/before_tests'
+require 'pry'
+
+
+describe Backup::MoveLogs do
+ before(:all) do
+ BeforeTests.new.run
+ end
+
+ let(:files_location) { "dump/tests" }
+ let!(:config) { Config.new(files_location: files_location, limit: 5) }
+ let!(:db_helper) { DbHelper.new(config) }
+ let!(:move_logs) { Backup::MoveLogs.new(config, db_helper, DryRunReporter.new) }
+
+ describe 'run' do
+ let!(:logs) {
+ FactoryBot.create_list(
+ :log,
+ 10,
+ job_id: 1,
+ content: 'some log content',
+ removed_by: 1,
+ archiving: false,
+ archive_verified: true
+ )
+ }
+
+ def do_in_destination_db(&block)
+ db_helper.do_in_other_db(config.destination_db_url, &block)
+ end
+
+ def destination_logs_size
+ do_in_destination_db do
+ Log.all.size
+ end
+ end
+
+ it 'copies logs to destination database' do
+ source_db_logs = Log.all.as_json
+
+ expect {
+ move_logs.run
+ }.to change { destination_logs_size }.by 10
+
+ destination_db_logs = do_in_destination_db do
+ Log.all.as_json
+ end
+
+ expect(destination_db_logs).to eql(source_db_logs)
+ end
+
+ it 'removes copied logs from source database' do
+ expect {
+ move_logs.run
+ }.to change { Log.all.size }.by -10
+ end
+
+ context 'when memory is limited and data amount big', slow: true do
+ let!(:config) { Config.new(files_location: files_location, limit: 5000) }
+ let!(:db_helper) { DbHelper.new(config) }
+ let!(:move_logs) { Backup::MoveLogs.new(config, db_helper, DryRunReporter.new) }
+ let!(:logs) {
+ FactoryBot.create(
+ :log,
+ job_id: 1,
+ content: 'some log content',
+ removed_by: 1,
+ archiving: false,
+ archive_verified: true
+ )
+ ActiveRecord::Base.connection.execute(%{
+ do $$
+ declare
+ counter integer := 0;
+ begin
+ while counter < 49999 loop
+ insert into logs (content) (select content from logs where content is not null limit 1);
+ counter := counter + 1;
+ end loop;
+ end$$;
+ })
+ }
+
+      def do_with_limited_memory(limit_in_mb)
+        Process.setrlimit(:AS, limit_in_mb * 1_000_000, Process::RLIM_INFINITY)
+        result = yield
+        Process.setrlimit(:AS, Process::RLIM_INFINITY)
+        result
+      end
+
+ it 'runs without memory problems' do
+ do_with_limited_memory(300) do
+ move_logs.run
+ end
+ end
+ end
+ end
+end
diff --git a/spec/backup/remove_old_spec.rb b/spec/backup/remove_old_spec.rb
new file mode 100644
index 0000000..8f56683
--- /dev/null
+++ b/spec/backup/remove_old_spec.rb
@@ -0,0 +1,312 @@
+$: << 'lib'
+require 'uri'
+require 'travis-backup'
+require 'models/build'
+require 'models/job'
+require 'models/organization'
+require 'models/user'
+require 'support/factories'
+require 'support/expected_files'
+require 'support/before_tests'
+require 'support/utils'
+require 'pry'
+
+
+describe Backup::RemoveOld do
+ before(:all) do
+ BeforeTests.new.run
+ end
+
+ let(:files_location) { "dump/tests" }
+ let!(:config) { Config.new(files_location: files_location, limit: 5) }
+ let!(:db_helper) { DbHelper.new(config) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+
+ describe 'process_repo' do
+ let!(:repository) {
+ FactoryBot.create(:repository)
+ }
+
+ it 'processes repository builds' do
+ expect(remove_old).to receive(:process_repo_builds).once.with(repository)
+ remove_old.process_repo(repository)
+ end
+
+ it 'processes repository requests' do
+ expect(remove_old).to receive(:process_repo_requests).once.with(repository)
+ remove_old.process_repo(repository)
+ end
+ end
+
+ describe 'process_repo_builds' do
+ after(:each) do
+ Repository.destroy_all
+ Build.destroy_all
+ Job.destroy_all
+ Log.destroy_all
+ end
+
+ let(:datetime) { (Config.new.threshold + 1).months.ago.to_time.utc }
+ let!(:repository) {
+ ActiveRecord::Base.connection.execute('set session_replication_role = replica;')
+ repository = FactoryBot.create(
+ :repository_with_builds_jobs_and_logs,
+ created_at: datetime,
+ updated_at: datetime
+ )
+ ActiveRecord::Base.connection.execute('set session_replication_role = default;')
+ repository
+ }
+ let!(:repository2) {
+ FactoryBot.create(
+ :repository_with_builds_jobs_and_logs,
+ created_at: datetime,
+ updated_at: datetime,
+ builds_count: 1
+ )
+ }
+ let(:expected_files_creator) {
+ ExpectedFiles.new(repository, datetime)
+ }
+ let!(:expected_builds_json) {
+ expected_files_creator.builds_json
+ }
+ let!(:expected_jobs_jsons) {
+ repository.builds.map do |build|
+ expected_files_creator.jobs_json(build)
+ end
+ }
+ let!(:expected_logs_jsons) {
+ repository.builds.map do |build|
+ build.jobs.map do |job|
+ expected_files_creator.logs_json(job)
+ end
+ end.flatten(1)
+ }
+
+ shared_context 'removing builds and jobs' do
+ it 'should delete all builds of the repository' do
+ remove_old.process_repo_builds(repository)
+ expect(Build.all.map(&:repository_id)).to eq([repository2.id])
+ end
+
+ it 'should delete all jobs of removed builds and leave the rest' do
+ expect {
+ remove_old.process_repo_builds(repository)
+ }.to change { Job.all.size }.by -4
+
+ build_id = Build.first.id
+ expect(Job.all.map(&:source_id)).to eq([build_id, build_id])
+ end
+
+ it 'should delete all logs of removed jobs and leave the rest' do
+ expect {
+ remove_old.process_repo_builds(repository)
+ }.to change { Log.all.size }.by -8
+
+ build_id = Build.first.id
+ expect(Log.all.map(&:job).map(&:source_id)).to eq(Array.new(4, build_id))
+ end
+ end
+
+ shared_context 'not saving JSON to file' do
+ it 'should not save JSON to file' do
+ expect(File).not_to receive(:open)
+ remove_old.process_repo_builds(repository)
+ end
+ end
+
+ context 'when if_backup config is set to true' do
+ it 'should save proper build JSON file' do
+ expect_method_calls_on(
+ File, :write,
+ [JSON.pretty_generate(expected_builds_json)],
+ allow_instances: true,
+ arguments_to_check: :first
+ ) do
+ remove_old.process_repo_builds(repository)
+ end
+ end
+
+ it 'should save proper job JSON files' do
+ expect_method_calls_on(
+ File, :write,
+ [
+ JSON.pretty_generate(expected_jobs_jsons.first),
+ JSON.pretty_generate(expected_jobs_jsons.second)
+ ],
+ allow_instances: true,
+ arguments_to_check: :first
+ ) do
+ remove_old.process_repo_builds(repository)
+ end
+ end
+
+ it 'should save proper log JSON files' do
+ expect_method_calls_on(
+ File, :write,
+ [
+ JSON.pretty_generate(expected_logs_jsons.first),
+ JSON.pretty_generate(expected_logs_jsons.second),
+ JSON.pretty_generate(expected_logs_jsons.third),
+ JSON.pretty_generate(expected_logs_jsons.fourth),
+ ],
+ allow_instances: true,
+ arguments_to_check: :first
+ ) do
+ remove_old.process_repo_builds(repository)
+ end
+ end
+
+ it 'should save JSON files at proper paths' do
+ expect_method_calls_on(
+ File, :open,
+ [
+ Regexp.new('dump/tests/repository_\d+_build_\d+_job_\d+_logs_\d+-\d+.json'),
+ Regexp.new('dump/tests/repository_\d+_build_\d+_job_\d+_logs_\d+-\d+.json'),
+ Regexp.new('dump/tests/repository_\d+_build_\d+_jobs_\d+-\d+.json'),
+ Regexp.new('dump/tests/repository_\d+_build_\d+_job_\d+_logs_\d+-\d+.json'),
+ Regexp.new('dump/tests/repository_\d+_build_\d+_job_\d+_logs_\d+-\d+.json'),
+ Regexp.new('dump/tests/repository_\d+_build_\d+_jobs_\d+-\d+.json'),
+ Regexp.new('dump/tests/repository_\d+_builds_\d+-\d+.json')
+ ],
+ match_mode: :match,
+ arguments_to_check: :first
+ ) do
+ remove_old.process_repo_builds(repository)
+ end
+ end
+
+ it_behaves_like 'removing builds and jobs'
+
+ context 'when path with nonexistent folders is given' do
+ let(:random_files_location) { "dump/tests/#{rand(100000)}" }
+ let!(:config) { Config.new(files_location: random_files_location, limit: 2) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+
+ it 'should create needed folders' do
+ expect(FileUtils).to receive(:mkdir_p).once.with(random_files_location).and_call_original
+ remove_old.process_repo_builds(repository)
+ end
+ end
+ end
+
+ context 'when if_backup config is set to false' do
+ let!(:config) { Config.new(files_location: files_location, limit: 2, if_backup: false) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+ it_behaves_like 'not saving JSON to file'
+ it_behaves_like 'removing builds and jobs'
+ end
+
+ context 'when dry_run config is set to true' do
+ let!(:config) { Config.new(files_location: files_location, limit: 2, dry_run: true) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+ it_behaves_like 'not saving JSON to file'
+
+ it 'should not delete builds' do
+ expect {
+ remove_old.process_repo_builds(repository)
+ }.not_to change { Build.all.size }
+ end
+
+ it 'should not delete jobs' do
+ expect {
+ remove_old.process_repo_builds(repository)
+ }.not_to change { Job.all.size }
+ end
+ end
+ end
+
+ describe 'process_repo_requests' do
+ after(:each) do
+ Repository.destroy_all
+ Request.destroy_all
+ end
+
+ let(:datetime) { (Config.new.threshold + 1).months.ago.to_time.utc }
+ let!(:repository) {
+ FactoryBot.create(
+ :repository_with_requests,
+ created_at: datetime,
+ updated_at: datetime
+ )
+ }
+ let!(:repository2) {
+ FactoryBot.create(
+ :repository_with_requests,
+ created_at: datetime,
+ updated_at: datetime,
+ requests_count: 1
+ )
+ }
+
+
+ let!(:expected_requests_json) {
+ ExpectedFiles.new(repository, datetime).requests_json
+ }
+
+ shared_context 'removing requests' do
+ it 'should delete all requests of the repository' do
+ remove_old.process_repo_requests(repository)
+ expect(Request.all.map(&:repository_id)).to eq([repository2.id])
+ end
+ end
+
+ shared_context 'not saving JSON to file' do
+ it 'should not save JSON to file' do
+ expect(File).not_to receive(:open)
+ remove_old.process_repo_requests(repository)
+ end
+ end
+
+ context 'when if_backup config is set to true' do
+ it 'should save proper build JSON to file' do
+ expect_any_instance_of(File).to receive(:write).once.with(JSON.pretty_generate(expected_requests_json))
+ remove_old.process_repo_requests(repository)
+ end
+
+ it 'should save JSON to file at proper path' do
+ expect(File).to receive(:open).once.with(Regexp.new(files_location), 'w')
+ remove_old.process_repo_requests(repository)
+ end
+
+ it_behaves_like 'removing requests'
+
+ context 'when path with nonexistent folders is given' do
+ let(:random_files_location) { "dump/tests/#{rand(100000)}" }
+ let!(:config) { Config.new(files_location: random_files_location, limit: 2) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+ it 'should create needed folders' do
+ expect(FileUtils).to receive(:mkdir_p).once.with(random_files_location).and_call_original
+ remove_old.process_repo_requests(repository)
+ end
+ end
+ end
+
+ context 'when if_backup config is set to false' do
+ let!(:config) { Config.new(files_location: files_location, limit: 2, if_backup: false) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+ it_behaves_like 'not saving JSON to file'
+ it_behaves_like 'removing requests'
+ end
+
+ context 'when dry_run config is set to true' do
+ let!(:config) { Config.new(files_location: files_location, limit: 2, dry_run: true) }
+ let!(:remove_old) { Backup::RemoveOld.new(config, DryRunReporter.new) }
+
+ it_behaves_like 'not saving JSON to file'
+
+ it 'should not delete requests' do
+ expect {
+ remove_old.process_repo_requests(repository)
+ }.not_to change { Request.all.size }
+ end
+ end
+ end
+end
diff --git a/spec/backup/remove_orphans_spec.rb b/spec/backup/remove_orphans_spec.rb
new file mode 100644
index 0000000..d754685
--- /dev/null
+++ b/spec/backup/remove_orphans_spec.rb
@@ -0,0 +1,157 @@
+$: << 'lib'
+require 'uri'
+require 'travis-backup'
+require 'models/build'
+require 'models/job'
+require 'models/organization'
+require 'models/user'
+require 'support/factories'
+require 'support/expected_files'
+require 'support/before_tests'
+require 'pry'
+require 'database_cleaner/active_record'
+
+describe Backup::RemoveOrphans do
+ before(:all) do
+ BeforeTests.new.run
+ end
+
+ let(:files_location) { "dump/tests" }
+ let!(:config) { Config.new(files_location: files_location, limit: 5) }
+ let!(:db_helper) { DbHelper.new(config) }
+ let!(:remove_orphans) { Backup::RemoveOrphans.new(config, DryRunReporter.new) }
+
+ describe 'run' do
+ before(:each) do
+ DatabaseCleaner.strategy = :truncation
+ DatabaseCleaner.clean
+ end
+
+ let!(:data) {
+ db_helper.do_without_triggers do
+ FactoryBot.create_list(:repository, 2)
+ FactoryBot.create_list(:build, 2)
+ FactoryBot.create_list(:job, 2)
+ FactoryBot.create_list(:branch, 2)
+ FactoryBot.create_list(:tag, 2)
+ FactoryBot.create_list(:commit, 2)
+ FactoryBot.create_list(:cron, 2)
+ FactoryBot.create_list(:pull_request, 2)
+ FactoryBot.create_list(:request, 2)
+ FactoryBot.create_list(:stage, 2)
+ FactoryBot.create_list(:repository_orphaned_on_current_build_id, 2)
+ FactoryBot.create_list(:repository_with_current_build_id, 2)
+ FactoryBot.create_list(:repository_orphaned_on_last_build_id, 2)
+ FactoryBot.create_list(:repository_with_last_build_id, 2)
+ FactoryBot.create_list(:build_orphaned_on_repository_id_with_mutually_related_repo, 2)
+ FactoryBot.create_list(:build_with_repository_id, 2)
+ FactoryBot.create_list(:build_orphaned_on_commit_id_with_mutually_related_repo, 2)
+ FactoryBot.create_list(:build_with_commit_id, 2)
+ FactoryBot.create_list(:build_orphaned_on_request_id_with_mutually_related_repo, 2)
+ FactoryBot.create_list(:build_with_request_id, 2)
+ FactoryBot.create_list(:build_orphaned_on_pull_request_id_with_mutually_related_repo, 2)
+ FactoryBot.create_list(:build_with_pull_request_id, 2)
+ FactoryBot.create_list(:build_orphaned_on_branch_id_with_mutually_related_repo, 2)
+ FactoryBot.create_list(:build_with_branch_id, 2)
+ FactoryBot.create_list(:build_orphaned_on_tag_id_with_mutually_related_repo, 2)
+ FactoryBot.create_list(:build_with_tag_id, 2)
+ FactoryBot.create_list(:job_orphaned_on_repository_id, 2)
+ FactoryBot.create_list(:job_with_repository_id, 2)
+ FactoryBot.create_list(:job_orphaned_on_commit_id, 2)
+ FactoryBot.create_list(:job_with_commit_id, 2)
+ FactoryBot.create_list(:job_orphaned_on_stage_id, 2)
+ FactoryBot.create_list(:job_with_stage_id, 2)
+ FactoryBot.create_list(:branch_orphaned_on_repository_id, 2)
+ FactoryBot.create_list(:branch_orphaned_on_last_build_id, 2)
+ FactoryBot.create_list(:branch_with_last_build_id, 2)
+ FactoryBot.create_list(:tag_orphaned_on_repository_id, 2)
+ FactoryBot.create_list(:tag_with_repository_id, 2)
+ FactoryBot.create_list(:tag_orphaned_on_last_build_id, 2)
+ FactoryBot.create_list(:tag_with_last_build_id, 2)
+ FactoryBot.create_list(:commit_orphaned_on_repository_id, 2)
+ FactoryBot.create_list(:commit_with_repository_id, 2)
+ FactoryBot.create_list(:commit_orphaned_on_branch_id, 2)
+ FactoryBot.create_list(:commit_with_branch_id, 2)
+ FactoryBot.create_list(:commit_orphaned_on_tag_id, 2)
+ FactoryBot.create_list(:commit_with_tag_id, 2)
+ FactoryBot.create_list(:cron_orphaned_on_branch_id, 2)
+ FactoryBot.create_list(:cron_with_branch_id, 2)
+ FactoryBot.create_list(:pull_request_orphaned_on_repository_id, 2)
+ FactoryBot.create_list(:pull_request_with_repository_id, 2)
+ FactoryBot.create_list(:request_orphaned_on_commit_id, 2)
+ FactoryBot.create_list(:request_with_commit_id, 2)
+ FactoryBot.create_list(:request_orphaned_on_pull_request_id, 2)
+ FactoryBot.create_list(:request_with_pull_request_id, 2)
+ FactoryBot.create_list(:request_orphaned_on_branch_id, 2)
+ FactoryBot.create_list(:request_with_branch_id, 2)
+ FactoryBot.create_list(:request_orphaned_on_tag_id, 2)
+ FactoryBot.create_list(:request_with_tag_id, 2)
+ FactoryBot.create_list(:stage_orphaned_on_build_id, 2)
+ FactoryBot.create_list(:stage_with_build_id, 2)
+ end
+ }
+ it 'removes orphaned repositories (with these dependent on orphaned builds)' do
+ expect { remove_orphans.run }.to change { Repository.all.size }.by -16
+ end
+
+ it 'removes orphaned builds' do
+ expect { remove_orphans.run }.to change { Build.all.size }.by -12
+ end
+
+ it 'removes orphaned jobs' do
+ expect { remove_orphans.run }.to change { Job.all.size }.by -6
+ end
+
+ it 'removes orphaned branches' do
+ expect { remove_orphans.run }.to change { Branch.all.size }.by -4
+ end
+
+ it 'removes orphaned tags' do
+ expect { remove_orphans.run }.to change { Tag.all.size }.by -4
+ end
+
+ it 'removes orphaned commits' do
+ expect { remove_orphans.run }.to change { Commit.all.size }.by -6
+ end
+
+ it 'removes orphaned crons' do
+ expect { remove_orphans.run }.to change { Cron.all.size }.by -2
+ end
+
+ it 'removes orphaned pull requests' do
+ expect { remove_orphans.run }.to change { PullRequest.all.size }.by -2
+ end
+
+ it 'removes orphaned requests' do
+ expect { remove_orphans.run }.to change { Request.all.size }.by -8
+ end
+
+ it 'removes orphaned stages' do
+ expect { remove_orphans.run }.to change { Stage.all.size }.by -2
+ end
+
+ context 'when dry run mode is on' do
+ let!(:config) { Config.new(files_location: files_location, limit: 5, dry_run: true) }
+ let!(:remove_orphans) { Backup::RemoveOrphans.new(config, DryRunReporter.new) }
+
+ before do
+ allow_any_instance_of(IO).to receive(:puts)
+ end
+
+ it 'prepares proper dry run report' do
+ remove_orphans.run
+ report = remove_orphans.dry_run_report
+ expect(report[:repositories].size).to eql 16
+ expect(report[:builds].size).to eql 12
+ expect(report[:jobs].size).to eql 6
+ expect(report[:branches].size).to eql 4
+ expect(report[:tags].size).to eql 4
+ expect(report[:commits].size).to eql 6
+ expect(report[:crons].size).to eql 2
+ expect(report[:pull_requests].size).to eql 2
+ expect(report[:requests].size).to eql 8
+ expect(report[:stages].size).to eql 2
+ end
+ end
+ end
+end
diff --git a/spec/backup_spec.rb b/spec/backup_spec.rb
index 31e440f..b7a49d7 100644
--- a/spec/backup_spec.rb
+++ b/spec/backup_spec.rb
@@ -1,196 +1,147 @@
$: << 'lib'
-require 'backup'
-require 'models/repository'
+require 'uri'
+require 'travis-backup'
+require 'models/build'
+require 'models/job'
+require 'models/organization'
+require 'models/user'
require 'support/factories'
+require 'support/expected_files'
+require 'support/before_tests'
+require 'support/utils'
require 'pry'
describe Backup do
- let!(:config) { Config.new }
- let!(:backup) { Backup.new }
- let(:datetime) { (config.delay + 1).months.ago.to_time.utc }
- let(:org_id) { rand(100000) }
- let(:com_id) { rand(100000) }
- let(:private_org_id) { rand(100000) }
- let(:private_com_id) { rand(100000) }
- let(:repository) {
- FactoryBot.create(
- :repository
- )
- }
- let(:private_repository) {
- FactoryBot.create(
- :repository,
- private: true
- )
- }
- let(:build_config) {
- FactoryBot.create(
- :build_config,
- repository_id: repository.id,
- key: '',
- org_id: org_id,
- com_id: com_id,
- config: ''
- )
- }
- let(:private_build_config) {
- FactoryBot.create(
- :build_config,
- repository_id: private_repository.id,
- key: '',
- org_id: private_org_id,
- com_id: private_com_id,
- config: ''
- )
- }
- let(:build) {
- FactoryBot.create(
- :build,
- created_at: datetime,
- updated_at: datetime,
- repository: repository,
- build_config: build_config
- )
- }
- let(:private_build) {
- FactoryBot.create(
- :build,
- created_at: datetime,
- updated_at: datetime,
- repository: private_repository,
- build_config: private_build_config
- )
- }
- let(:job_config) {
- FactoryBot.create(
- :job_config,
- repository_id: repository.id,
- key: '',
- org_id: org_id,
- com_id: com_id,
- config: ''
- )
- }
- let(:private_job_config) {
- FactoryBot.create(
- :job_config,
- repository_id: private_repository.id,
- key: '',
- org_id: private_org_id,
- com_id: private_com_id,
- config: ''
- )
- }
- let(:job) {
- FactoryBot.create(
- :job,
- created_at: datetime,
- updated_at: datetime,
- source_id: build.id,
- source_type: 'Build',
- repository: repository,
- job_config: job_config
- )
- }
- let(:private_job) {
- FactoryBot.create(
- :job,
- created_at: datetime,
- updated_at: datetime,
- source_id: private_build.id,
- source_type: 'Build',
- repository: private_repository,
- job_config: private_job_config
- )
- }
- let(:exported_object) {
- [[{"id"=>build.id,
- "repository_id"=>repository.id,
- "number"=>nil,
- "started_at"=>nil,
- "finished_at"=>nil,
- "created_at"=>datetime,
- "updated_at"=>datetime,
- "commit_id"=>nil,
- "request_id"=>nil,
- "state"=>nil,
- "duration"=>nil,
- "owner_id"=>nil,
- "owner_type"=>nil,
- "event_type"=>nil,
- "previous_state"=>nil,
- "pull_request_title"=>nil,
- "pull_request_number"=>nil,
- "branch"=>nil,
- "canceled_at"=>nil,
- "cached_matrix_ids"=>nil,
- "received_at"=>nil,
- "private"=>nil,
- "pull_request_id"=>nil,
- "branch_id"=>nil,
- "tag_id"=>nil,
- "sender_id"=>nil,
- "sender_type"=>nil,
- "org_id"=>nil,
- "com_id"=>nil,
- "config_id"=>build_config.id,
- "restarted_at"=>nil,
- "unique_number"=>nil,
- :build_config=>{"id"=>build_config.id, "repository_id"=>repository.id, "key"=>"", "org_id"=>org_id, "com_id"=>com_id, "config"=>""},
- :jobs=>
- [{"id"=>job.id,
- "repository_id"=>repository.id,
- "commit_id"=>nil,
- "source_id"=>build.id,
- "source_type"=>"Build",
- "queue"=>nil,
- "type"=>nil,
- "state"=>nil,
- "number"=>nil,
- "worker"=>nil,
- "started_at"=>nil,
- "finished_at"=>nil,
- "created_at"=>datetime,
- "updated_at"=>datetime,
- "tags"=>nil,
- "allow_failure"=>false,
- "owner_id"=>nil,
- "owner_type"=>nil,
- "result"=>nil,
- "queued_at"=>nil,
- "canceled_at"=>nil,
- "received_at"=>nil,
- "debug_options"=>nil,
- "private"=>nil,
- "job_state_id"=>nil,
- "stage_number"=>nil,
- "stage_id"=>nil,
- "org_id"=>nil,
- "com_id"=>nil,
- "config_id"=>job_config.id,
- "restarted_at"=>nil,
- "priority"=>nil,
- :job_config=>{"id"=>job_config.id, "repository_id"=>repository.id, "key"=>"", "org_id"=>org_id, "com_id"=>com_id, "config"=>""},
- :log_url=>"https://api.travis-ci.org/v3/job/#{job.id}/log.txt"}]}]]
- }
-
- before do
- build.jobs = [job]
- repository.builds = [build]
- private_build.jobs = [private_job]
- private_repository.builds = [private_build]
+ before(:all) do
+ BeforeTests.new.run
end
- it 'should prepare proper JSON export' do
- build_export = backup.process_repo(repository)
- build_export.first.first[:updated_at] = datetime
- build_export.first.first[:jobs].first[:updated_at] = datetime
- expect(build_export.to_json).to eq(exported_object.to_json)
- end
+ let(:files_location) { "dump/tests" }
+ let!(:backup) { Backup.new(files_location: files_location, limit: 5) }
+
+ describe 'run' do
+ after(:each) do
+ Organization.destroy_all
+ User.destroy_all
+ Repository.destroy_all
+ Build.destroy_all
+ Job.destroy_all
+ Log.destroy_all
+ Request.destroy_all
+ end
+
+ let!(:unassigned_repositories) {
+ FactoryBot.create_list(:repository_with_requests, 3)
+ }
+ let!(:user1) {
+ FactoryBot.create(:user_with_repos)
+ }
+ let!(:user2) {
+ FactoryBot.create(:user_with_repos)
+ }
+ let!(:organization1) {
+ FactoryBot.create(:organization_with_repos)
+ }
+ let!(:organization2) {
+ FactoryBot.create(:organization_with_repos)
+ }
+
+ context 'when no arguments are given' do
+ it 'processes every repository' do
+ Repository.all.each do |repository|
+ expect_any_instance_of(Backup::RemoveOld).to receive(:process_repo_builds).once.with(repository)
+ end
+ backup.run
+ end
+ end
+
+ context 'when user_id is given' do
+ it 'processes only the repositories of the given user' do
+ user_repos = Repository.where('owner_id = ? and owner_type = ?', user1.id, 'User')
+
+ expect_method_calls_on(
+ Backup::RemoveOld,
+ :process_repo_builds,
+ user_repos,
+ allow_instances: true,
+ arguments_to_check: :first
+ ) do
+ backup.run(user_id: user1.id)
+ end
+ end
+ end
+
+ context 'when org_id is given' do
+ it 'processes only the repositories of the given organization' do
+ org_repos = Repository.where('owner_id = ? and owner_type = ?', organization1.id, 'Organization')
+
+ expect_method_calls_on(
+ Backup::RemoveOld,
+ :process_repo_builds,
+ org_repos,
+ allow_instances: true,
+ arguments_to_check: :first
+ ) do
+ backup.run(org_id: organization1.id)
+ end
+ end
+ end
+
+ context 'when repo_id is given' do
+ it 'processes only the repository with the given id' do
+ repo = Repository.first
+ expect_any_instance_of(Backup::RemoveOld).to receive(:process_repo_builds).once.with(repo)
+ backup.run(repo_id: repo.id)
+ end
+ end
+
+ context 'when move logs mode is on' do
+ let!(:backup) { Backup.new(files_location: files_location, limit: 5, move_logs: true) }
+
+ it 'does not process repositories' do
+ expect(backup).not_to receive(:process_repo)
+ backup.run
+ end
+
+ it 'moves logs' do
+ expect_any_instance_of(Backup::MoveLogs).to receive(:run).once
+ backup.run
+ end
+ end
+
+ context 'when remove orphans mode is on' do
+ let!(:backup) { Backup.new(files_location: files_location, limit: 5, remove_orphans: true) }
+
+ it 'does not process repositories' do
+ expect(backup).not_to receive(:process_repo)
+ backup.run
+ end
+
+ it 'removes orphans' do
+ expect_any_instance_of(Backup::RemoveOrphans).to receive(:run).once
+ backup.run
+ end
+ end
+
+ context 'when dry run mode is on' do
+ let!(:backup) { Backup.new(files_location: files_location, limit: 10, dry_run: true, threshold: 0) }
+
+ before do
+ allow_any_instance_of(IO).to receive(:puts)
+ end
+
+ it 'prepares proper dry run report' do
+ backup.run
+ expect(backup.dry_run_report[:builds].size).to eql 24
+ expect(backup.dry_run_report[:jobs].size).to eql 48
+ expect(backup.dry_run_report[:logs].size).to eql 96
+ expect(backup.dry_run_report[:requests].size).to eql 6
+ end
- it 'for private repository should prepare proper JSON export with token for log urls' do
- build_export = backup.process_repo(private_repository)
- log_url = build_export.first.first[:jobs].first[:log_url]
- expect(log_url).to include('?log.token=')
+ it 'prints dry run report' do
+ expect_any_instance_of(DryRunReporter).to receive(:print_report).once
+ backup.run
+ end
+ end
end
end
diff --git a/spec/support/before_tests.rb b/spec/support/before_tests.rb
new file mode 100644
index 0000000..d3fb128
--- /dev/null
+++ b/spec/support/before_tests.rb
@@ -0,0 +1,11 @@
+class BeforeTests
+ def run
+ config = Config.new
+ system("psql '#{config.database_url}' -f db/schema.sql > /dev/null 2> /dev/null")
+ if config.destination_db_url
+ system("psql '#{config.destination_db_url}' -f db/schema.sql > /dev/null 2> /dev/null")
+ end
+ end
+end
+
+ARGV.replace(['-t', '6'])
diff --git a/spec/support/expected_files.rb b/spec/support/expected_files.rb
new file mode 100644
index 0000000..74c82cb
--- /dev/null
+++ b/spec/support/expected_files.rb
@@ -0,0 +1,228 @@
+class ExpectedFiles
+ def initialize(repository, datetime)
+ @repository = repository
+ @datetime = datetime
+ end
+
+ def builds_json
+ [
+ {
+ "id": @repository.builds.first.id,
+ "repository_id": @repository.id,
+ "number": nil,
+ "started_at": nil,
+ "finished_at": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "config": nil,
+ "commit_id": nil,
+ "request_id": nil,
+ "state": nil,
+ "duration": nil,
+ "owner_id": nil,
+ "owner_type": nil,
+ "event_type": nil,
+ "previous_state": nil,
+ "pull_request_title": nil,
+ "pull_request_number": nil,
+ "branch": nil,
+ "canceled_at": nil,
+ "cached_matrix_ids": nil,
+ "received_at": nil,
+ "private": nil,
+ "pull_request_id": nil,
+ "branch_id": nil,
+ "tag_id": nil,
+ "sender_id": nil,
+ "sender_type": nil
+ },
+ {
+ "id": @repository.builds.second.id,
+ "repository_id": @repository.id,
+ "number": nil,
+ "started_at": nil,
+ "finished_at": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "config": nil,
+ "commit_id": nil,
+ "request_id": nil,
+ "state": nil,
+ "duration": nil,
+ "owner_id": nil,
+ "owner_type": nil,
+ "event_type": nil,
+ "previous_state": nil,
+ "pull_request_title": nil,
+ "pull_request_number": nil,
+ "branch": nil,
+ "canceled_at": nil,
+ "cached_matrix_ids": nil,
+ "received_at": nil,
+ "private": nil,
+ "pull_request_id": nil,
+ "branch_id": nil,
+ "tag_id": nil,
+ "sender_id": nil,
+ "sender_type": nil,
+ }
+ ]
+ end
+
+ def jobs_json(build)
+ [
+ {
+ "id": build.jobs.first.id,
+ "repository_id": @repository.id,
+ "commit_id": nil,
+ "source_id": build.id,
+ "source_type": "Build",
+ "queue": nil,
+ "type": nil,
+ "state": nil,
+ "number": nil,
+ "config": nil,
+ "worker": nil,
+ "started_at": nil,
+ "finished_at": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "tags": nil,
+ "allow_failure": false,
+ "owner_id": nil,
+ "owner_type": nil,
+ "result": nil,
+ "queued_at": nil,
+ "canceled_at": nil,
+ "received_at": nil,
+ "debug_options": nil,
+ "private": nil,
+ "stage_number": nil,
+ "stage_id": nil,
+ },
+ {
+ "id": build.jobs.second.id,
+ "repository_id": @repository.id,
+ "commit_id": nil,
+ "source_id": build.id,
+ "source_type": "Build",
+ "queue": nil,
+ "type": nil,
+ "state": nil,
+ "number": nil,
+ "config": nil,
+ "worker": nil,
+ "started_at": nil,
+ "finished_at": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "tags": nil,
+ "allow_failure": false,
+ "owner_id": nil,
+ "owner_type": nil,
+ "result": nil,
+ "queued_at": nil,
+ "canceled_at": nil,
+ "received_at": nil,
+ "debug_options": nil,
+ "private": nil,
+ "stage_number": nil,
+ "stage_id": nil,
+ }
+ ]
+ end
+
+ def logs_json(job)
+ [
+ {
+ "id": job.logs.first.id,
+ "job_id": job.id,
+ "content": "some log content",
+ "removed_by": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "aggregated_at": nil,
+ "archived_at": nil,
+ "purged_at": nil,
+ "removed_at": nil,
+ "archiving": false,
+ "archive_verified": true
+ },
+ {
+ "id": job.logs.second.id,
+ "job_id": job.id,
+ "content": "some log content",
+ "removed_by": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "aggregated_at": nil,
+ "archived_at": nil,
+ "purged_at": nil,
+ "removed_at": nil,
+ "archiving": false,
+ "archive_verified": true
+ }
+ ]
+ end
+
+ def requests_json
+ [
+ {
+ "id": @repository.requests.first.id,
+ "repository_id": @repository.id,
+ "commit_id": nil,
+ "state": nil,
+ "source": nil,
+ "payload": nil,
+ "token": nil,
+ "config": nil,
+ "started_at": nil,
+ "finished_at": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "event_type": nil,
+ "comments_url": nil,
+ "base_commit": nil,
+ "head_commit": nil,
+ "owner_id": nil,
+ "owner_type": nil,
+ "result": nil,
+ "message": nil,
+ "private": nil,
+ "pull_request_id": nil,
+ "branch_id": nil,
+ "tag_id": nil,
+ "sender_id": nil,
+ "sender_type": nil
+ },
+ {
+ "id": @repository.requests.second.id,
+ "repository_id": @repository.id,
+ "commit_id": nil,
+ "state": nil,
+ "source": nil,
+ "payload": nil,
+ "token": nil,
+ "config": nil,
+ "started_at": nil,
+ "finished_at": nil,
+ "created_at": @datetime,
+ "updated_at": @datetime,
+ "event_type": nil,
+ "comments_url": nil,
+ "base_commit": nil,
+ "head_commit": nil,
+ "owner_id": nil,
+ "owner_type": nil,
+ "result": nil,
+ "message": nil,
+ "private": nil,
+ "pull_request_id": nil,
+ "branch_id": nil,
+ "tag_id": nil,
+ "sender_id": nil,
+ "sender_type": nil
+ }
+ ]
+ end
+end
diff --git a/spec/support/factories.rb b/spec/support/factories.rb
index ad1e927..cf345f6 100644
--- a/spec/support/factories.rb
+++ b/spec/support/factories.rb
@@ -3,9 +3,377 @@
require 'factory_bot'
FactoryBot.define do
- factory :repository, class: Repository
- factory :build, class: Build
- factory :build_config, class: BuildConfig
- factory :job, class: Job
- factory :job_config, class: JobConfig
+ factory :organization do
+ factory :organization_with_repos do
+ transient do
+ repos_count { 3 }
+ end
+ after(:create) do |organization, evaluator|
+ create_list(
+ :repository_with_builds_jobs_and_logs,
+ evaluator.repos_count,
+ owner_id: organization.id,
+ owner_type: 'Organization'
+ )
+ end
+ end
+ end
+
+ factory :user do
+ factory :user_with_repos do
+ transient do
+ repos_count { 3 }
+ end
+ after(:create) do |user, evaluator|
+ create_list(
+ :repository_with_builds_jobs_and_logs,
+ evaluator.repos_count,
+ owner_id: user.id,
+ owner_type: 'User'
+ )
+ end
+ end
+ end
+
+ factory :repository do
+ factory :repository_with_builds_jobs_and_logs do
+ transient do
+ builds_count { 2 }
+ end
+ after(:create) do |repository, evaluator|
+ create_list(
+ :build_with_jobs_and_logs,
+ evaluator.builds_count,
+ repository: repository,
+ created_at: repository.created_at,
+ updated_at: repository.updated_at
+ )
+ end
+ end
+
+ factory :repository_with_builds do
+ transient do
+ builds_count { 2 }
+ end
+ after(:create) do |repository, evaluator|
+ create_list(
+ :build,
+ evaluator.builds_count,
+ repository: repository,
+ created_at: repository.created_at,
+ updated_at: repository.updated_at
+ )
+ end
+ end
+
+ factory :repository_orphaned_on_current_build_id do
+ current_build_id { 2_000_000_000 }
+ end
+
+ factory :repository_with_current_build_id do
+ current_build_id { Build.first.id }
+ end
+
+ factory :repository_orphaned_on_last_build_id do
+ last_build_id { 2_000_000_000 }
+ end
+
+ factory :repository_with_last_build_id do
+ last_build_id { Build.first.id }
+ end
+
+ factory :repository_with_requests do
+ transient do
+ requests_count { 2 }
+ end
+ after(:create) do |repository, evaluator|
+ create_list(
+ :request,
+ evaluator.requests_count,
+ repository: repository,
+ created_at: repository.created_at,
+ updated_at: repository.updated_at
+ )
+ end
+ end
+ end
+
+ factory :build do
+ factory :build_with_jobs_and_logs do
+ transient do
+ jobs_count { 2 }
+ end
+ after(:create) do |build, evaluator|
+ create_list(
+ :job_with_logs,
+ evaluator.jobs_count,
+ repository: build.repository,
+ source_type: 'Build',
+ source_id: build.id,
+ created_at: build.created_at,
+ updated_at: build.updated_at
+ )
+ end
+ end
+
+ factory :build_with_repo do
+ after(:create) do |build|
+ create(
+ :repository,
+ current_build_id: build.id,
+ created_at: build.created_at,
+ updated_at: build.updated_at
+ )
+ end
+
+ factory :build_orphaned_on_repository_id_with_mutually_related_repo do
+ repository_id { 2_000_000_000 }
+ end
+ end
+
+ factory :build_with_repository_id do
+ repository_id { Repository.first.id }
+ end
+
+ factory :build_with_mutually_related_repo do
+ after(:create) do |build|
+ repo = create(
+ :repository,
+ current_build_id: build.id,
+ created_at: build.created_at,
+ updated_at: build.updated_at
+ )
+ build.repository_id = repo.id
+ build.save!
+ end
+
+ factory :build_orphaned_on_commit_id_with_mutually_related_repo do
+ commit_id { 2_000_000_000 }
+ end
+
+ factory :build_orphaned_on_request_id_with_mutually_related_repo do
+ request_id { 2_000_000_000 }
+ end
+
+ factory :build_orphaned_on_pull_request_id_with_mutually_related_repo do
+ pull_request_id { 2_000_000_000 }
+ end
+
+ factory :build_orphaned_on_branch_id_with_mutually_related_repo do
+ branch_id { 2_000_000_000 }
+ end
+
+ factory :build_orphaned_on_tag_id_with_mutually_related_repo do
+ tag_id { 2_000_000_000 }
+ end
+ end
+
+ factory :build_with_commit_id do
+ commit_id { Commit.first.id }
+ end
+
+ factory :build_with_request_id do
+ request_id { Request.first.id }
+ end
+
+ factory :build_with_pull_request_id do
+ pull_request_id { PullRequest.first.id }
+ end
+
+ factory :build_with_branch_id do
+ branch_id { Branch.first.id }
+ end
+
+ factory :build_with_tag_id do
+ tag_id { Tag.first.id }
+ end
+ end
+
+ factory :job do
+ factory :job_with_logs do
+ transient do
+ logs_count { 2 }
+ end
+ after(:create) do |job, evaluator|
+ create_list(
+ :log,
+ evaluator.logs_count,
+ job_id: job.id,
+ content: 'some log content',
+ removed_by: nil,
+ archiving: false,
+ archive_verified: true,
+ created_at: job.created_at,
+ updated_at: job.updated_at
+ )
+ end
+ end
+
+ factory :job_orphaned_on_repository_id do
+ repository_id { 2_000_000_000 }
+ end
+
+ factory :job_with_repository_id do
+ repository_id { Repository.first.id }
+ end
+
+ factory :job_orphaned_on_commit_id do
+ commit_id { 2_000_000_000 }
+ end
+
+ factory :job_with_commit_id do
+ commit_id { Commit.first.id }
+ end
+
+ factory :job_orphaned_on_stage_id do
+ stage_id { 2_000_000_000 }
+ end
+
+ factory :job_with_stage_id do
+ stage_id { Stage.first.id }
+ end
+ end
+
+ factory :log do
+ job_id { 1 }
+ content { 'some log content' }
+ removed_by { 1 }
+ archiving { false }
+ archive_verified { true }
+ end
+
+ factory :branch do
+ name { "branch_#{Time.now.to_f}" }
+ repository_id { 1 }
+ factory :branch_orphaned_on_repository_id do
+ repository_id { 2_000_000_000 }
+ end
+
+ factory :branch_orphaned_on_last_build_id do
+ last_build_id { 2_000_000_000 }
+ end
+
+ factory :branch_with_last_build_id do
+ last_build_id { Build.first.id }
+ end
+ end
+
+ factory :tag do
+ factory :tag_orphaned_on_repository_id do
+ repository_id { 2_000_000_000 }
+ end
+
+ factory :tag_with_repository_id do
+ repository_id { Repository.first.id }
+ end
+
+ factory :tag_orphaned_on_last_build_id do
+ last_build_id { 2_000_000_000 }
+ end
+
+ factory :tag_with_last_build_id do
+ last_build_id { Build.first.id }
+ end
+ end
+
+ factory :commit do
+ factory :commit_orphaned_on_repository_id do
+ repository_id { 2_000_000_000 }
+ end
+
+ factory :commit_with_repository_id do
+ repository_id { Repository.first.id }
+ end
+
+ factory :commit_orphaned_on_branch_id do
+ branch_id { 2_000_000_000 }
+ end
+
+ factory :commit_with_branch_id do
+ branch_id { Branch.first.id }
+ end
+
+ factory :commit_orphaned_on_tag_id do
+ tag_id { 2_000_000_000 }
+ end
+
+ factory :commit_with_tag_id do
+ tag_id { Tag.first.id }
+ end
+ end
+
+ factory :cron do
+ interval { 'test' }
+ factory :cron_orphaned_on_branch_id do
+ branch_id { 2_000_000_000 }
+ end
+
+ factory :cron_with_branch_id do
+ branch_id { Branch.first.id }
+ end
+ end
+
+ factory :pull_request do
+ factory :pull_request_orphaned_on_repository_id do
+ repository_id { 2_000_000_000 }
+ end
+
+ factory :pull_request_with_repository_id do
+ repository_id { Repository.first.id }
+ end
+ end
+
+ factory :ssl_key do
+ factory :ssl_key_orphaned_on_repository_id do
+ repository_id { 2_000_000_000 }
+ end
+
+ factory :ssl_key_with_repository_id do
+ repository_id { Repository.first.id }
+ end
+ end
+
+ factory :request do
+ factory :request_orphaned_on_commit_id do
+ commit_id { 2_000_000_000 }
+ end
+
+ factory :request_with_commit_id do
+ commit_id { Commit.first.id }
+ end
+
+ factory :request_orphaned_on_pull_request_id do
+ pull_request_id { 2_000_000_000 }
+ end
+
+ factory :request_with_pull_request_id do
+ pull_request_id { PullRequest.first.id }
+ end
+
+ factory :request_orphaned_on_branch_id do
+ branch_id { 2_000_000_000 }
+ end
+
+ factory :request_with_branch_id do
+ branch_id { Branch.first.id }
+ end
+
+ factory :request_orphaned_on_tag_id do
+ tag_id { 2_000_000_000 }
+ end
+
+ factory :request_with_tag_id do
+ tag_id { Tag.first.id }
+ end
+ end
+
+ factory :stage do
+ factory :stage_orphaned_on_build_id do
+ build_id { 2_000_000_000 }
+ end
+
+ factory :stage_with_build_id do
+ build_id { Build.first.id }
+ end
+ end
end
diff --git a/spec/support/utils.rb b/spec/support/utils.rb
new file mode 100644
index 0000000..6958d34
--- /dev/null
+++ b/spec/support/utils.rb
@@ -0,0 +1,30 @@
+def expect_method_calls_on(cl, method, call_with, options)
+ match_mode = options[:mode] || :including
+ allow_instances = options[:allow_instances] || false
+ arguments_to_check = options[:arguments_to_check] || :all
+
+ calls_args = []
+
+ allowed = allow_instances ? allow_any_instance_of(cl) : allow(cl)
+
+  allowed.to receive(method).and_wrap_original do |original_method, *args, &block|
+    if arguments_to_check == :all
+      calls_args.push(args)
+    else
+      calls_args.push(args.send(arguments_to_check)) # = args.first, args.second, args.third etc.
+    end
+    original_method.call(*args, &block)
+  end
+
+ yield
+
+ case match_mode
+ when :including
+ call_with.each do |args|
+ expect(calls_args).to include(args)
+ end
+ when :match
+ expect(call_with).to match_array(calls_args)
+ end
+ end
+
\ No newline at end of file
diff --git a/test/application_system_test_case.rb b/test/application_system_test_case.rb
deleted file mode 100644
index d19212a..0000000
--- a/test/application_system_test_case.rb
+++ /dev/null
@@ -1,5 +0,0 @@
-require "test_helper"
-
-class ApplicationSystemTestCase < ActionDispatch::SystemTestCase
- driven_by :selenium, using: :chrome, screen_size: [1400, 1400]
-end
diff --git a/test/channels/application_cable/connection_test.rb b/test/channels/application_cable/connection_test.rb
deleted file mode 100644
index 800405f..0000000
--- a/test/channels/application_cable/connection_test.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-require "test_helper"
-
-class ApplicationCable::ConnectionTest < ActionCable::Connection::TestCase
- # test "connects with cookies" do
- # cookies.signed[:user_id] = 42
- #
- # connect
- #
- # assert_equal connection.user_id, "42"
- # end
-end
diff --git a/test/controllers/.keep b/test/controllers/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/fixtures/files/.keep b/test/fixtures/files/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/helpers/.keep b/test/helpers/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/integration/.keep b/test/integration/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/mailers/.keep b/test/mailers/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/models/.keep b/test/models/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/system/.keep b/test/system/.keep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/test_helper.rb b/test/test_helper.rb
deleted file mode 100644
index a4a491d..0000000
--- a/test/test_helper.rb
+++ /dev/null
@@ -1,13 +0,0 @@
-ENV['RAILS_ENV'] = 'test'
-require_relative "../config/environment"
-require "rails/test_help"
-
-class ActiveSupport::TestCase
- # Run tests in parallel with specified workers
- parallelize(workers: :number_of_processors)
-
- # Setup all fixtures in test/fixtures/*.yml for all tests in alphabetical order.
- fixtures :all
-
- # Add more helper methods to be used by all tests here...
-end
diff --git a/travis-backup.gemspec b/travis-backup.gemspec
new file mode 100644
index 0000000..77927fd
--- /dev/null
+++ b/travis-backup.gemspec
@@ -0,0 +1,30 @@
+Gem::Specification.new do |s|
+ s.name = 'travis-backup'
+ s.version = '0.2.1'
+ s.summary = 'Travis CI backup tool'
+ s.authors = ['Karol Selak']
+ s.required_ruby_version = Gem::Requirement.new(">= 2.3.0")
+ s.files = Dir.chdir(File.expand_path('..', __FILE__)) do
+ `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
+ end
+ s.executables = Dir.glob('bin/*').map { |f| File.basename(f) }
+ s.require_paths = ["lib"]
+ s.license = 'Beerware'
+
+ s.add_dependency 'activerecord'
+ s.add_dependency 'pg'
+ s.add_dependency 'pry'
+ s.add_dependency 'rails'
+
+ s.add_dependency 'bootsnap'
+ s.add_dependency 'tzinfo-data'
+
+ s.add_development_dependency 'brakeman'
+ s.add_development_dependency 'byebug'
+ s.add_development_dependency 'factory_bot'
+ s.add_development_dependency 'rspec-rails'
+ s.add_development_dependency 'listen'
+ s.add_development_dependency 'rubocop', '~> 0.75.1'
+ s.add_development_dependency 'rubocop-rspec'
+ s.add_development_dependency 'database_cleaner-active_record'
+end
\ No newline at end of file