Why Gemfury? Push, build, and install RubyGems, npm packages, Python packages, Maven artifacts, PHP packages, Go Modules, Debian packages, RPM packages, and NuGet packages.

Repository URL to install this package:

Details    
@skava/bs / src / scripts / shell / webhooks.ts
Size: Mime:
// javascript-infra-build-antfile-in
//
// overview
//
// https://jira.skava.net/confluence/display/ux/Pipeline+TODO
// https://jira.skava.net/confluence/display/ux/Jenkins+-+resources+and+scripts
//
// `/root/skava/build`
const { shell, resolve, resolveToRoot, ENVIRONMENT } = require('./params')

// Dump the resolved environment map so the build console shows exactly
// which webhook/params this run received.
// @toppercase — unclear tag; presumably a reminder to uppercase env keys (TODO confirm)
console.log(ENVIRONMENT)

// @TODO enum
// Pull the webhook/build parameters this script cares about out of the
// resolved environment map (see ./params).
const PULL_REQUEST_SRC = ENVIRONMENT.PULL_REQUEST_SRC
const PULL_REQUEST_DEST = ENVIRONMENT.PULL_REQUEST_DEST
const CURRENT_BRANCH_NAME = ENVIRONMENT.CURRENT_BRANCH_NAME
const BUILD_NUMBER = ENVIRONMENT.BUILD_NUMBER
const WEBHOOK_TYPE_HOOK = ENVIRONMENT.WEBHOOK_TYPE_HOOK

// ------------ DEFINITION FOR MEANING BASED ON ENV & PARAMS -----------

// @example: "pullrequest:push" — "<type>:<event>"
// Split the hook descriptor once: index 0 is the hook type,
// index 1 is the event name.
const hookTypeAndEvent = WEBHOOK_TYPE_HOOK.split(':')
const WEBHOOK_HOOK_TYPE = hookTypeAndEvent[0]
const WEBHOOK_EVENT_NAME = hookTypeAndEvent[1]

// only when it is a pull request
// hook type (left half of "<type>:<event>", e.g. "pullrequest")
const IS_TYPE_PR = WEBHOOK_HOOK_TYPE === 'pullrequest'
// event (right half of "<type>:<event>", e.g. "push", "created", "merge", "decline")
// FIX: 'push' and 'created' are event names, not hook types — compare
// WEBHOOK_EVENT_NAME, consistent with IS_SHUTDOWN_EVENT below and with the
// "pr:created" / "pr:push" flow notes later in this file.
const IS_PUSH = WEBHOOK_EVENT_NAME === 'push'
const IS_CREATED = WEBHOOK_EVENT_NAME === 'created'
// FIX: a hook carries exactly one event, so the original `IS_PUSH && IS_CREATED`
// could never be true; a build is deployable on EITHER event (both pr:created
// and pr:push produce a tar per the create-or-delete-then-replace strategy).
const IS_DEPLOYABLE_EVENT = IS_PUSH || IS_CREATED
// the PR was closed one way or the other — tear the QA instance down
const IS_SHUTDOWN_EVENT =
  WEBHOOK_EVENT_NAME === 'merge' || WEBHOOK_EVENT_NAME === 'decline'

// * -> dev: any feature branch PR targeting dev
const IS_FEATURE_TO_DEV =
  PULL_REQUEST_DEST === 'dev' && PULL_REQUEST_SRC !== undefined

// we can do something with our main build pipeline
const IS_DEV_TO_MASTER =
  PULL_REQUEST_DEST === 'master' && PULL_REQUEST_SRC === 'dev'

// we can handle this and make it more optimized for jenkins, faster, cheaper, easier
const IS_DEV_TO_MASTER_DEPLOYABLE = IS_DEV_TO_MASTER && IS_DEPLOYABLE_EVENT

// dev -> master PR updated with new commits (push event);
// reuses IS_DEV_TO_MASTER instead of repeating the src/dest comparison
const IS_DEV_TO_MASTER_UPDATE = IS_DEV_TO_MASTER && IS_PUSH

const IS_DEV_TO_MASTER_SHUTDOWN = IS_DEV_TO_MASTER && IS_SHUTDOWN_EVENT

// ---------- LOGIC -------

// @template
// <JENKINS_BUILD_NUMBER>.<GIT_BRANCH_NAME_TO>.<GIT_BRANCH_NAME_FROM>
//
// @example template
// 31.dev.314-feature-name-here.tar
//
// @example flow - which I will handle in that js file
//
// @NOTE create-or-delete-then-replace strategy
// 31.dev.314-feature-name-here.tar (created from a pr:created)
// 31.dev.314-feature-name-here.tar (created from a pr:push)
//
// delete 31.dev.314-feature-name-here.tar (triggered from a pr.merge or pr.decline)
// then the pr merge would run a separate flow from a fresh git clone/pull

// .......

// http://34.226.131.221:9090/jenkins/job/canada/ws/

// named configs
// const STAGES = [
//   'DEV_QA',
//   'LEVEL0',
//   'LEVEL1',
//   'EXPERT_QA',
//   'PRE_STAGING',
//   'STAGING',
//   'PROD',
// ]
// const HOOKS = {
//   EDITOR_TO_FEATURE: 'PREPUSH',
//   FEATURE_TO_DEV_PR: 'DEV_QA',
//   FEATURE_TO_DEV_MERGE: 'LEVEL1',
//   DEV_TO_MASTER_PR: 'EXPERT_QA',
//   DEV_TO_MASTER_MERGE: 'PRE_STAGING',
//   PRE_STAGING_TO_STAGING: 1,
//   STAGING_TO_PRE_PROD: 1,
//   PRE_PROD_TO_PROD: 1,
// }

//
// 1. webhook
// 2. pull github code
// 3. run JS script (to handle logic with pipeline)
// 4. this will BUILD the signed nodejs bundle - single file ***
//    - [ ] how do we move the server
// 5. each pull request should be stateless
// 6. artifacts with sharing dependencies
//   - (artifact, each container separated by port and how they share, developer 1 & developer 2
//   - unit tests in build - pre deploy
//   - separate the deploy scripts
// Advanced Performance Build
// scripts to install
// SEPARATE server is built with custom node in order to do performance testing; installation instructions here: https://gist.github.com/kevincennis/0cd2138c78a07412ef21
// installation scripts written
// installation scripts integrated

// tar -cvf skreact_31.tar dist
// tar -x
// - container
// - plugin config (ask my )
// - consistent single powerful interface to use a single
//
// - copy tar file into container
// - unzip into a folder that will be unique
// - ensure we only have 1 script running the qa server PER BRANCH NAME - NOT ONE
//
// ec2 run-instances --image-id @{imageid} --count 1 --instance-type m3.medium --placement AvailabilityZone=us-east-1d --key-name ecomm_fdc-docker --security-group-ids sg-600e5110 --subnet-id subnet-eebf70c2 --associate-public-ip-address ${aws.profile}