Browse Source

Merge pull request #2581 from ahhda/refactor-devops

DevOps: Refactor Pulumi code stacks
Mokhtar Naamani 3 years ago
parent
commit
8b1f4a5df3

+ 3 - 0
devops/infrastructure/node-network/Pulumi.yaml

@@ -19,3 +19,6 @@ template:
     isLoadBalancerReady:
       description: Whether the load balancer service is ready and has been assigned an IP
       default: false
+    nodeImage:
+      description: Docker image (with tag) to be used for the validator and RPC nodes
+      default: 'joystream/node:latest'

+ 2 - 1
devops/infrastructure/node-network/README.md

@@ -38,7 +38,8 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext numberOfValidators=2 --plaintext isMinikube=true --plaintext networkSuffix=8122
+    --plaintext numberOfValidators=2 --plaintext isMinikube=true --plaintext networkSuffix=8122 \
+    --plaintext nodeImage=joystream/node:latest
    ```
 
    If you want to build the stack on AWS set the `isMinikube` config to `false`

+ 17 - 6
devops/infrastructure/node-network/index.ts

@@ -3,7 +3,7 @@ import * as eks from '@pulumi/eks'
 import * as pulumi from '@pulumi/pulumi'
 import * as k8s from '@pulumi/kubernetes'
 import { configMapFromFile } from './configMap'
-import { CaddyServiceDeployment } from './caddy'
+import { CaddyServiceDeployment } from 'pulumi-common'
 import { getSubkeyContainers } from './utils'
 import { ValidatorServiceDeployment } from './validator'
 import { NFSServiceDeployment } from './nfsVolume'
@@ -56,6 +56,7 @@ const networkSuffix = config.get('networkSuffix') || '8129'
 const numberOfValidators = config.getNumber('numberOfValidators') || 1
 const chainDataPath = '/chain-data'
 const chainSpecPath = `${chainDataPath}/chainspec-raw.json`
+const nodeImage = config.get('nodeImage') || 'joystream/node:latest'
 
 const subkeyContainers = getSubkeyContainers(numberOfValidators, chainDataPath)
 let pvcClaimName: pulumi.Output<any>
@@ -126,7 +127,7 @@ const chainDataPrepareJob = new k8s.batch.v1.Job(
             ...subkeyContainers,
             {
               name: 'builder-node',
-              image: 'joystream/node:latest',
+              image: nodeImage,
               command: ['/bin/sh', '-c'],
               args: [
                 `/joystream/chain-spec-builder generate -a ${numberOfValidators} \
@@ -167,7 +168,7 @@ const chainDataPrepareJob = new k8s.batch.v1.Job(
             },
             {
               name: 'raw-chain-spec',
-              image: 'joystream/node:latest',
+              image: nodeImage,
               command: ['/bin/sh', '-c'],
               args: [`/joystream/node build-spec --chain ${chainDataPath}/chainspec.json --raw > ${chainSpecPath}`],
               volumeMounts: [
@@ -206,7 +207,7 @@ const validators = []
 for (let i = 1; i <= numberOfValidators; i++) {
   const validator = new ValidatorServiceDeployment(
     `node-${i}`,
-    { namespace: namespaceName, index: i, chainSpecPath, dataPath: chainDataPath, pvc: pvcClaimName },
+    { namespace: namespaceName, index: i, chainSpecPath, dataPath: chainDataPath, pvc: pvcClaimName, nodeImage },
     { ...resourceOptions, dependsOn: chainDataPrepareJob }
   )
   validators.push(validator)
@@ -231,7 +232,7 @@ const deployment = new k8s.apps.v1.Deployment(
           containers: [
             {
               name: 'rpc-node',
-              image: 'joystream/node:latest',
+              image: nodeImage,
               ports: [
                 { name: 'rpc-9944', containerPort: 9944 },
                 { name: 'rpc-9933', containerPort: 9933 },
@@ -302,9 +303,19 @@ const service = new k8s.core.v1.Service(
 export const serviceName = service.metadata.name
 
 const lbReady = config.get('isLoadBalancerReady') === 'true'
+
+const caddyEndpoints = [
+  `/ws-rpc {
+  reverse_proxy node-network:9944
+}`,
+  `/http-rpc {
+  reverse_proxy node-network:9933
+}`,
+]
+
 const caddy = new CaddyServiceDeployment(
   'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, isMinikube },
+  { lbReady, namespaceName: namespaceName, isMinikube, caddyEndpoints },
   resourceOptions
 )
 

+ 2 - 1
devops/infrastructure/node-network/package.json

@@ -9,6 +9,7 @@
     "@pulumi/eks": "^0.31.0",
     "@pulumi/kubernetes": "^3.0.0",
     "@pulumi/pulumi": "^3.0.0",
-    "@pulumi/docker": "^3.0.0"
+    "@pulumi/docker": "^3.0.0",
+    "pulumi-common": "file:../pulumi-common"
   }
 }

+ 2 - 1
devops/infrastructure/node-network/validator.ts

@@ -17,7 +17,7 @@ export class ValidatorServiceDeployment extends pulumi.ComponentResource {
     const labels = { app: name }
     const container: k8stypes.core.v1.Container = {
       name: `joystream-node-${args.index}`,
-      image: 'joystream/node:latest',
+      image: args.nodeImage,
       args: [
         '--chain',
         args.chainSpecPath,
@@ -90,5 +90,6 @@ export interface ServiceDeploymentArgs {
   index: number
   chainSpecPath: string
   dataPath: string
+  nodeImage: string
   pvc: pulumi.OutputInstance<any>
 }

+ 7 - 8
devops/infrastructure/node-network/caddy.ts → devops/infrastructure/pulumi-common/caddy.ts

@@ -14,7 +14,7 @@ export class CaddyServiceDeployment extends pulumi.ComponentResource {
   public readonly secondaryEndpoint?: pulumi.Output<string>
 
   constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
-    super('k8sjs:service:ServiceDeployment', name, {}, opts)
+    super('caddy:service:CaddyServiceDeployment', name, {}, opts)
 
     const labels = { app: name }
     let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
@@ -58,14 +58,11 @@ export class CaddyServiceDeployment extends pulumi.ComponentResource {
       })
 
       function getProxyString(ipAddress: pulumi.Output<string>) {
-        return pulumi.interpolate`${ipAddress}.nip.io/ws-rpc {
-          reverse_proxy node-network:9944
+        let result: pulumi.Output<string> = pulumi.interpolate``
+        for (const endpoint of args.caddyEndpoints) {
+          result = pulumi.interpolate`${ipAddress}.nip.io${endpoint}\n${result}`
         }
-
-        ${ipAddress}.nip.io/http-rpc {
-          reverse_proxy node-network:9933
-        }
-        `
+        return result
       }
 
       caddyConfig = pulumi.interpolate`${getProxyString(lbIps[0].address)}
@@ -130,6 +127,8 @@ export class CaddyServiceDeployment extends pulumi.ComponentResource {
 
 export interface ServiceDeploymentArgs {
   namespaceName: pulumi.Output<string>
+  // Caddyfile endpoint config blocks; each is appended after the load balancer's <IP>.nip.io hostname
+  caddyEndpoints: string[]
   lbReady?: boolean
   isMinikube?: boolean
 }

+ 1 - 0
devops/infrastructure/pulumi-common/index.ts

@@ -0,0 +1 @@
+export { CaddyServiceDeployment } from './caddy'

+ 11 - 0
devops/infrastructure/pulumi-common/package.json

@@ -0,0 +1,11 @@
+{
+  "name": "pulumi-common",
+  "version": "1.0.0",
+  "devDependencies": {
+    "@types/node": "^10.0.0"
+  },
+  "dependencies": {
+    "@pulumi/kubernetes": "^3.0.0",
+    "@pulumi/pulumi": "^3.0.0"
+  }
+}

+ 15 - 0
devops/infrastructure/pulumi-common/tsconfig.json

@@ -0,0 +1,15 @@
+{
+  "compilerOptions": {
+    "strict": true,
+    "target": "es2016",
+    "module": "commonjs",
+    "moduleResolution": "node",
+    "sourceMap": true,
+    "experimentalDecorators": true,
+    "pretty": true,
+    "composite": true,
+    "noFallthroughCasesInSwitch": true,
+    "noImplicitReturns": true,
+    "forceConsistentCasingInFileNames": true
+  }
+}

+ 0 - 137
devops/infrastructure/query-node/caddy.ts

@@ -1,137 +0,0 @@
-import * as k8s from '@pulumi/kubernetes'
-import * as pulumi from '@pulumi/pulumi'
-import * as dns from 'dns'
-
-/**
- * ServiceDeployment is an example abstraction that uses a class to fold together the common pattern of a
- * Kubernetes Deployment and its associated Service object.
- */
-export class CaddyServiceDeployment extends pulumi.ComponentResource {
-  public readonly deployment: k8s.apps.v1.Deployment
-  public readonly service: k8s.core.v1.Service
-  public readonly hostname?: pulumi.Output<string>
-  public readonly primaryEndpoint?: pulumi.Output<string>
-  public readonly secondaryEndpoint?: pulumi.Output<string>
-
-  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
-    super('k8sjs:service:ServiceDeployment', name, {}, opts)
-
-    const labels = { app: name }
-    let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
-    let caddyVolumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
-
-    async function lookupPromise(url: string): Promise<dns.LookupAddress[]> {
-      return new Promise((resolve, reject) => {
-        dns.lookup(url, { all: true }, (err: any, addresses: dns.LookupAddress[]) => {
-          if (err) reject(err)
-          resolve(addresses)
-        })
-      })
-    }
-
-    this.service = new k8s.core.v1.Service(
-      name,
-      {
-        metadata: {
-          name: name,
-          namespace: args.namespaceName,
-          labels: labels,
-        },
-        spec: {
-          type: 'LoadBalancer',
-          ports: [
-            { name: 'http', port: 80 },
-            { name: 'https', port: 443 },
-          ],
-          selector: labels,
-        },
-      },
-      { parent: this }
-    )
-
-    this.hostname = this.service.status.loadBalancer.ingress[0].hostname
-
-    if (args.lbReady) {
-      let caddyConfig: pulumi.Output<string>
-      const lbIps: pulumi.Output<dns.LookupAddress[]> = this.hostname.apply((dnsName) => {
-        return lookupPromise(dnsName)
-      })
-
-      function getProxyString(ipAddress: pulumi.Output<string>) {
-        return pulumi.interpolate`${ipAddress}.nip.io/indexer/* {
-          uri strip_prefix /indexer
-          reverse_proxy query-node:4000
-        }
-
-        ${ipAddress}.nip.io/server/* {
-          uri strip_prefix /server
-          reverse_proxy query-node:8081
-        }
-        `
-      }
-
-      caddyConfig = pulumi.interpolate`${getProxyString(lbIps[0].address)}
-        ${getProxyString(lbIps[1].address)}`
-
-      this.primaryEndpoint = pulumi.interpolate`${lbIps[0].address}.nip.io`
-      this.secondaryEndpoint = pulumi.interpolate`${lbIps[1].address}.nip.io`
-
-      const keyConfig = new k8s.core.v1.ConfigMap(
-        name,
-        {
-          metadata: { namespace: args.namespaceName, labels: labels },
-          data: { 'fileData': caddyConfig },
-        },
-        { parent: this }
-      )
-      const keyConfigName = keyConfig.metadata.apply((m) => m.name)
-
-      caddyVolumeMounts.push({
-        mountPath: '/etc/caddy/Caddyfile',
-        name: 'caddy-volume',
-        subPath: 'fileData',
-      })
-      volumes.push({
-        name: 'caddy-volume',
-        configMap: {
-          name: keyConfigName,
-        },
-      })
-    }
-
-    this.deployment = new k8s.apps.v1.Deployment(
-      name,
-      {
-        metadata: { namespace: args.namespaceName, labels: labels },
-        spec: {
-          selector: { matchLabels: labels },
-          replicas: 1,
-          template: {
-            metadata: { labels: labels },
-            spec: {
-              containers: [
-                {
-                  name: 'caddy',
-                  image: 'caddy',
-                  ports: [
-                    { name: 'caddy-http', containerPort: 80 },
-                    { name: 'caddy-https', containerPort: 443 },
-                  ],
-                  volumeMounts: caddyVolumeMounts,
-                },
-              ],
-              volumes,
-            },
-          },
-        },
-      },
-      { parent: this }
-    )
-  }
-}
-
-export interface ServiceDeploymentArgs {
-  namespaceName: pulumi.Output<string>
-  lbReady?: boolean
-  isMinikube?: boolean
-}

+ 18 - 9
devops/infrastructure/query-node/index.ts

@@ -5,9 +5,7 @@ import * as pulumi from '@pulumi/pulumi'
 import { configMapFromFile } from './configMap'
 import * as k8s from '@pulumi/kubernetes'
 import * as s3Helpers from './s3Helpers'
-import { CaddyServiceDeployment } from './caddy'
-import { workers } from 'cluster'
-// import * as fs from 'fs'
+import { CaddyServiceDeployment } from 'pulumi-common'
 
 require('dotenv').config()
 
@@ -33,10 +31,10 @@ if (isMinikube) {
   // joystreamAppsImage = pulumi.interpolate`joystream/apps`
 } else {
   // Create a VPC for our cluster.
-  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2 })
+  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
 
   // Create an EKS cluster with the default configuration.
-  const cluster = new eks.Cluster('eksctl-my-cluster', {
+  const cluster = new eks.Cluster('eksctl-query-node', {
     vpcId: vpc.id,
     subnetIds: vpc.publicSubnetIds,
     desiredCapacity: 3,
@@ -441,12 +439,23 @@ const service = new k8s.core.v1.Service(
 // Export the Service name and public LoadBalancer Endpoint
 export const serviceName = service.metadata.name
 
-// When "done", this will print the public IP.
-// export let serviceHostname: pulumi.Output<string>
+const caddyEndpoints = [
+  `/indexer/* {
+    uri strip_prefix /indexer
+    reverse_proxy query-node:4000
+}`,
+  `/server/* {
+    uri strip_prefix /server
+    reverse_proxy query-node:8081
+}`,
+]
 
-// serviceHostname = service.status.loadBalancer.ingress[0].hostname
 const lbReady = config.get('isLoadBalancerReady') === 'true'
-const caddy = new CaddyServiceDeployment('caddy-proxy', { lbReady, namespaceName: namespaceName }, resourceOptions)
+const caddy = new CaddyServiceDeployment(
+  'caddy-proxy',
+  { lbReady, namespaceName: namespaceName, isMinikube, caddyEndpoints },
+  resourceOptions
+)
 
 export const endpoint1 = caddy.primaryEndpoint
 export const endpoint2 = caddy.secondaryEndpoint

+ 2 - 1
devops/infrastructure/query-node/package.json

@@ -12,6 +12,7 @@
     "@pulumi/docker": "^3.0.0",
     "dotenv": "^10.0.0",
     "mime": "^2.5.2",
-    "@types/mime": "^2.0.0"
+    "@types/mime": "^2.0.0",
+    "pulumi-common": "file:../pulumi-common"
   }
 }

+ 41 - 88
devops/infrastructure/storage-node/index.ts

@@ -3,10 +3,9 @@ import * as aws from '@pulumi/aws'
 import * as eks from '@pulumi/eks'
 import * as k8s from '@pulumi/kubernetes'
 import * as pulumi from '@pulumi/pulumi'
+import { CaddyServiceDeployment } from 'pulumi-common'
 import * as fs from 'fs'
 
-const dns = require('dns')
-
 const awsConfig = new pulumi.Config('aws')
 const config = new pulumi.Config()
 
@@ -19,17 +18,16 @@ const storage = parseInt(config.get('storage') || '40')
 
 let additionalParams: string[] | pulumi.Input<string>[] = []
 let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
-let caddyVolumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
 let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
 
 // Create a VPC for our cluster.
-const vpc = new awsx.ec2.Vpc('vpc', { numberOfAvailabilityZones: 2 })
+const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
 
 // Create an EKS cluster with the default configuration.
-const cluster = new eks.Cluster('eksctl-my-cluster', {
+const cluster = new eks.Cluster('eksctl-storage-node', {
   vpcId: vpc.id,
   subnetIds: vpc.publicSubnetIds,
-  instanceType: 't2.micro',
+  instanceType: 't2.medium',
   providerCredentialOpts: {
     profileName: awsConfig.get('profile'),
   },
@@ -47,8 +45,10 @@ export const colossusImage = repo.buildAndPushImage({
   context: '../../../',
 })
 
+const resourceOptions = { provider: cluster.provider }
+
 // Create a Kubernetes Namespace
-const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
 
 // Export the Namespace name
 export const namespaceName = ns.metadata.name
@@ -72,7 +72,7 @@ const pvc = new k8s.core.v1.PersistentVolumeClaim(
       },
     },
   },
-  { provider: cluster.provider }
+  resourceOptions
 )
 
 volumes.push({
@@ -82,76 +82,25 @@ volumes.push({
   },
 })
 
-// Create a LoadBalancer Service for the Deployment
-const service = new k8s.core.v1.Service(
-  name,
-  {
-    metadata: {
-      labels: appLabels,
-      namespace: namespaceName,
-    },
-    spec: {
-      type: 'LoadBalancer',
-      ports: [
-        { name: 'http', port: 80 },
-        { name: 'https', port: 443 },
-      ],
-      selector: appLabels,
-    },
-  },
-  {
-    provider: cluster.provider,
-  }
+const caddyEndpoints = [
+  ` {
+    reverse_proxy storage-node:${colossusPort}
+}`,
+]
+
+const caddy = new CaddyServiceDeployment(
+  'caddy-proxy',
+  { lbReady, namespaceName: namespaceName, caddyEndpoints },
+  resourceOptions
 )
 
-// Export the Service name and public LoadBalancer Endpoint
-export const serviceName = service.metadata.name
-// When "done", this will print the hostname
-export let serviceHostname: pulumi.Output<string>
-serviceHostname = service.status.loadBalancer.ingress[0].hostname
+export const endpoint1 = caddy.primaryEndpoint
+export const endpoint2 = caddy.secondaryEndpoint
 
 export let appLink: pulumi.Output<string>
 
 if (lbReady) {
-  async function lookupPromise(url: string) {
-    return new Promise((resolve, reject) => {
-      dns.lookup(url, (err: any, address: any) => {
-        if (err) reject(err)
-        resolve(address)
-      })
-    })
-  }
-
-  const lbIp = serviceHostname.apply((dnsName) => {
-    return lookupPromise(dnsName)
-  })
-
-  const caddyConfig = pulumi.interpolate`${lbIp}.nip.io {
-  reverse_proxy localhost:${colossusPort}
-}`
-
-  const keyConfig = new k8s.core.v1.ConfigMap(name, {
-    metadata: { namespace: namespaceName, labels: appLabels },
-    data: { 'fileData': caddyConfig },
-  })
-  const keyConfigName = keyConfig.metadata.apply((m) => m.name)
-
-  caddyVolumeMounts.push({
-    mountPath: '/etc/caddy/Caddyfile',
-    name: 'caddy-volume',
-    subPath: 'fileData',
-  })
-
-  volumes.push({
-    name: 'caddy-volume',
-    configMap: {
-      name: keyConfigName,
-    },
-  })
-
-  appLink = pulumi.interpolate`https://${lbIp}.nip.io`
-
-  lbIp.apply((value) => console.log(`You can now access the app at: ${value}.nip.io`))
+  appLink = pulumi.interpolate`https://${endpoint1}`
 
   if (!isAnonymous) {
     const remoteKeyFilePath = '/joystream/key-file.json'
@@ -228,20 +177,6 @@ const deployment = new k8s.apps.v1.Deployment(
                 },
               ],
             },
-            // {
-            //   name: 'httpd',
-            //   image: 'crccheck/hello-world',
-            //   ports: [{ name: 'hello-world', containerPort: 8000 }],
-            // },
-            {
-              name: 'caddy',
-              image: 'caddy',
-              ports: [
-                { name: 'caddy-http', containerPort: 80 },
-                { name: 'caddy-https', containerPort: 443 },
-              ],
-              volumeMounts: caddyVolumeMounts,
-            },
             {
               name: 'colossus',
               image: colossusImage,
@@ -274,10 +209,28 @@ const deployment = new k8s.apps.v1.Deployment(
       },
     },
   },
+  resourceOptions
+)
+
+// Create a LoadBalancer Service for the Deployment
+const service = new k8s.core.v1.Service(
+  name,
   {
-    provider: cluster.provider,
-  }
+    metadata: {
+      labels: appLabels,
+      namespace: namespaceName,
+      name: 'storage-node',
+    },
+    spec: {
+      ports: [{ name: 'port-1', port: colossusPort }],
+      selector: appLabels,
+    },
+  },
+  resourceOptions
 )
 
+// Export the Service name
+export const serviceName = service.metadata.name
+
 // Export the Deployment name
 export const deploymentName = deployment.metadata.name

+ 2 - 1
devops/infrastructure/storage-node/package.json

@@ -8,6 +8,7 @@
     "@pulumi/awsx": "^0.30.0",
     "@pulumi/eks": "^0.31.0",
     "@pulumi/kubernetes": "^3.0.0",
-    "@pulumi/pulumi": "^3.0.0"
+    "@pulumi/pulumi": "^3.0.0",
+    "pulumi-common": "file:../pulumi-common"
   }
 }