Преглед изворног кода

Remove redundancy, add caddy

Anuj Bansal пре 3 године
родитељ
комит
f99e81da06

+ 6 - 0
devops/infrastructure/node-network/Pulumi.yaml

@@ -10,6 +10,12 @@ template:
     isMinikube:
       description: Whether you are deploying to minikube
       default: false
+    numberOfValidators:
+      description: Number of validators as starting nodes
+      default: 2
+    networkSuffix:
+      description: Suffix to attach to the network id and name
+      default: 8129
     isLoadBalancerReady:
       description: Whether the load balancer service is ready and has been assigned an IP
       default: false

+ 1 - 5
devops/infrastructure/node-network/README.md

@@ -38,7 +38,7 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true
+    --plaintext numberOfValidators=2 --plaintext isMinikube=true
    ```
 
    If you want to build the stack on AWS set the `isMinikube` config to `false`
@@ -47,10 +47,6 @@ After cloning this repo, from this working directory, run these commands:
    $ pulumi config set isMinikube false
    ```
 
-1. Create a `.env` file in this directory (`cp ../../../.env ./.env`) and set the database and other variables in it
-
-   Make sure to set `GRAPHQL_SERVER_PORT=4001`
-
 1. Stand up the Kubernetes cluster:
 
    Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a

+ 135 - 0
devops/infrastructure/node-network/caddy.ts

@@ -0,0 +1,135 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import * as dns from 'dns'
+
+/**
+ * ServiceDeployment is an example abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ */
+export class CaddyServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+  public readonly hostname?: pulumi.Output<string>
+  public readonly primaryEndpoint?: pulumi.Output<string>
+  public readonly secondaryEndpoint?: pulumi.Output<string>
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('k8sjs:service:ServiceDeployment', name, {}, opts)
+
+    const labels = { app: name }
+    let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
+    let caddyVolumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
+
+    async function lookupPromise(url: string): Promise<dns.LookupAddress[]> {
+      return new Promise((resolve, reject) => {
+        dns.lookup(url, { all: true }, (err: any, addresses: dns.LookupAddress[]) => {
+          if (err) reject(err)
+          resolve(addresses)
+        })
+      })
+    }
+
+    this.service = new k8s.core.v1.Service(
+      name,
+      {
+        metadata: {
+          name: name,
+          namespace: args.namespaceName,
+          labels: labels,
+        },
+        spec: {
+          type: args.isMinikube ? 'NodePort' : 'LoadBalancer',
+          ports: [
+            { name: 'http', port: 80 },
+            { name: 'https', port: 443 },
+          ],
+          selector: labels,
+        },
+      },
+      { parent: this }
+    )
+
+    this.hostname = this.service.status.loadBalancer.ingress[0].hostname
+
+    if (args.lbReady) {
+      let caddyConfig: pulumi.Output<string>
+      const lbIps: pulumi.Output<dns.LookupAddress[]> = this.hostname.apply((dnsName) => {
+        return lookupPromise(dnsName)
+      })
+
+      function getProxyString(ipAddress: pulumi.Output<string>) {
+        return pulumi.interpolate`${ipAddress}.nip.io/ws-rpc {
+          reverse_proxy node-network:9944
+        }
+
+        ${ipAddress}.nip.io/http-rpc {
+          reverse_proxy node-network:9933
+        }
+        `
+      }
+
+      caddyConfig = pulumi.interpolate`${getProxyString(lbIps[0].address)}
+        ${getProxyString(lbIps[1].address)}`
+
+      this.primaryEndpoint = pulumi.interpolate`${lbIps[0].address}.nip.io`
+      this.secondaryEndpoint = pulumi.interpolate`${lbIps[1].address}.nip.io`
+
+      const keyConfig = new k8s.core.v1.ConfigMap(
+        name,
+        {
+          metadata: { namespace: args.namespaceName, labels: labels },
+          data: { 'fileData': caddyConfig },
+        },
+        { parent: this }
+      )
+      const keyConfigName = keyConfig.metadata.apply((m) => m.name)
+
+      caddyVolumeMounts.push({
+        mountPath: '/etc/caddy/Caddyfile',
+        name: 'caddy-volume',
+        subPath: 'fileData',
+      })
+      volumes.push({
+        name: 'caddy-volume',
+        configMap: {
+          name: keyConfigName,
+        },
+      })
+    }
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      name,
+      {
+        metadata: { namespace: args.namespaceName, labels: labels },
+        spec: {
+          selector: { matchLabels: labels },
+          replicas: 1,
+          template: {
+            metadata: { labels: labels },
+            spec: {
+              containers: [
+                {
+                  name: 'caddy',
+                  image: 'caddy',
+                  ports: [
+                    { name: 'caddy-http', containerPort: 80 },
+                    { name: 'caddy-https', containerPort: 443 },
+                  ],
+                  volumeMounts: caddyVolumeMounts,
+                },
+              ],
+              volumes,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
/**
 * Arguments for {@link CaddyServiceDeployment}.
 */
export interface ServiceDeploymentArgs {
  // Kubernetes namespace the Service/Deployment/ConfigMap are created in
  namespaceName: pulumi.Output<string>
  // Set to true once the load balancer has an address; gates Caddyfile
  // generation (DNS lookup of the LB hostname happens only when true)
  lbReady?: boolean
  // Use a NodePort service instead of LoadBalancer when deploying to minikube
  isMinikube?: boolean
}

+ 23 - 103
devops/infrastructure/node-network/index.ts

@@ -3,6 +3,8 @@ import * as eks from '@pulumi/eks'
 import * as pulumi from '@pulumi/pulumi'
 import * as k8s from '@pulumi/kubernetes'
 import { configMapFromFile } from './configMap'
+import { CaddyServiceDeployment } from './caddy'
+import { getSubkeyContainers, getValidatorContainers } from './utils'
 
 const config = new pulumi.Config()
 const awsConfig = new pulumi.Config('aws')
@@ -13,23 +15,12 @@ let provider: k8s.Provider
 
 if (isMinikube) {
   provider = new k8s.Provider('local', {})
-
-  // Create image from local app
-  // joystreamAppsImage = new docker.Image('joystream/apps', {
-  //   build: {
-  //     context: '../../../',
-  //     dockerfile: '../../../apps.Dockerfile',
-  //   },
-  //   imageName: 'joystream/apps:latest',
-  //   skipPush: true,
-  // }).baseImageName
-  // joystreamAppsImage = pulumi.interpolate`joystream/apps`
 } else {
   // Create a VPC for our cluster.
   const vpc = new awsx.ec2.Vpc('joystream-node-vpc', { numberOfAvailabilityZones: 2 })
 
   // Create an EKS cluster with the default configuration.
-  const cluster = new eks.Cluster('eksctl-my-cluster', {
+  const cluster = new eks.Cluster('eksctl-node-network', {
     vpcId: vpc.id,
     subnetIds: vpc.publicSubnetIds,
     desiredCapacity: 3,
@@ -69,8 +60,12 @@ const jsonModifyConfig = new configMapFromFile(
 
 const dataPath = '/subkey-data'
 const builderPath = '/builder-data'
-const networkPrefix = '8129'
+const networkSuffix = config.get('networkSuffix') || '8129'
 const chainSpecPath = `${builderPath}/chainspec-raw.json`
+const numberOfValidators = config.getNumber('numberOfValidators') || 2
+
+const subkeyContainers = getSubkeyContainers(numberOfValidators, dataPath)
+const validatorContainers = getValidatorContainers(numberOfValidators, dataPath, builderPath, chainSpecPath)
 
 const deployment = new k8s.apps.v1.Deployment(
   name,
@@ -88,36 +83,15 @@ const deployment = new k8s.apps.v1.Deployment(
         },
         spec: {
           initContainers: [
-            {
-              name: 'subkey-node',
-              image: 'parity/subkey:latest',
-              command: ['/bin/sh', '-c'],
-              args: [`subkey generate-node-key >> ${dataPath}/privatekey1 2>> ${dataPath}/publickey1`],
-              volumeMounts: [
-                {
-                  name: 'subkey-data',
-                  mountPath: dataPath,
-                },
-              ],
-            },
-            {
-              name: 'subkey-node-1',
-              image: 'parity/subkey:latest',
-              command: ['/bin/sh', '-c'],
-              args: [`subkey generate-node-key >> ${dataPath}/privatekey2 2>> ${dataPath}/publickey2`],
-              volumeMounts: [
-                {
-                  name: 'subkey-data',
-                  mountPath: dataPath,
-                },
-              ],
-            },
+            ...subkeyContainers,
             {
               name: 'builder-node',
               image: 'joystream/node:latest',
               command: ['/bin/sh', '-c'],
               args: [
-                `/joystream/chain-spec-builder generate -a 2 --chain-spec-path ${builderPath}/chainspec.json --deployment live --endowed 1 --keystore-path ${builderPath}/data >> ${builderPath}/seeds.txt`,
+                `/joystream/chain-spec-builder generate -a ${numberOfValidators} \
+                --chain-spec-path ${builderPath}/chainspec.json --deployment live \
+                --endowed 1 --keystore-path ${builderPath}/data >> ${builderPath}/seeds.txt`,
               ],
               volumeMounts: [
                 {
@@ -130,7 +104,7 @@ const deployment = new k8s.apps.v1.Deployment(
               name: 'json-modify',
               image: 'python',
               command: ['python'],
-              args: ['/scripts/json_modify.py', '--path', `${builderPath}/chainspec.json`, '--prefix', networkPrefix],
+              args: ['/scripts/json_modify.py', '--path', `${builderPath}/chainspec.json`, '--prefix', networkSuffix],
               volumeMounts: [
                 {
                   mountPath: '/scripts/json_modify.py',
@@ -161,62 +135,7 @@ const deployment = new k8s.apps.v1.Deployment(
             },
           ],
           containers: [
-            {
-              name: 'joystream-node',
-              image: 'joystream/node:latest',
-              ports: [{ containerPort: 9944 }, { containerPort: 9933 }],
-              args: [
-                '--chain',
-                chainSpecPath,
-                '--pruning',
-                'archive',
-                '--node-key-file',
-                `${dataPath}/privatekey1`,
-                '--keystore-path',
-                `${builderPath}/data/auth-0`,
-                '--validator',
-                '--log',
-                'runtime,txpool,transaction-pool,trace=sync',
-              ],
-              volumeMounts: [
-                {
-                  name: 'subkey-data',
-                  mountPath: dataPath,
-                },
-                {
-                  name: 'builder-data',
-                  mountPath: builderPath,
-                },
-              ],
-            },
-            {
-              name: 'joystream-node-2',
-              image: 'joystream/node:latest',
-              ports: [{ containerPort: 9944 }, { containerPort: 9933 }],
-              args: [
-                '--chain',
-                chainSpecPath,
-                '--pruning',
-                'archive',
-                '--node-key-file',
-                `${dataPath}/privatekey2`,
-                '--keystore-path',
-                `${builderPath}/data/auth-1`,
-                '--validator',
-                '--log',
-                'runtime,txpool,transaction-pool,trace=sync',
-              ],
-              volumeMounts: [
-                {
-                  name: 'subkey-data',
-                  mountPath: dataPath,
-                },
-                {
-                  name: 'builder-data',
-                  mountPath: builderPath,
-                },
-              ],
-            },
+            ...validatorContainers,
             {
               name: 'rpc-node',
               image: 'joystream/node:latest',
@@ -281,9 +200,9 @@ const service = new k8s.core.v1.Service(
     metadata: {
       labels: appLabels,
       namespace: namespaceName,
+      name: 'node-network',
     },
     spec: {
-      type: 'NodePort',
       ports: [
         { name: 'port-1', port: 9944 },
         { name: 'port-2', port: 9933 },
@@ -297,11 +216,12 @@ const service = new k8s.core.v1.Service(
 // Export the Service name and public LoadBalancer Endpoint
 export const serviceName = service.metadata.name
 
-// When "done", this will print the public IP.
-export let serviceHostname: pulumi.Output<string>
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+const caddy = new CaddyServiceDeployment(
+  'caddy-proxy',
+  { lbReady, namespaceName: namespaceName, isMinikube },
+  resourceOptions
+)
 
-if (isMinikube) {
-  serviceHostname = service.spec.clusterIP
-} else {
-  serviceHostname = service.status.loadBalancer.ingress[0].hostname
-}
+export const endpoint1 = caddy.primaryEndpoint
+export const endpoint2 = caddy.secondaryEndpoint

+ 58 - 0
devops/infrastructure/node-network/utils.ts

@@ -0,0 +1,58 @@
+export const getSubkeyContainers = (validators: number, dataPath: string) => {
+  const result = []
+  for (let i = 1; i <= validators; i++) {
+    result.push({
+      name: `subkey-node-${i}`,
+      image: 'parity/subkey:latest',
+      command: ['/bin/sh', '-c'],
+      args: [`subkey generate-node-key >> ${dataPath}/privatekey${i} 2>> ${dataPath}/publickey${i}`],
+      volumeMounts: [
+        {
+          name: 'subkey-data',
+          mountPath: dataPath,
+        },
+      ],
+    })
+  }
+  return result
+}
+
+export const getValidatorContainers = (
+  validators: number,
+  dataPath: string,
+  builderPath: string,
+  chainSpecPath: string
+) => {
+  const result = []
+  for (let i = 1; i <= validators; i++) {
+    result.push({
+      name: `joystream-node-${i}`,
+      image: 'joystream/node:latest',
+      ports: [{ containerPort: 9944 }, { containerPort: 9933 }],
+      args: [
+        '--chain',
+        chainSpecPath,
+        '--pruning',
+        'archive',
+        '--node-key-file',
+        `${dataPath}/privatekey${i}`,
+        '--keystore-path',
+        `${builderPath}/data/auth-${i - 1}`,
+        '--validator',
+        '--log',
+        'runtime,txpool,transaction-pool,trace=sync',
+      ],
+      volumeMounts: [
+        {
+          name: 'subkey-data',
+          mountPath: dataPath,
+        },
+        {
+          name: 'builder-data',
+          mountPath: builderPath,
+        },
+      ],
+    })
+  }
+  return result
+}