浏览代码

Merge pull request #2499 from ahhda/deploy-query-node

DevOps - Query node deployment on EKS with Kubernetes and Pulumi
Mokhtar Naamani 3 年之前
父节点
当前提交
815fbc080b

+ 1 - 1
apps.Dockerfile

@@ -1,4 +1,4 @@
-FROM node:14 as builder
+FROM --platform=linux/x86-64 node:14 as builder
 
 WORKDIR /joystream
 COPY . /joystream

+ 6 - 0
devops/infrastructure/query-node/.gitignore

@@ -0,0 +1,6 @@
+/bin/
+/node_modules/
+kubeconfig.yml
+package-lock.json
+.env
+Pulumi.*.yaml

+ 19 - 0
devops/infrastructure/query-node/Pulumi.yaml

@@ -0,0 +1,19 @@
+name: query-node
+runtime: nodejs
+description: Kubernetes IaC for Query Node
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isMinikube:
+      description: Whether you are deploying to minikube
+      default: false
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    membersFilePath:
+      description: Path to members.json file for processor initialization
+    workersFilePath:
+      description: Path to workers.json file for processor initialization

+ 117 - 0
devops/infrastructure/query-node/README.md

@@ -0,0 +1,117 @@
+# Query Node automated deployment
+
+Deploys an EKS Kubernetes cluster with query node
+
+## Deploying the App
+
+To deploy your infrastructure, follow the below steps.
+
+### Prerequisites
+
+1. [Install Pulumi](https://www.pulumi.com/docs/get-started/install/)
+1. [Install Node.js](https://nodejs.org/en/download/)
+1. Install a package manager for Node.js, such as [npm](https://www.npmjs.com/get-npm) or [Yarn](https://yarnpkg.com/en/docs/install).
+1. [Configure AWS Credentials](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/)
+1. Optional (for debugging): [Install kubectl](https://kubernetes.io/docs/tasks/tools/)
+
+### Steps
+
+After cloning this repo, from this working directory, run these commands:
+
+1. Install the required Node.js packages:
+
+   This installs the dependent packages [needed](https://www.pulumi.com/docs/intro/concepts/how-pulumi-works/) for our Pulumi program.
+
+   ```bash
+   $ npm install
+   ```
+
+1. Create a new stack, which is an isolated deployment target for this example:
+
+   This will initialize the Pulumi program in TypeScript.
+
+   ```bash
+   $ pulumi stack init
+   ```
+
+1. Set the required configuration variables in `Pulumi.<stack>.yaml`
+
+   ```bash
+   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
+    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true
+   ```
+
+   If you want to build the stack on AWS set the `isMinikube` config to `false`
+
+   ```bash
+   $ pulumi config set isMinikube false
+   ```
+
+1. Create a `.env` file in this directory (`cp ../../../.env ./.env`) and set the database and other variables in it
+
+   Make sure to set `GRAPHQL_SERVER_PORT=4001`
+
+1. Stand up the Kubernetes cluster:
+
+   Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a
+   new EKS cluster takes between 10-15 minutes.
+
+1. Once the stack is up and running, we will modify the Caddy config to get SSL certificate for the load balancer
+
+   Modify the config variable `isLoadBalancerReady`
+
+   ```bash
+   $ pulumi config set isLoadBalancerReady true
+   ```
+
+   Run `pulumi up -y` to update the Caddy config
+
+1. You can now access the endpoints using `pulumi stack output endpoint1` or `pulumi stack output endpoint2`
+
+   The GraphQL server is accessible at `https://<ENDPOINT>/server/graphql` and indexer at `https://<ENDPOINT>/indexer/graphql`
+
+1. Access the Kubernetes Cluster using `kubectl`
+
+   To access your new Kubernetes cluster using `kubectl`, we need to set up the
+   `kubeconfig` file and download `kubectl`. We can leverage the Pulumi
+   stack output in the CLI, as Pulumi facilitates exporting these objects for us.
+
+   ```bash
+   $ pulumi stack output kubeconfig --show-secrets > kubeconfig
+   $ export KUBECONFIG=$PWD/kubeconfig
+   $ kubectl get nodes
+   ```
+
+   We can also use the stack output to query the cluster for our newly created Deployment:
+
+   ```bash
+   $ kubectl get deployment $(pulumi stack output deploymentName) --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get service $(pulumi stack output serviceName) --namespace=$(pulumi stack output namespaceName)
+   ```
+
+   To get logs
+
+   ```bash
+   $ kubectl config set-context --current --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get pods
+   $ kubectl logs <PODNAME> --all-containers
+   ```
+
+   To see complete pulumi stack output
+
+   ```bash
+   $ pulumi stack output
+   ```
+
+   To execute a command
+
+   ```bash
+   $ kubectl exec --stdin --tty <PODNAME> -c <CONTAINER> -- /bin/bash
+   ```
+
+1. Once you've finished experimenting, tear down your stack's resources by destroying and removing it:
+
+   ```bash
+   $ pulumi destroy --yes
+   $ pulumi stack rm --yes
+   ```

+ 137 - 0
devops/infrastructure/query-node/caddy.ts

@@ -0,0 +1,137 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import * as dns from 'dns'
+
+/**
+ * CaddyServiceDeployment is an abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ */
+export class CaddyServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+  public readonly hostname?: pulumi.Output<string>
+  public readonly primaryEndpoint?: pulumi.Output<string>
+  public readonly secondaryEndpoint?: pulumi.Output<string>
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('k8sjs:service:ServiceDeployment', name, {}, opts)
+
+    const labels = { app: name }
+    let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
+    let caddyVolumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
+
+    async function lookupPromise(url: string): Promise<dns.LookupAddress[]> {
+      return new Promise((resolve, reject) => {
+        dns.lookup(url, { all: true }, (err: any, addresses: dns.LookupAddress[]) => {
+          if (err) reject(err)
+          resolve(addresses)
+        })
+      })
+    }
+
+    this.service = new k8s.core.v1.Service(
+      name,
+      {
+        metadata: {
+          name: name,
+          namespace: args.namespaceName,
+          labels: labels,
+        },
+        spec: {
+          type: 'LoadBalancer',
+          ports: [
+            { name: 'http', port: 80 },
+            { name: 'https', port: 443 },
+          ],
+          selector: labels,
+        },
+      },
+      { parent: this }
+    )
+
+    this.hostname = this.service.status.loadBalancer.ingress[0].hostname
+
+    if (args.lbReady) {
+      let caddyConfig: pulumi.Output<string>
+      const lbIps: pulumi.Output<dns.LookupAddress[]> = this.hostname.apply((dnsName) => {
+        return lookupPromise(dnsName)
+      })
+
+      function getProxyString(ipAddress: pulumi.Output<string>) {
+        return pulumi.interpolate`${ipAddress}.nip.io/indexer/* {
+          uri strip_prefix /indexer
+          reverse_proxy query-node:4000
+        }
+
+        ${ipAddress}.nip.io/server/* {
+          uri strip_prefix /server
+          reverse_proxy query-node:8081
+        }
+        `
+      }
+
+      caddyConfig = pulumi.interpolate`${getProxyString(lbIps[0].address)}
+        ${getProxyString(lbIps[1].address)}`
+
+      this.primaryEndpoint = pulumi.interpolate`${lbIps[0].address}.nip.io`
+      this.secondaryEndpoint = pulumi.interpolate`${lbIps[1].address}.nip.io`
+
+      const keyConfig = new k8s.core.v1.ConfigMap(
+        name,
+        {
+          metadata: { namespace: args.namespaceName, labels: labels },
+          data: { 'fileData': caddyConfig },
+        },
+        { parent: this }
+      )
+      const keyConfigName = keyConfig.metadata.apply((m) => m.name)
+
+      caddyVolumeMounts.push({
+        mountPath: '/etc/caddy/Caddyfile',
+        name: 'caddy-volume',
+        subPath: 'fileData',
+      })
+      volumes.push({
+        name: 'caddy-volume',
+        configMap: {
+          name: keyConfigName,
+        },
+      })
+    }
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      name,
+      {
+        metadata: { namespace: args.namespaceName, labels: labels },
+        spec: {
+          selector: { matchLabels: labels },
+          replicas: 1,
+          template: {
+            metadata: { labels: labels },
+            spec: {
+              containers: [
+                {
+                  name: 'caddy',
+                  image: 'caddy',
+                  ports: [
+                    { name: 'caddy-http', containerPort: 80 },
+                    { name: 'caddy-https', containerPort: 443 },
+                  ],
+                  volumeMounts: caddyVolumeMounts,
+                },
+              ],
+              volumes,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  lbReady?: boolean
+  isMinikube?: boolean
+}

+ 29 - 0
devops/infrastructure/query-node/configMap.ts

@@ -0,0 +1,29 @@
+import * as pulumi from '@pulumi/pulumi'
+import * as k8s from '@pulumi/kubernetes'
+import * as fs from 'fs'
+
+export class configMapFromFile extends pulumi.ComponentResource {
+  public readonly configName?: pulumi.Output<string>
+
+  constructor(name: string, args: ConfigMapArgs, opts: pulumi.ComponentResourceOptions = {}) {
+    super('pkg:query-node:configMap', name, {}, opts)
+
+    this.configName = new k8s.core.v1.ConfigMap(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+        },
+        data: {
+          'fileData': fs.readFileSync(args.filePath).toString(),
+        },
+      },
+      opts
+    ).metadata.apply((m) => m.name)
+  }
+}
+
+export interface ConfigMapArgs {
+  filePath: string
+  namespaceName: pulumi.Output<string>
+}

+ 452 - 0
devops/infrastructure/query-node/index.ts

@@ -0,0 +1,452 @@
+import * as awsx from '@pulumi/awsx'
+import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
+import * as pulumi from '@pulumi/pulumi'
+import { configMapFromFile } from './configMap'
+import * as k8s from '@pulumi/kubernetes'
+import * as s3Helpers from './s3Helpers'
+import { CaddyServiceDeployment } from './caddy'
+import { workers } from 'cluster'
+// import * as fs from 'fs'
+
+require('dotenv').config()
+
+const config = new pulumi.Config()
+const awsConfig = new pulumi.Config('aws')
+const isMinikube = config.getBoolean('isMinikube')
+export let kubeconfig: pulumi.Output<any>
+export let joystreamAppsImage: pulumi.Output<string>
+let provider: k8s.Provider
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+
+  // Create image from local app
+  joystreamAppsImage = new docker.Image('joystream/apps', {
+    build: {
+      context: '../../../',
+      dockerfile: '../../../apps.Dockerfile',
+    },
+    imageName: 'joystream/apps:latest',
+    skipPush: true,
+  }).baseImageName
+  // joystreamAppsImage = pulumi.interpolate`joystream/apps`
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-my-cluster', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    desiredCapacity: 3,
+    maxSize: 3,
+    instanceType: 't2.large',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+
+  // Create a repository
+  const repo = new awsx.ecr.Repository('joystream/apps')
+
+  joystreamAppsImage = repo.buildAndPushImage({
+    dockerfile: '../../../apps.Dockerfile',
+    context: '../../../',
+  })
+}
+
+const resourceOptions = { provider: provider }
+
+const name = 'query-node'
+
+// Create a Kubernetes Namespace
+// const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const appLabels = { appClass: name }
+
+// Create a Deployment
+const databaseLabels = { app: 'postgres-db' }
+
+const pvc = new k8s.core.v1.PersistentVolumeClaim(
+  `db-pvc`,
+  {
+    metadata: {
+      labels: databaseLabels,
+      namespace: namespaceName,
+      name: `db-pvc`,
+    },
+    spec: {
+      accessModes: ['ReadWriteOnce'],
+      resources: {
+        requests: {
+          storage: `10Gi`,
+        },
+      },
+    },
+  },
+  resourceOptions
+)
+
+const databaseDeployment = new k8s.apps.v1.Deployment(
+  'postgres-db',
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: databaseLabels,
+    },
+    spec: {
+      selector: { matchLabels: databaseLabels },
+      template: {
+        metadata: { labels: databaseLabels },
+        spec: {
+          containers: [
+            {
+              name: 'postgres-db',
+              image: 'postgres:12',
+              env: [
+                { name: 'POSTGRES_USER', value: process.env.DB_USER! },
+                { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
+                { name: 'POSTGRES_DB', value: process.env.INDEXER_DB_NAME! },
+              ],
+              ports: [{ containerPort: 5432 }],
+              volumeMounts: [
+                {
+                  name: 'postgres-data',
+                  mountPath: '/var/lib/postgresql/data',
+                  subPath: 'postgres',
+                },
+              ],
+            },
+          ],
+          volumes: [
+            {
+              name: 'postgres-data',
+              persistentVolumeClaim: {
+                claimName: `db-pvc`,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  resourceOptions
+)
+
+const databaseService = new k8s.core.v1.Service(
+  'postgres-db',
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: databaseDeployment.metadata.labels,
+      name: 'postgres-db',
+    },
+    spec: {
+      ports: [{ port: 5432 }],
+      selector: databaseDeployment.spec.template.metadata.labels,
+    },
+  },
+  resourceOptions
+)
+
+const migrationJob = new k8s.batch.v1.Job(
+  'db-migration',
+  {
+    metadata: {
+      namespace: namespaceName,
+    },
+    spec: {
+      backoffLimit: 0,
+      template: {
+        spec: {
+          containers: [
+            {
+              name: 'db-migration',
+              image: joystreamAppsImage,
+              imagePullPolicy: 'IfNotPresent',
+              resources: { requests: { cpu: '100m', memory: '100Mi' } },
+              env: [
+                {
+                  name: 'WARTHOG_DB_HOST',
+                  value: 'postgres-db',
+                },
+                {
+                  name: 'DB_HOST',
+                  value: 'postgres-db',
+                },
+                { name: 'DB_NAME', value: process.env.DB_NAME! },
+                { name: 'DB_PASS', value: process.env.DB_PASS! },
+              ],
+              command: ['/bin/sh', '-c'],
+              args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
+            },
+          ],
+          restartPolicy: 'Never',
+        },
+      },
+    },
+  },
+  { ...resourceOptions, dependsOn: databaseService }
+)
+
+const membersFilePath = config.get('membersFilePath')
+  ? config.get('membersFilePath')!
+  : '../../../query-node/mappings/bootstrap/data/members.json'
+const workersFilePath = config.get('workersFilePath')
+  ? config.get('workersFilePath')!
+  : '../../../query-node/mappings/bootstrap/data/workers.json'
+
+const dataBucket = new s3Helpers.FileBucket('bootstrap-data', {
+  files: [
+    { path: membersFilePath, name: 'members.json' },
+    { path: workersFilePath, name: 'workers.json' },
+  ],
+  policy: s3Helpers.publicReadPolicy,
+})
+
+const membersUrl = dataBucket.getUrlForFile('members.json')
+const workersUrl = dataBucket.getUrlForFile('workers.json')
+
+const dataPath = '/joystream/query-node/mappings/bootstrap/data'
+
+const processorJob = new k8s.batch.v1.Job(
+  'processor-migration',
+  {
+    metadata: {
+      namespace: namespaceName,
+    },
+    spec: {
+      backoffLimit: 0,
+      template: {
+        spec: {
+          initContainers: [
+            {
+              name: 'curl-init',
+              image: 'appropriate/curl',
+              command: ['/bin/sh', '-c'],
+              args: [
+                pulumi.interpolate`curl -o ${dataPath}/workers.json ${workersUrl}; curl -o ${dataPath}/members.json ${membersUrl}; ls -al ${dataPath};`,
+              ],
+              volumeMounts: [
+                {
+                  name: 'bootstrap-data',
+                  mountPath: dataPath,
+                },
+              ],
+            },
+          ],
+          containers: [
+            {
+              name: 'processor-migration',
+              image: joystreamAppsImage,
+              imagePullPolicy: 'IfNotPresent',
+              env: [
+                {
+                  name: 'INDEXER_ENDPOINT_URL',
+                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
+                },
+                { name: 'TYPEORM_HOST', value: 'postgres-db' },
+                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
+                { name: 'DEBUG', value: 'index-builder:*' },
+                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+              ],
+              volumeMounts: [
+                {
+                  name: 'bootstrap-data',
+                  mountPath: dataPath,
+                },
+              ],
+              args: ['workspace', 'query-node-root', 'processor:bootstrap'],
+            },
+          ],
+          restartPolicy: 'Never',
+          volumes: [
+            {
+              name: 'bootstrap-data',
+              emptyDir: {},
+            },
+          ],
+        },
+      },
+    },
+  },
+  { ...resourceOptions, dependsOn: migrationJob }
+)
+
+const defsConfig = new configMapFromFile(
+  'defs-config',
+  {
+    filePath: '../../../types/augment/all/defs.json',
+    namespaceName: namespaceName,
+  },
+  resourceOptions
+).configName
+
+const deployment = new k8s.apps.v1.Deployment(
+  name,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'redis',
+              image: 'redis:6.0-alpine',
+              ports: [{ containerPort: 6379 }],
+            },
+            {
+              name: 'indexer',
+              image: 'joystream/hydra-indexer:2.1.0-beta.9',
+              env: [
+                { name: 'DB_HOST', value: 'postgres-db' },
+                { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
+                { name: 'DB_PASS', value: process.env.DB_PASS! },
+                { name: 'INDEXER_WORKERS', value: '5' },
+                { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
+                { name: 'DEBUG', value: 'index-builder:*' },
+                { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+                { name: 'TYPES_JSON', value: 'types.json' },
+                { name: 'PGUSER', value: process.env.DB_USER! },
+                { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
+              ],
+              volumeMounts: [
+                {
+                  mountPath: '/home/hydra/packages/hydra-indexer/types.json',
+                  name: 'indexer-volume',
+                  subPath: 'fileData',
+                },
+              ],
+              command: ['/bin/sh', '-c'],
+              args: ['yarn db:bootstrap && yarn start:prod'],
+            },
+            {
+              name: 'hydra-indexer-gateway',
+              image: 'joystream/hydra-indexer-gateway:2.1.0-beta.5',
+              env: [
+                { name: 'WARTHOG_STARTER_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
+                { name: 'WARTHOG_STARTER_DB_HOST', value: 'postgres-db' },
+                { name: 'WARTHOG_STARTER_DB_PASSWORD', value: process.env.DB_PASS! },
+                { name: 'WARTHOG_STARTER_DB_PORT', value: process.env.DB_PORT! },
+                { name: 'WARTHOG_STARTER_DB_USERNAME', value: process.env.DB_USER! },
+                { name: 'WARTHOG_STARTER_REDIS_URI', value: 'redis://localhost:6379/0' },
+                { name: 'WARTHOG_APP_PORT', value: process.env.WARTHOG_APP_PORT! },
+                { name: 'PORT', value: process.env.WARTHOG_APP_PORT! },
+                { name: 'DEBUG', value: '*' },
+              ],
+              ports: [{ containerPort: 4002 }],
+            },
+            {
+              name: 'processor',
+              image: joystreamAppsImage,
+              imagePullPolicy: 'IfNotPresent',
+              env: [
+                {
+                  name: 'INDEXER_ENDPOINT_URL',
+                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
+                },
+                { name: 'TYPEORM_HOST', value: 'postgres-db' },
+                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
+                { name: 'DEBUG', value: 'index-builder:*' },
+                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+              ],
+              volumeMounts: [
+                {
+                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
+                  name: 'processor-volume',
+                  subPath: 'fileData',
+                },
+              ],
+              command: ['/bin/sh', '-c'],
+              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
+            },
+            {
+              name: 'graphql-server',
+              image: joystreamAppsImage,
+              imagePullPolicy: 'IfNotPresent',
+              env: [
+                { name: 'DB_HOST', value: 'postgres-db' },
+                { name: 'DB_PASS', value: process.env.DB_PASS! },
+                { name: 'DB_USER', value: process.env.DB_USER! },
+                { name: 'DB_PORT', value: process.env.DB_PORT! },
+                { name: 'DB_NAME', value: process.env.DB_NAME! },
+                { name: 'GRAPHQL_SERVER_HOST', value: process.env.GRAPHQL_SERVER_HOST! },
+                { name: 'GRAPHQL_SERVER_PORT', value: process.env.GRAPHQL_SERVER_PORT! },
+              ],
+              ports: [{ name: 'graph-ql-port', containerPort: Number(process.env.GRAPHQL_SERVER_PORT!) }],
+              args: ['workspace', 'query-node-root', 'query-node:start:prod'],
+            },
+          ],
+          volumes: [
+            {
+              name: 'processor-volume',
+              configMap: {
+                name: defsConfig,
+              },
+            },
+            {
+              name: 'indexer-volume',
+              configMap: {
+                name: defsConfig,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  { ...resourceOptions, dependsOn: processorJob }
+)
+
+// Export the Deployment name
+export const deploymentName = deployment.metadata.name
+
+// Create a Service (ClusterIP by default) exposing the query-node GraphQL server and indexer gateway
+const service = new k8s.core.v1.Service(
+  name,
+  {
+    metadata: {
+      labels: appLabels,
+      namespace: namespaceName,
+      name: 'query-node',
+    },
+    spec: {
+      ports: [
+        { name: 'port-1', port: 8081, targetPort: 'graph-ql-port' },
+        { name: 'port-2', port: 4000, targetPort: 4002 },
+      ],
+      selector: appLabels,
+    },
+  },
+  resourceOptions
+)
+
+// Export the Service name and public LoadBalancer Endpoint
+export const serviceName = service.metadata.name
+
+// When "done", this will print the public IP.
+// export let serviceHostname: pulumi.Output<string>
+
+// serviceHostname = service.status.loadBalancer.ingress[0].hostname
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+const caddy = new CaddyServiceDeployment('caddy-proxy', { lbReady, namespaceName: namespaceName }, resourceOptions)
+
+export const endpoint1 = caddy.primaryEndpoint
+export const endpoint2 = caddy.secondaryEndpoint

+ 17 - 0
devops/infrastructure/query-node/package.json

@@ -0,0 +1,17 @@
+{
+  "name": "query-node",
+  "devDependencies": {
+    "@types/node": "^10.0.0"
+  },
+  "dependencies": {
+    "@pulumi/aws": "^4.0.0",
+    "@pulumi/awsx": "^0.30.0",
+    "@pulumi/eks": "^0.31.0",
+    "@pulumi/kubernetes": "^3.0.0",
+    "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
+    "dotenv": "^10.0.0",
+    "mime": "^2.5.2",
+    "@types/mime": "^2.0.0"
+  }
+}

+ 73 - 0
devops/infrastructure/query-node/s3Helpers.ts

@@ -0,0 +1,73 @@
+import * as fs from 'fs'
+import * as mime from 'mime'
+
+import * as aws from '@pulumi/aws'
+import * as pulumi from '@pulumi/pulumi'
+
+interface FileObject {
+  name: string
+  path: string
+}
+
+export interface FileBucketOpts {
+  files: FileObject[]
+  policy?: (bucket: aws.s3.Bucket) => pulumi.Output<string>
+}
+
+export class FileBucket {
+  public readonly bucket: aws.s3.Bucket
+  public readonly files: { [key: string]: aws.s3.BucketObject }
+  public readonly policy: aws.s3.BucketPolicy | undefined
+
+  private readonly fileContents: { [key: string]: string }
+
+  constructor(bucketName: string, opts: FileBucketOpts) {
+    this.bucket = new aws.s3.Bucket(bucketName)
+    this.fileContents = {}
+    this.files = {}
+    for (const file of opts.files) {
+      this.fileContents[file.name] = fs.readFileSync(file.path).toString()
+      this.files[file.name] = new aws.s3.BucketObject(file.name, {
+        bucket: this.bucket,
+        source: new pulumi.asset.FileAsset(file.path),
+        contentType: mime.getType(file.path) || undefined,
+      })
+    }
+
+    if (opts.policy !== undefined) {
+      // Set the access policy for the bucket so all objects are readable
+      this.policy = new aws.s3.BucketPolicy(`bucketPolicy`, {
+        bucket: this.bucket.bucket,
+        // policy: this.bucket.bucket.apply(publicReadPolicyForBucket)
+        policy: opts.policy(this.bucket),
+      })
+    }
+  }
+
+  getUrlForFile(file: string): pulumi.Output<string> {
+    if (!(file in this.files)) {
+      throw new Error(`Bucket does not have file '${file}'`)
+    }
+
+    return pulumi.all([this.bucket.bucketDomainName, this.files[file].id]).apply(([domain, id]) => `${domain}/${id}`)
+  }
+}
+
+// Create an S3 Bucket Policy to allow public read of all objects in bucket
+export function publicReadPolicy(bucket: aws.s3.Bucket): pulumi.Output<string> {
+  return bucket.bucket.apply((bucketName) =>
+    JSON.stringify({
+      Version: '2012-10-17',
+      Statement: [
+        {
+          Effect: 'Allow',
+          Principal: '*',
+          Action: ['s3:GetObject'],
+          Resource: [
+            `arn:aws:s3:::${bucketName}/*`, // policy refers to bucket name explicitly
+          ],
+        },
+      ],
+    })
+  )
+}

+ 18 - 0
devops/infrastructure/query-node/tsconfig.json

@@ -0,0 +1,18 @@
+{
+    "compilerOptions": {
+        "strict": true,
+        "outDir": "bin",
+        "target": "es2016",
+        "module": "commonjs",
+        "moduleResolution": "node",
+        "sourceMap": true,
+        "experimentalDecorators": true,
+        "pretty": true,
+        "noFallthroughCasesInSwitch": true,
+        "noImplicitReturns": true,
+        "forceConsistentCasingInFileNames": true
+    },
+    "files": [
+        "index.ts"
+    ]
+}