
Upload bootstrap data files to S3, add init container

Anuj Bansal 3 years ago
parent
commit
c0c07296f9

+ 43 - 44
devops/infrastructure/query-node/index.ts

@@ -4,6 +4,9 @@ import * as docker from '@pulumi/docker'
 import * as pulumi from '@pulumi/pulumi'
 import { configMapFromFile } from './configMap'
 import * as k8s from '@pulumi/kubernetes'
+import * as s3Helpers from './s3Helpers'
+import { workers } from 'cluster'
+// import * as fs from 'fs'
 
 require('dotenv').config()
 
@@ -69,15 +72,6 @@ export const namespaceName = ns.metadata.name
 
 const appLabels = { appClass: name }
 
-const defsConfig = new configMapFromFile(
-  'defs-config',
-  {
-    filePath: '../../../types/augment/all/defs.json',
-    namespaceName: namespaceName,
-  },
-  resourceOptions
-).configName
-
 // Create a Deployment
 const databaseLabels = { app: 'postgres-db' }
 
@@ -210,23 +204,18 @@ const workersFilePath = config.get('workersFilePath')
   ? config.get('workersFilePath')!
   : '../../../query-node/mappings/bootstrap/data/workers.json'
 
-const membersConfig = new configMapFromFile(
-  'processor-config-members',
-  {
-    filePath: membersFilePath,
-    namespaceName: namespaceName,
-  },
-  resourceOptions
-).configName
+const dataBucket = new s3Helpers.FileBucket('bootstrap-data', {
+  files: [
+    { path: membersFilePath, name: 'members.json' },
+    { path: workersFilePath, name: 'workers.json' },
+  ],
+  policy: s3Helpers.publicReadPolicy,
+})
 
-const workersConfig = new configMapFromFile(
-  'processor-config-workers',
-  {
-    filePath: workersFilePath,
-    namespaceName: namespaceName,
-  },
-  resourceOptions
-).configName
+const membersUrl = dataBucket.getUrlForFile('members.json')
+const workersUrl = dataBucket.getUrlForFile('workers.json')
+
+const dataPath = '/joystream/query-node/mappings/bootstrap/data'
 
 const processorJob = new k8s.batch.v1.Job(
   'processor-migration',
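
Because getUrlForFile returns a pulumi.Output<string> rather than a plain string, the URL cannot be spliced into the curl command with an ordinary template literal; the init-container hunk below threads it through pulumi.interpolate instead. A minimal sketch of the same pattern, using a hypothetical bucket and file that are not part of this diff:

import * as aws from '@pulumi/aws'
import * as pulumi from '@pulumi/pulumi'

// Hypothetical bucket and object, used only to illustrate composing Outputs.
const bucket = new aws.s3.Bucket('example-bucket')
const membersObject = new aws.s3.BucketObject('members.json', {
  bucket: bucket,
  source: new pulumi.asset.FileAsset('./members.json'),
})

// bucketDomainName and id are Outputs; interpolate lifts them into a single Output<string>.
const url: pulumi.Output<string> = pulumi.interpolate`${bucket.bucketDomainName}/${membersObject.id}`

// The same Output can then be embedded directly in a container command.
const fetchCommand = pulumi.interpolate`curl -o /data/members.json ${url}`
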
@@ -238,6 +227,22 @@ const processorJob = new k8s.batch.v1.Job(
       backoffLimit: 0,
       template: {
         spec: {
+          initContainers: [
+            {
+              name: 'curl-init',
+              image: 'appropriate/curl',
+              command: ['/bin/sh', '-c'],
+              args: [
+                pulumi.interpolate`curl -o ${dataPath}/workers.json ${workersUrl}; curl -o ${dataPath}/members.json ${membersUrl}; ls -al ${dataPath};`,
+              ],
+              volumeMounts: [
+                {
+                  name: 'bootstrap-data',
+                  mountPath: dataPath,
+                },
+              ],
+            },
+          ],
           containers: [
             {
               name: 'processor-migration',
@@ -255,14 +260,8 @@ const processorJob = new k8s.batch.v1.Job(
               ],
               volumeMounts: [
                 {
-                  mountPath: '/joystream/query-node/mappings/bootstrap/data/members.json',
-                  name: 'processor-volume-members',
-                  subPath: 'fileData',
-                },
-                {
-                  mountPath: '/joystream/query-node/mappings/bootstrap/data/workers.json',
-                  name: 'processor-volume-workers',
-                  subPath: 'fileData',
+                  name: 'bootstrap-data',
+                  mountPath: dataPath,
                 },
               ],
               args: ['workspace', 'query-node-root', 'processor:bootstrap'],
@@ -271,16 +270,8 @@ const processorJob = new k8s.batch.v1.Job(
           restartPolicy: 'Never',
           volumes: [
             {
-              name: 'processor-volume-members',
-              configMap: {
-                name: membersConfig,
-              },
-            },
-            {
-              name: 'processor-volume-workers',
-              configMap: {
-                name: workersConfig,
-              },
+              name: 'bootstrap-data',
+              emptyDir: {},
             },
           ],
         },
@@ -288,9 +279,17 @@ const processorJob = new k8s.batch.v1.Job(
     },
   },
   { ...resourceOptions, dependsOn: migrationJob }
-  // { provider: provider }
 )
 
+const defsConfig = new configMapFromFile(
+  'defs-config',
+  {
+    filePath: '../../../types/augment/all/defs.json',
+    namespaceName: namespaceName,
+  },
+  resourceOptions
+).configName
+
 const deployment = new k8s.apps.v1.Deployment(
   name,
   {

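The ConfigMap-mounted members.json and workers.json are replaced by an init container: appropriate/curl downloads both files from the bucket into an emptyDir volume that the processor-migration container mounts at the bootstrap data path. A stripped-down sketch of that init-container/emptyDir pattern, with placeholder names and a placeholder URL rather than values from this deployment:

import * as k8s from '@pulumi/kubernetes'

const dataPath = '/data'
const fileUrl = 'https://example.com/bootstrap.json' // placeholder, not from the diff

export const pod = new k8s.core.v1.Pod('bootstrap-example', {
  spec: {
    // Init containers run to completion before the main containers start.
    initContainers: [
      {
        name: 'fetch-data',
        image: 'appropriate/curl',
        command: ['/bin/sh', '-c'],
        args: [`curl -o ${dataPath}/bootstrap.json ${fileUrl}`],
        volumeMounts: [{ name: 'shared-data', mountPath: dataPath }],
      },
    ],
    containers: [
      {
        name: 'consumer',
        image: 'busybox',
        command: ['/bin/sh', '-c', `ls -al ${dataPath} && sleep 3600`],
        volumeMounts: [{ name: 'shared-data', mountPath: dataPath }],
      },
    ],
    // emptyDir lives for the lifetime of the pod and is shared by all of its containers.
    volumes: [{ name: 'shared-data', emptyDir: {} }],
  },
})
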
+ 3 - 1
devops/infrastructure/query-node/package.json

@@ -10,6 +10,8 @@
     "@pulumi/kubernetes": "^3.0.0",
     "@pulumi/pulumi": "^3.0.0",
     "@pulumi/docker": "^3.0.0",
-    "dotenv": "^10.0.0"
+    "dotenv": "^10.0.0",
+    "mime": "^2.5.2",
+    "@types/mime": "^2.0.0"
   }
 }

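mime (plus @types/mime) is pulled in for the contentType lookup in the new s3Helpers.ts below: mime.getType maps a file path to its MIME type, or null when the extension is unknown, which is why the helper falls back to undefined. For example:

import * as mime from 'mime'

// mime@2: getType resolves a path or extension to a MIME type string.
mime.getType('members.json') // 'application/json'
mime.getType('notes.txt')    // 'text/plain'
mime.getType('no-extension') // null -> the `|| undefined` in s3Helpers.ts leaves contentType unset
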
+ 73 - 0
devops/infrastructure/query-node/s3Helpers.ts

@@ -0,0 +1,73 @@
+import * as fs from 'fs'
+import * as mime from 'mime'
+
+import * as aws from '@pulumi/aws'
+import * as pulumi from '@pulumi/pulumi'
+
+interface FileObject {
+  name: string
+  path: string
+}
+
+export interface FileBucketOpts {
+  files: FileObject[]
+  policy?: (bucket: aws.s3.Bucket) => pulumi.Output<string>
+}
+
+export class FileBucket {
+  public readonly bucket: aws.s3.Bucket
+  public readonly files: { [key: string]: aws.s3.BucketObject }
+  public readonly policy: aws.s3.BucketPolicy | undefined
+
+  private readonly fileContents: { [key: string]: string }
+
+  constructor(bucketName: string, opts: FileBucketOpts) {
+    this.bucket = new aws.s3.Bucket(bucketName)
+    this.fileContents = {}
+    this.files = {}
+    for (const file of opts.files) {
+      this.fileContents[file.name] = fs.readFileSync(file.path).toString()
+      this.files[file.name] = new aws.s3.BucketObject(file.name, {
+        bucket: this.bucket,
+        source: new pulumi.asset.FileAsset(file.path),
+        contentType: mime.getType(file.path) || undefined,
+      })
+    }
+
+    if (opts.policy !== undefined) {
+      // Set the access policy for the bucket so all objects are readable
+      this.policy = new aws.s3.BucketPolicy(`bucketPolicy`, {
+        bucket: this.bucket.bucket,
+        // policy: this.bucket.bucket.apply(publicReadPolicyForBucket)
+        policy: opts.policy(this.bucket),
+      })
+    }
+  }
+
+  getUrlForFile(file: string): pulumi.Output<string> {
+    if (!(file in this.files)) {
+      throw new Error(`Bucket does not have file '${file}'`)
+    }
+
+    return pulumi.all([this.bucket.bucketDomainName, this.files[file].id]).apply(([domain, id]) => `${domain}/${id}`)
+  }
+}
+
+// Create an S3 Bucket Policy to allow public read of all objects in bucket
+export function publicReadPolicy(bucket: aws.s3.Bucket): pulumi.Output<string> {
+  return bucket.bucket.apply((bucketName) =>
+    JSON.stringify({
+      Version: '2012-10-17',
+      Statement: [
+        {
+          Effect: 'Allow',
+          Principal: '*',
+          Action: ['s3:GetObject'],
+          Resource: [
+            `arn:aws:s3:::${bucketName}/*`, // policy refers to bucket name explicitly
+          ],
+        },
+      ],
+    })
+  )
+}
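
Taken together with the index.ts changes above, the helper is consumed roughly like this; the local file path here is a placeholder:

import * as pulumi from '@pulumi/pulumi'
import * as s3Helpers from './s3Helpers'

// Upload a local bootstrap file into a new bucket and make its objects world-readable.
const dataBucket = new s3Helpers.FileBucket('bootstrap-data', {
  files: [{ path: './data/members.json', name: 'members.json' }],
  policy: s3Helpers.publicReadPolicy,
})

// Throws if the name was never registered; otherwise resolves to '<bucket domain>/<object key>'.
export const membersUrl: pulumi.Output<string> = dataBucket.getUrlForFile('members.json')

Note that publicReadPolicy grants anonymous s3:GetObject on every object in the bucket, so it should only be passed for data that is safe to expose publicly.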