Forráskód Böngészése

Merge branch 'master-rollback-nightly-2021-02-20' into giza-update-master

Mokhtar Naamani 3 éve
szülő
commit
3ffe88f608
100 módosított fájl, 1459 hozzáadás és 432 törlés
  1. 58 47
      .github/workflows/create-ami.yml
  2. 13 4
      .github/workflows/joystream-node-docker.yml
  3. 2 0
      .github/workflows/network-tests.yml
  4. 2 0
      .github/workflows/run-network-tests.yml
  5. 1 1
      README.md
  6. 0 0
      devops/aws/.gitignore
  7. 8 2
      devops/aws/README.md
  8. 0 0
      devops/aws/ansible.cfg
  9. 45 0
      devops/aws/build-arm64-playbook.yml
  10. 0 0
      devops/aws/build-code.yml
  11. 0 0
      devops/aws/chain-spec-pioneer.yml
  12. 13 0
      devops/aws/cloudformation/infrastructure.yml
  13. 6 0
      devops/aws/cloudformation/single-instance-docker.yml
  14. 3 0
      devops/aws/cloudformation/single-instance.yml
  15. 0 0
      devops/aws/common.sh
  16. 39 0
      devops/aws/create-joystream-node-ami-playbook.yml
  17. 5 9
      devops/aws/deploy-infra.sample.cfg
  18. 3 2
      devops/aws/deploy-infra.sh
  19. 0 0
      devops/aws/deploy-single-node-playbook.yml
  20. 18 0
      devops/aws/deploy-single-node.sample.cfg
  21. 3 3
      devops/aws/deploy-single-node.sh
  22. 1 1
      devops/aws/destroy-infra.sh
  23. 0 0
      devops/aws/group_vars/all
  24. 0 0
      devops/aws/library/json_modify.py
  25. 0 0
      devops/aws/requirements.yml
  26. 0 0
      devops/aws/roles/admin/tasks/deploy-pioneer.yml
  27. 3 0
      devops/aws/roles/admin/tasks/main.yml
  28. 96 0
      devops/aws/roles/common/tasks/chain-spec-node-keys.yml
  29. 0 0
      devops/aws/roles/common/tasks/get-code-git.yml
  30. 0 0
      devops/aws/roles/common/tasks/get-code-local.yml
  31. 7 4
      devops/aws/roles/common/tasks/run-setup-build.yml
  32. 0 0
      devops/aws/roles/node/templates/joystream-node.service.j2
  33. 0 0
      devops/aws/roles/rpc/tasks/main.yml
  34. 0 0
      devops/aws/roles/rpc/templates/Caddyfile.j2
  35. 0 0
      devops/aws/roles/rpc/templates/joystream-node.service.j2
  36. 0 0
      devops/aws/roles/validators/tasks/main.yml
  37. 0 0
      devops/aws/roles/validators/templates/joystream-node.service.j2
  38. 0 0
      devops/aws/setup-admin.yml
  39. 3 3
      devops/git-hooks/pre-push
  40. 0 50
      devops/infrastructure/build-arm64-playbook.yml
  41. 0 45
      devops/infrastructure/github-action-playbook.yml
  42. 0 76
      devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml
  43. 1 1
      devops/kubernetes/node-network/.gitignore
  44. 0 0
      devops/kubernetes/node-network/Pulumi.yaml
  45. 0 0
      devops/kubernetes/node-network/README.md
  46. 0 0
      devops/kubernetes/node-network/configMap.ts
  47. 0 0
      devops/kubernetes/node-network/index.ts
  48. 0 0
      devops/kubernetes/node-network/json_modify.py
  49. 0 0
      devops/kubernetes/node-network/nfsVolume.ts
  50. 0 0
      devops/kubernetes/node-network/package.json
  51. 0 0
      devops/kubernetes/node-network/tsconfig.json
  52. 0 0
      devops/kubernetes/node-network/utils.ts
  53. 0 0
      devops/kubernetes/node-network/validator.ts
  54. 0 0
      devops/kubernetes/pulumi-common/caddy.ts
  55. 0 0
      devops/kubernetes/pulumi-common/index.ts
  56. 0 0
      devops/kubernetes/pulumi-common/package.json
  57. 0 0
      devops/kubernetes/pulumi-common/tsconfig.json
  58. 1 1
      devops/kubernetes/query-node/.gitignore
  59. 3 0
      devops/kubernetes/query-node/Pulumi.yaml
  60. 2 1
      devops/kubernetes/query-node/README.md
  61. 0 0
      devops/kubernetes/query-node/configMap.ts
  62. 92 57
      devops/kubernetes/query-node/index.ts
  63. 0 0
      devops/kubernetes/query-node/package.json
  64. 0 0
      devops/kubernetes/query-node/s3Helpers.ts
  65. 0 0
      devops/kubernetes/query-node/tsconfig.json
  66. 1 1
      devops/kubernetes/storage-node/.gitignore
  67. 0 0
      devops/kubernetes/storage-node/Pulumi.yaml
  68. 2 2
      devops/kubernetes/storage-node/README.md
  69. 56 28
      devops/kubernetes/storage-node/index.ts
  70. 1 0
      devops/kubernetes/storage-node/package.json
  71. 0 0
      devops/kubernetes/storage-node/tsconfig.json
  72. 5 5
      joystream-node-armv7.Dockerfile
  73. 5 5
      joystream-node.Dockerfile
  74. 2 2
      node/README.md
  75. 1 1
      pioneer/packages/joy-proposals/src/Proposal/Body.tsx
  76. 2 1
      pioneer/packages/joy-proposals/src/Proposal/ProposalDetails.tsx
  77. 2 2
      pioneer/packages/joy-utils/src/transport/proposals.ts
  78. 284 0
      query-node/mappings/src/content/channel.ts
  79. 280 0
      query-node/mappings/src/storage.ts
  80. 2 2
      scripts/cargo-build.sh
  81. 2 2
      scripts/cargo-tests-with-networking.sh
  82. 1 1
      scripts/raspberry-cross-build.sh
  83. 4 4
      scripts/run-dev-chain.sh
  84. 4 4
      setup.sh
  85. 7 0
      storage-node/packages/helios/README.md
  86. 5 0
      tests/network-tests/.env
  87. 2 0
      tests/network-tests/.gitignore
  88. 13 13
      tests/network-tests/run-tests.sh
  89. 228 34
      tests/network-tests/src/Api.ts
  90. 30 4
      tests/network-tests/src/Scenario.ts
  91. 25 0
      tests/network-tests/src/fixtures/councilAssignment.ts
  92. 9 4
      tests/network-tests/src/fixtures/membershipModule.ts
  93. 1 1
      tests/network-tests/src/fixtures/proposalsModule.ts
  94. 2 2
      tests/network-tests/src/fixtures/workingGroupModule.ts
  95. 48 0
      tests/network-tests/src/flows/council/assign.ts
  96. 2 2
      tests/network-tests/src/flows/council/setup.ts
  97. 2 2
      tests/network-tests/src/flows/membership/creatingMemberships.ts
  98. 1 1
      tests/network-tests/src/flows/proposals/manageLeaderRole.ts
  99. 1 1
      tests/network-tests/src/flows/proposals/updateRuntime.ts
  100. 1 1
      tests/network-tests/src/flows/workingGroup/leaderSetup.ts

+ 58 - 47
.github/workflows/create-ami.yml

@@ -1,3 +1,5 @@
+# Creates an AWS AMI (system image) with compiled joystream-node and subkey
+# 
 name: Create AWS AMI
 
 on:
@@ -8,52 +10,61 @@ jobs:
     name: Build the code and run setup
     runs-on: ubuntu-latest
     env:
-      STACK_NAME: joystream-github-action-${{ github.run_number }}
+      STACK_NAME: create-joystream-node-ami-ga-${{ github.run_number }}
       KEY_NAME: joystream-github-action-key
     steps:
-    - name: Extract branch name
-      shell: bash
-      run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
-      id: extract_branch
-
-    - name: Set AMI Name environment variable
-      shell: bash
-      run: echo "ami_name=joystream-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
-      id: ami_name
-
-    - name: Checkout
-      uses: actions/checkout@v2
-
-    - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v1
-      with:
-        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-        aws-region: us-east-1
-
-    - name: Deploy to AWS CloudFormation
-      uses: aws-actions/aws-cloudformation-github-deploy@v1
-      id: deploy_stack
-      with:
-        name: ${{ env.STACK_NAME }}
-        template: devops/infrastructure/single-instance.yml
-        no-fail-on-empty-changeset: "1"
-        parameter-overrides: "KeyName=${{ env.KEY_NAME }}"
-
-    - name: Install Ansible dependencies
-      run: pipx inject ansible-core boto3 botocore
-
-    - name: Run playbook
-      uses: dawidd6/action-ansible-playbook@v2
-      with:
-        playbook: github-action-playbook.yml
-        directory: devops/infrastructure
-        requirements: requirements.yml
-        key: ${{ secrets.SSH_PRIVATE_KEY }}
-        inventory: |
-          [all]
-          ${{ steps.deploy_stack.outputs.PublicIp }}
-        options: |
-          --extra-vars "git_repo=https://github.com/${{ github.repository }} \
-                        branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
-                        stack_name=${{ env.STACK_NAME }} ami_name=${{ env.ami_name }}"
+      - name: Extract branch name
+        shell: bash
+        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        id: extract_branch
+
+      - name: Set AMI Name environment variable
+        shell: bash
+        run: echo "ami_name=joystream-node-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
+        id: ami_name
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: us-east-1
+
+      - name: Deploy to AWS CloudFormation
+        uses: aws-actions/aws-cloudformation-github-deploy@v1
+        id: deploy_stack
+        with:
+          name: ${{ env.STACK_NAME }}
+          template: devops/aws/cloudformation/single-instance.yml
+          no-fail-on-empty-changeset: '1'
+          parameter-overrides: 'KeyName=${{ env.KEY_NAME }}'
+
+      - name: Install Ansible dependencies
+        run: pipx inject ansible-core boto3 botocore
+
+      - name: Run playbook
+        uses: dawidd6/action-ansible-playbook@v2
+        with:
+          playbook: create-joystream-node-ami-playbook.yml
+          directory: devops/aws
+          requirements: requirements.yml
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          inventory: |
+            [all]
+            ${{ steps.deploy_stack.outputs.PublicIp }}
+          options: |
+            --extra-vars "git_repo=https://github.com/${{ github.repository }} \
+                          branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
+                          ami_name=${{ env.ami_name }}"
+
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}

+ 13 - 4
.github/workflows/joystream-node-docker.yml

@@ -71,7 +71,7 @@ jobs:
             platform_tag: 'arm'
             file: 'joystream-node-armv7.Dockerfile'
     env:
-      STACK_NAME: joystream-ga-docker-${{ github.run_number }}-${{ matrix.platform_tag }}
+      STACK_NAME: build-joystream-node-docker-ga-${{ github.run_number }}-${{ matrix.platform_tag }}
     steps:
       - name: Extract branch name
         shell: bash
@@ -120,7 +120,7 @@ jobs:
         id: deploy_stack
         with:
           name: ${{ env.STACK_NAME }}
-          template: devops/infrastructure/single-instance-docker.yml
+          template: devops/aws/cloudformation/single-instance-docker.yml
           no-fail-on-empty-changeset: '1'
           parameter-overrides: 'KeyName=${{ env.KEY_NAME }},EC2AMI=ami-00d1ab6b335f217cf,EC2InstanceType=t4g.xlarge'
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
@@ -129,7 +129,7 @@ jobs:
         uses: dawidd6/action-ansible-playbook@v2
         with:
           playbook: build-arm64-playbook.yml
-          directory: devops/infrastructure
+          directory: devops/aws
           requirements: requirements.yml
           key: ${{ secrets.SSH_PRIVATE_KEY }}
           inventory: |
@@ -142,9 +142,18 @@ jobs:
                           docker_password=${{ secrets.DOCKERHUB_PASSWORD }} \
                           tag_name=${{ steps.compute_shasum.outputs.shasum }}-${{ matrix.platform_tag }} \
                           repository=${{ env.REPOSITORY }} dockerfile=${{ matrix.file }} \
-                          stack_name=${{ env.STACK_NAME }} platform=${{ matrix.platform }}"
+                          platform=${{ matrix.platform }}"
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
 
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}
+
   push-manifest:
     name: Create manifest using both the arch images
     needs: [push-amd64, push-arm]

+ 2 - 0
.github/workflows/network-tests.yml

@@ -18,6 +18,7 @@ jobs:
       run: |
         yarn install --frozen-lockfile
         yarn workspace @joystream/types build
+        yarn workspace @joystream/content-metadata-protobuf build:ts
         yarn workspace network-tests checks --quiet
 
   network_build_osx:
@@ -36,4 +37,5 @@ jobs:
       run: |
         yarn install --frozen-lockfile --network-timeout 120000
         yarn workspace @joystream/types build
+        yarn workspace @joystream/content-metadata-protobuf build:ts
         yarn workspace network-tests checks --quiet

+ 2 - 0
.github/workflows/run-network-tests.yml

@@ -100,6 +100,7 @@ jobs:
         run: |
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
+          yarn workspace @joystream/content-metadata-protobuf build:ts
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
       - name: Execute network tests
@@ -126,6 +127,7 @@ jobs:
         run: |
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
+          yarn workspace @joystream/content-metadata-protobuf build:ts
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
       - name: Execute network tests

+ 1 - 1
README.md

@@ -89,7 +89,7 @@ You can also run your our own joystream-node:
 
 ```sh
 git checkout master
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo build --release
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 build --release
 ./target/release/joystream-node -- --pruning archive --chain testnets/joy-testnet-5.json
 ```
 

+ 0 - 0
devops/infrastructure/.gitignore → devops/aws/.gitignore


+ 8 - 2
devops/infrastructure/README.md → devops/aws/README.md

@@ -26,10 +26,16 @@ On Mac run the command:
 Follow [the official installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for your system.
 
 # How to run
-Copy and edit the file `deploy-config.sample.cfg` and update parameters like AWS_KEY_PAIR_NAME, KEY_PATH
+Copy and edit the file `deploy-infra.sample.cfg` and update parameters like AWS_KEY_PAIR_NAME, KEY_PATH
 Run the `deploy-infra.sh` script to deploy the infrastructure
 
 ```
-cd devops/infrastructure
+cd devops/aws
 ./deploy-infra.sh your-deploy-config.cfg
 ```
+
+# To tear down a network
+
+```
+./destroy-infra.sh your-deploy-config.cfg
+```

+ 0 - 0
devops/infrastructure/ansible.cfg → devops/aws/ansible.cfg


+ 45 - 0
devops/aws/build-arm64-playbook.yml

@@ -0,0 +1,45 @@
+---
+# Setup joystream code, build docker image
+
+- name: Build image and push to docker hub
+  hosts: all
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Install Docker Module for Python
+      pip:
+        name: docker
+
+    - name: Log into DockerHub
+      community.docker.docker_login:
+        username: '{{ docker_username }}'
+        password: '{{ docker_password }}'
+
+    - name: Build an image and push it to a private repo
+      community.docker.docker_image:
+        build:
+          path: ./joystream
+          dockerfile: '{{ dockerfile }}'
+          platform: '{{ platform }}'
+        name: '{{ repository }}'
+        tag: '{{ tag_name }}'
+        push: yes
+        source: build
+      # Run in async fashion for max duration of 2 hours
+      async: 7200
+      poll: 0
+      register: build_result
+
+    - name: Check on build async task
+      async_status:
+        jid: '{{ build_result.ansible_job_id }}'
+      register: job_result
+      until: job_result.finished
+      # Max number of times to check for status
+      retries: 72
+      # Check for the status every 100s
+      delay: 100

+ 0 - 0
devops/infrastructure/build-code.yml → devops/aws/build-code.yml


+ 0 - 0
devops/infrastructure/chain-spec-pioneer.yml → devops/aws/chain-spec-pioneer.yml


+ 13 - 0
devops/infrastructure/infrastructure.yml → devops/aws/cloudformation/infrastructure.yml

@@ -1,3 +1,9 @@
+# Deploy inftrastructure required to run a new joystream chain.
+# This is comprised of:
+#   - N validators
+#   - One RPC node
+#   - s3 bucket with a build of Pionner
+
 AWSTemplateFormatVersion: 2010-09-09
 
 Parameters:
@@ -73,6 +79,10 @@ Resources:
           FromPort: 443
           ToPort: 443
           CidrIp: 0.0.0.0/0
+        - IpProtocol: tcp
+          FromPort: 80
+          ToPort: 80
+          CidrIp: 0.0.0.0/0
         - IpProtocol: tcp
           FromPort: 22
           ToPort: 22
@@ -112,6 +122,9 @@ Resources:
             # Update all packages
             apt-get update -y
 
+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y
 

+ 6 - 0
devops/infrastructure/single-instance-docker.yml → devops/aws/cloudformation/single-instance-docker.yml

@@ -1,3 +1,6 @@
+# Deploys and EC2 node with docker tools suitable for
+# building joystream node docker images
+
 AWSTemplateFormatVersion: 2010-09-09
 
 Parameters:
@@ -58,6 +61,9 @@ Resources:
             # Update all packages
             apt-get update -y
 
+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y
 

+ 3 - 0
devops/infrastructure/single-instance.yml → devops/aws/cloudformation/single-instance.yml

@@ -59,6 +59,9 @@ Resources:
             # Update all packages
             apt-get update -y
 
+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y
 

+ 0 - 0
devops/infrastructure/common.sh → devops/aws/common.sh


+ 39 - 0
devops/aws/create-joystream-node-ami-playbook.yml

@@ -0,0 +1,39 @@
+---
+# Setup joystream code, build and Create AMI
+
+- name: Setup instance
+  hosts: all
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Run setup and build
+      include_role:
+        name: common
+        tasks_from: run-setup-build
+
+    - name: Install subkey
+      include_role:
+        name: admin
+        tasks_from: main
+
+    - name: Basic AMI Creation
+      amazon.aws.ec2_ami:
+        instance_id: '{{ instance_id }}'
+        wait: yes
+        # How long before wait gives up, in seconds
+        wait_timeout: 3600
+        name: '{{ ami_name }}'
+        launch_permissions:
+          group_names: ['all']
+        tags:
+          Name: '{{ ami_name }}'
+      register: ami_data
+      delegate_to: localhost
+
+    - name: Print AMI ID
+      debug:
+        msg: 'AMI ID is: {{ ami_data.image_id }}'

+ 5 - 9
devops/infrastructure/deploy-config.sample.cfg → devops/aws/deploy-infra.sample.cfg

@@ -1,6 +1,6 @@
 #### PARAMETERS USED BY AWS
 
-STACK_NAME=joystream-node
+STACK_NAME=joystream-network
 REGION=us-east-1
 CLI_PROFILE=joystream-user
 KEY_PATH="/Users/joystream/Joystream/joystream-key.pem"
@@ -23,19 +23,15 @@ INVENTORY_PATH="$DATA_PATH/inventory"
 
 NUMBER_OF_VALIDATORS=2
 
-## Used for Deploying a new node
-DATE_TIME=$(date +"%d-%b-%Y-%H-%M-%S")
-
-SINGLE_NODE_STACK_NAME="new-node-$DATE_TIME"
-
-BINARY_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joystream-node-5.1.0-9d9e77751-x86_64-linux-gnu.tar.gz"
-CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joy-testnet-5.json"
-
 #### PARAMETERS USED BY ANSIBLE
 
 LOCAL_CODE_PATH="~/Joystream/joystream"
 NETWORK_SUFFIX=7891
 
+DEPLOYMENT_TYPE=live
+INITIAL_MEMBERS_PATH=""
+INITIAL_BALANCES_PATH=""
+
 GIT_REPO="https://github.com/Joystream/joystream.git"
 BRANCH_NAME=sumer
 

+ 3 - 2
devops/infrastructure/deploy-infra.sh → devops/aws/deploy-infra.sh

@@ -29,7 +29,7 @@ aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $NEW_STACK_NAME \
-  --template-file infrastructure.yml \
+  --template-file cloudformation/infrastructure.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -84,7 +84,8 @@ if [ $? -eq 0 ]; then
   echo -e "\n\n=========== Configure and start new validators, rpc node and pioneer ==========="
   ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH chain-spec-pioneer.yml \
     --extra-vars "local_dir=$LOCAL_CODE_PATH network_suffix=$NETWORK_SUFFIX
-                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS"
+                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS
+                  deployment_type=$DEPLOYMENT_TYPE initial_balances_file=$INITIAL_BALANCES_PATH initial_members_file=$INITIAL_MEMBERS_PATH"
 
   echo -e "\n\n Pioneer URL: https://$DOMAIN_NAME"
 fi

+ 0 - 0
devops/infrastructure/single-node-playbook.yml → devops/aws/deploy-single-node-playbook.yml


+ 18 - 0
devops/aws/deploy-single-node.sample.cfg

@@ -0,0 +1,18 @@
+#### PARAMETERS USED BY AWS
+
+REGION=us-east-1
+CLI_PROFILE=joystream-user
+KEY_PATH="/Users/joystream/Joystream/joystream-key.pem"
+AWS_KEY_PAIR_NAME="joystream-key"
+
+DEFAULT_EC2_INSTANCE_TYPE=t2.micro
+
+ACCOUNT_ID=$(aws sts get-caller-identity --profile $CLI_PROFILE --query Account --output text)
+
+## Used for Deploying a new node
+DATE_TIME=$(date +"%d-%b-%Y-%H-%M-%S")
+
+SINGLE_NODE_STACK_NAME="joystream-node-$DATE_TIME"
+
+BINARY_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joystream-node-5.1.0-9d9e77751-x86_64-linux-gnu.tar.gz"
+CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joy-testnet-5.json"

+ 3 - 3
devops/infrastructure/deploy-single-node.sh → devops/aws/deploy-single-node.sh

@@ -23,13 +23,13 @@ if [ ! -f "$KEY_PATH" ]; then
     exit 1
 fi
 
-# # Deploy the CloudFormation template
+# Deploy the CloudFormation template
 echo -e "\n\n=========== Deploying single node ==========="
 aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $SINGLE_NODE_STACK_NAME \
-  --template-file single-instance.yml \
+  --template-file cloudformation/single-instance.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -46,6 +46,6 @@ if [ $? -eq 0 ]; then
   echo -e "New Node Public IP: $SERVER_IP"
 
   echo -e "\n\n=========== Configuring node ==========="
-  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH single-node-playbook.yml \
+  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH deploy-single-node-playbook.yml \
     --extra-vars "binary_file=$BINARY_FILE chain_spec_file=$CHAIN_SPEC_FILE"
 fi

+ 1 - 1
devops/infrastructure/delete-stack.sh → devops/aws/destroy-infra.sh

@@ -6,7 +6,7 @@ source common.sh
 
 if [ -z "$1" ]; then
   echo "ERROR: Configuration file not passed"
-  echo "Please use ./delete-stack.sh PATH/TO/CONFIG to run this script"
+  echo "Please use ./destroy-infra.sh PATH/TO/CONFIG to run this script"
   exit 1
 else
   echo "Using $1 file for config"

+ 0 - 0
devops/infrastructure/group_vars/all → devops/aws/group_vars/all


+ 0 - 0
devops/infrastructure/library/json_modify.py → devops/aws/library/json_modify.py


+ 0 - 0
devops/infrastructure/requirements.yml → devops/aws/requirements.yml


+ 0 - 0
devops/infrastructure/roles/admin/tasks/deploy-pioneer.yml → devops/aws/roles/admin/tasks/deploy-pioneer.yml


+ 3 - 0
devops/infrastructure/roles/admin/tasks/main.yml → devops/aws/roles/admin/tasks/main.yml

@@ -16,6 +16,7 @@
 
 - name: Install subkey
   shell: cargo install --force subkey --git https://github.com/paritytech/substrate --version 2.0.1 --locked
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: install_result
@@ -25,5 +26,7 @@
     jid: '{{ install_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 96 - 0
devops/aws/roles/common/tasks/chain-spec-node-keys.yml

@@ -0,0 +1,96 @@
+---
+# Create chain spec files and keys and copy to all the servers
+
+- name: Debug to test variable
+  debug:
+    msg: 'Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}'
+  run_once: true
+
+- name: Copying initial members file to the server
+  copy:
+    src: '{{ initial_members_file }}'
+    dest: '{{ admin_code_dir }}/initial-members.json'
+  when: initial_members_file is defined and initial_members_file|length > 0
+  run_once: true
+
+- name: Copying initial balances file to the server
+  copy:
+    src: '{{ initial_balances_file }}'
+    dest: '{{ admin_code_dir }}/initial-balances.json'
+  when: initial_balances_file is defined and initial_balances_file|length > 0
+  run_once: true
+
+- name: Run chain-spec-builder to generate chainspec.json file (with initial data)
+  shell: >
+    {{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }}
+    --chain-spec-path {{ chain_spec_path }}
+    --endowed 1 --keystore-path {{ data_path }}
+    {% if deployment_type is defined and deployment_type|length > 0 %}--deployment {{ deployment_type }}{% endif %}
+    {% if initial_members_file is defined and initial_members_file|length > 0 %}--initial-balances-path {{ admin_code_dir }}/initial-balances.json{% endif %}
+    {% if initial_balances_file is defined and initial_balances_file|length > 0 %}--initial-members-path {{ admin_code_dir }}/initial-members.json{% endif %}
+  register: chain_spec_output
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Run subkey to generate node keys
+  shell: subkey generate-node-key
+  delegate_to: '{{ local_or_admin }}'
+  register: subkey_output
+
+- name: Print to stdout
+  debug:
+    msg:
+      - 'Public Key: {{ subkey_output.stderr }}'
+      - 'Private Key: {{ subkey_output.stdout }}'
+
+- name: Print to stdout chain spec
+  debug: var=chain_spec_output.stdout
+  run_once: true
+
+- name: Save output of chain spec to local file
+  copy:
+    content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
+    dest: '{{ data_path }}/chain_spec_output.txt'
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Change chain spec name, id, protocolId
+  json_modify:
+    chain_spec_path: '{{ chain_spec_path }}'
+    prefix: '{{ network_suffix }}'
+    all_nodes: '{{ hostvars }}'
+  delegate_to: '{{ local_or_admin }}'
+  register: result
+  run_once: true
+
+- name: Print output of modified chainspec
+  debug:
+    var: result.result
+  run_once: true
+
+- name: Run build-spec to generate raw chainspec file
+  shell: '{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}'
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Copying chain spec files to localhost
+  synchronize:
+    src: '/home/ubuntu/{{ data_path }}/'
+    dest: '{{ data_path }}'
+    mode: pull
+  run_once: true
+  when: run_on_admin_server|bool
+
+- name: Copy joystream-node binary to localhost
+  fetch:
+    src: '{{ admin_code_dir }}/target/release/joystream-node'
+    dest: '{{ data_path }}/joystream-node'
+    flat: yes
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+  when: run_on_admin_server|bool
+
+- name: Copying raw chain spec file to all servers
+  copy:
+    src: '{{ raw_chain_spec_path }}'
+    dest: '{{ remote_chain_spec_path }}'

+ 0 - 0
devops/infrastructure/roles/common/tasks/get-code-git.yml → devops/aws/roles/common/tasks/get-code-git.yml


+ 0 - 0
devops/infrastructure/roles/common/tasks/get-code-local.yml → devops/aws/roles/common/tasks/get-code-local.yml


+ 7 - 4
devops/infrastructure/roles/common/tasks/run-setup-build.yml → devops/aws/roles/common/tasks/run-setup-build.yml

@@ -2,25 +2,28 @@
 # Run setup and build code
 
 - name: Creat bash profile file
-  command: "touch /home/ubuntu/.bash_profile"
+  command: 'touch /home/ubuntu/.bash_profile'
 
 - name: Run setup script
   command: ./setup.sh
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
 
 - name: Build joystream node
   shell: . ~/.bash_profile && yarn cargo-build
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: build_result
 
 - name: Check on build async task
   async_status:
-    jid: "{{ build_result.ansible_job_id }}"
+    jid: '{{ build_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 0 - 0
devops/infrastructure/roles/node/templates/joystream-node.service.j2 → devops/aws/roles/node/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/roles/rpc/tasks/main.yml → devops/aws/roles/rpc/tasks/main.yml


+ 0 - 0
devops/infrastructure/roles/rpc/templates/Caddyfile.j2 → devops/aws/roles/rpc/templates/Caddyfile.j2


+ 0 - 0
devops/infrastructure/roles/rpc/templates/joystream-node.service.j2 → devops/aws/roles/rpc/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/roles/validators/tasks/main.yml → devops/aws/roles/validators/tasks/main.yml


+ 0 - 0
devops/infrastructure/roles/validators/templates/joystream-node.service.j2 → devops/aws/roles/validators/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/setup-admin.yml → devops/aws/setup-admin.yml


+ 3 - 3
devops/git-hooks/pre-push

@@ -1,13 +1,13 @@
 #!/bin/sh
 set -e
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 echo 'running clippy (rust linter)'
 # When custom build.rs triggers wasm-build-runner-impl to build we get error:
 # "Rust WASM toolchain not installed, please install it!"
 # So we skip building the WASM binary by setting BUILD_DUMMY_WASM_BINARY=1
-BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings
+BUILD_DUMMY_WASM_BINARY=1 cargo +nightly-2021-02-20 clippy --release --all -- -D warnings
 
 echo 'running cargo unit tests'
-cargo test --release --all
+cargo +nightly-2021-02-20 test --release --all

+ 0 - 50
devops/infrastructure/build-arm64-playbook.yml

@@ -1,50 +0,0 @@
----
-# Setup joystream code, build docker image
-
-- name: Build image and push to docker hub
-  hosts: all
-
-  tasks:
-    - block:
-        - name: Get code from git repo
-          include_role:
-            name: common
-            tasks_from: get-code-git
-
-        - name: Install Docker Module for Python
-          pip:
-            name: docker
-
-        - name: Log into DockerHub
-          community.docker.docker_login:
-            username: '{{ docker_username }}'
-            password: '{{ docker_password }}'
-
-        - name: Build an image and push it to a private repo
-          community.docker.docker_image:
-            build:
-              path: ./joystream
-              dockerfile: '{{ dockerfile }}'
-              platform: '{{ platform }}'
-            name: '{{ repository }}'
-            tag: '{{ tag_name }}'
-            push: yes
-            source: build
-          async: 7200
-          poll: 0
-          register: build_result
-
-        - name: Check on build async task
-          async_status:
-            jid: '{{ build_result.ansible_job_id }}'
-          register: job_result
-          until: job_result.finished
-          retries: 72
-          delay: 100
-
-      always:
-        - name: Delete the stack
-          amazon.aws.cloudformation:
-            stack_name: '{{ stack_name }}'
-            state: 'absent'
-          delegate_to: localhost

+ 0 - 45
devops/infrastructure/github-action-playbook.yml

@@ -1,45 +0,0 @@
----
-# Setup joystream code, build and Create AMI
-
-- name: Setup instance
-  hosts: all
-
-  tasks:
-    - block:
-      - name: Get code from git repo
-        include_role:
-          name: common
-          tasks_from: get-code-git
-
-      - name: Run setup and build
-        include_role:
-          name: common
-          tasks_from: run-setup-build
-
-      - name: Install subkey
-        include_role:
-          name: admin
-          tasks_from: main
-
-      - name: Basic AMI Creation
-        amazon.aws.ec2_ami:
-          instance_id: "{{ instance_id }}"
-          wait: yes
-          name: "{{ ami_name }}"
-          launch_permissions:
-            group_names: ['all']
-          tags:
-            Name: "{{ ami_name }}"
-        register: ami_data
-        delegate_to: localhost
-
-      - name: Print AMI ID
-        debug:
-          msg: "AMI ID is: {{ ami_data.image_id }}"
-
-      always:
-      - name: Delete the stack
-        amazon.aws.cloudformation:
-          stack_name: "{{ stack_name }}"
-          state: "absent"
-        delegate_to: localhost

+ 0 - 76
devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml

@@ -1,76 +0,0 @@
----
-# Create chain spec files and keys and copy to all the servers
-
-- name: Debug to test variable
-  debug:
-    msg: "Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}"
-  run_once: true
-
-- name: Run chain-spec-builder to generate chainspec.json file
-  command: "{{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }} --chain-spec-path {{ chain_spec_path }} --deployment live --endowed 1 --keystore-path {{ data_path }}"
-  register: chain_spec_output
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Run subkey to generate node keys
-  shell: subkey generate-node-key
-  delegate_to: "{{ local_or_admin }}"
-  register: subkey_output
-
-- name: Print to stdout
-  debug:
-    msg:
-    - "Public Key: {{ subkey_output.stderr }}"
-    - "Private Key: {{ subkey_output.stdout }}"
-
-- name: Print to stdout chain spec
-  debug: var=chain_spec_output.stdout
-  run_once: true
-
-- name: Save output of chain spec to local file
-  copy:
-    content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
-    dest: "{{ data_path }}/chain_spec_output.txt"
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Change chain spec name, id, protocolId
-  json_modify:
-    chain_spec_path: "{{ chain_spec_path }}"
-    prefix: "{{ network_suffix }}"
-    all_nodes: "{{ hostvars }}"
-  delegate_to: "{{ local_or_admin }}"
-  register: result
-  run_once: true
-
-- name: Print output of modified chainspec
-  debug:
-    var: result.result
-  run_once: true
-
-- name: Run build-spec to generate raw chainspec file
-  shell: "{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}"
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Copying chain spec files to localhost
-  synchronize:
-    src: "/home/ubuntu/{{ data_path }}/"
-    dest: "{{ data_path }}"
-    mode: pull
-  run_once: true
-  when: run_on_admin_server|bool
-
-- name: Copy joystream-node binary to localhost
-  fetch:
-    src: "{{ admin_code_dir }}/target/release/joystream-node"
-    dest: "{{ data_path }}/joystream-node"
-    flat: yes
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-  when: run_on_admin_server|bool
-
-- name: Copying raw chain spec file to all servers
-  copy:
-    src: "{{ raw_chain_spec_path }}"
-    dest: "{{ remote_chain_spec_path }}"

+ 1 - 1
devops/infrastructure/node-network/.gitignore → devops/kubernetes/node-network/.gitignore

@@ -1,6 +1,6 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 .env
 Pulumi.*.yaml

+ 0 - 0
devops/infrastructure/node-network/Pulumi.yaml → devops/kubernetes/node-network/Pulumi.yaml


+ 0 - 0
devops/infrastructure/node-network/README.md → devops/kubernetes/node-network/README.md


+ 0 - 0
devops/infrastructure/node-network/configMap.ts → devops/kubernetes/node-network/configMap.ts


+ 0 - 0
devops/infrastructure/node-network/index.ts → devops/kubernetes/node-network/index.ts


+ 0 - 0
devops/infrastructure/node-network/json_modify.py → devops/kubernetes/node-network/json_modify.py


+ 0 - 0
devops/infrastructure/node-network/nfsVolume.ts → devops/kubernetes/node-network/nfsVolume.ts


+ 0 - 0
devops/infrastructure/node-network/package.json → devops/kubernetes/node-network/package.json


+ 0 - 0
devops/infrastructure/node-network/tsconfig.json → devops/kubernetes/node-network/tsconfig.json


+ 0 - 0
devops/infrastructure/node-network/utils.ts → devops/kubernetes/node-network/utils.ts


+ 0 - 0
devops/infrastructure/node-network/validator.ts → devops/kubernetes/node-network/validator.ts


+ 0 - 0
devops/infrastructure/pulumi-common/caddy.ts → devops/kubernetes/pulumi-common/caddy.ts


+ 0 - 0
devops/infrastructure/pulumi-common/index.ts → devops/kubernetes/pulumi-common/index.ts


+ 0 - 0
devops/infrastructure/pulumi-common/package.json → devops/kubernetes/pulumi-common/package.json


+ 0 - 0
devops/infrastructure/pulumi-common/tsconfig.json → devops/kubernetes/pulumi-common/tsconfig.json


+ 1 - 1
devops/infrastructure/query-node/.gitignore → devops/kubernetes/query-node/.gitignore

@@ -1,6 +1,6 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 .env
 Pulumi.*.yaml

+ 3 - 0
devops/infrastructure/query-node/Pulumi.yaml → devops/kubernetes/query-node/Pulumi.yaml

@@ -17,3 +17,6 @@ template:
       description: Path to members.json file for processor initialization
     workersFilePath:
       description: Path to workers.json file for processor initialization
+    indexerURL:
+      description: URL for the indexer endpoint
+      default: 'http://query-node:4000/graphql'

+ 2 - 1
devops/infrastructure/query-node/README.md → devops/kubernetes/query-node/README.md

@@ -38,7 +38,8 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true
+    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true \
+    --plaintext indexerURL=<URL>
    ```
 
    If you want to build the stack on AWS set the `isMinikube` config to `false`

+ 0 - 0
devops/infrastructure/query-node/configMap.ts → devops/kubernetes/query-node/configMap.ts


+ 92 - 57
devops/infrastructure/query-node/index.ts → devops/kubernetes/query-node/index.ts

@@ -63,7 +63,6 @@ const resourceOptions = { provider: provider }
 const name = 'query-node'
 
 // Create a Kubernetes Namespace
-// const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
 const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
 
 // Export the Namespace name
@@ -289,6 +288,38 @@ const defsConfig = new configMapFromFile(
   resourceOptions
 ).configName
 
+const indexerContainer = []
+
+const existingIndexer = config.get('indexerURL')
+
+if (!existingIndexer) {
+  indexerContainer.push({
+    name: 'indexer',
+    image: 'joystream/hydra-indexer:2.1.0-beta.9',
+    env: [
+      { name: 'DB_HOST', value: 'postgres-db' },
+      { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
+      { name: 'DB_PASS', value: process.env.DB_PASS! },
+      { name: 'INDEXER_WORKERS', value: '5' },
+      { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
+      { name: 'DEBUG', value: 'index-builder:*' },
+      { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+      { name: 'TYPES_JSON', value: 'types.json' },
+      { name: 'PGUSER', value: process.env.DB_USER! },
+      { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
+    ],
+    volumeMounts: [
+      {
+        mountPath: '/home/hydra/packages/hydra-indexer/types.json',
+        name: 'indexer-volume',
+        subPath: 'fileData',
+      },
+    ],
+    command: ['/bin/sh', '-c'],
+    args: ['yarn db:bootstrap && yarn start:prod'],
+  })
+}
+
 const deployment = new k8s.apps.v1.Deployment(
   name,
   {
@@ -310,31 +341,7 @@ const deployment = new k8s.apps.v1.Deployment(
               image: 'redis:6.0-alpine',
               ports: [{ containerPort: 6379 }],
             },
-            {
-              name: 'indexer',
-              image: 'joystream/hydra-indexer:2.1.0-beta.9',
-              env: [
-                { name: 'DB_HOST', value: 'postgres-db' },
-                { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-                { name: 'INDEXER_WORKERS', value: '5' },
-                { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
-                { name: 'TYPES_JSON', value: 'types.json' },
-                { name: 'PGUSER', value: process.env.DB_USER! },
-                { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/home/hydra/packages/hydra-indexer/types.json',
-                  name: 'indexer-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['yarn db:bootstrap && yarn start:prod'],
-            },
+            ...indexerContainer,
             {
               name: 'hydra-indexer-gateway',
               image: 'joystream/hydra-indexer-gateway:2.1.0-beta.5',
@@ -351,30 +358,6 @@ const deployment = new k8s.apps.v1.Deployment(
               ],
               ports: [{ containerPort: 4002 }],
             },
-            {
-              name: 'processor',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                {
-                  name: 'INDEXER_ENDPOINT_URL',
-                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
-                },
-                { name: 'TYPEORM_HOST', value: 'postgres-db' },
-                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
-                  name: 'processor-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
-            },
             {
               name: 'graphql-server',
               image: joystreamAppsImage,
@@ -393,12 +376,6 @@ const deployment = new k8s.apps.v1.Deployment(
             },
           ],
           volumes: [
-            {
-              name: 'processor-volume',
-              configMap: {
-                name: defsConfig,
-              },
-            },
             {
               name: 'indexer-volume',
               configMap: {
@@ -436,9 +413,67 @@ const service = new k8s.core.v1.Service(
   resourceOptions
 )
 
-// Export the Service name and public LoadBalancer Endpoint
+// Export the Service name
 export const serviceName = service.metadata.name
 
+const indexerURL = config.get('indexerURL') || `http://query-node:4000/graphql`
+
+const processorDeployment = new k8s.apps.v1.Deployment(
+  `processor`,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'processor',
+              image: joystreamAppsImage,
+              imagePullPolicy: 'IfNotPresent',
+              env: [
+                {
+                  name: 'INDEXER_ENDPOINT_URL',
+                  value: indexerURL,
+                },
+                { name: 'TYPEORM_HOST', value: 'postgres-db' },
+                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
+                { name: 'DEBUG', value: 'index-builder:*' },
+                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+              ],
+              volumeMounts: [
+                {
+                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
+                  name: 'processor-volume',
+                  subPath: 'fileData',
+                },
+              ],
+              command: ['/bin/sh', '-c'],
+              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
+            },
+          ],
+          volumes: [
+            {
+              name: 'processor-volume',
+              configMap: {
+                name: defsConfig,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  { ...resourceOptions, dependsOn: deployment }
+)
+
 const caddyEndpoints = [
   `/indexer/* {
     uri strip_prefix /indexer

+ 0 - 0
devops/infrastructure/query-node/package.json → devops/kubernetes/query-node/package.json


+ 0 - 0
devops/infrastructure/query-node/s3Helpers.ts → devops/kubernetes/query-node/s3Helpers.ts


+ 0 - 0
devops/infrastructure/query-node/tsconfig.json → devops/kubernetes/query-node/tsconfig.json


+ 1 - 1
devops/infrastructure/storage-node/.gitignore → devops/kubernetes/storage-node/.gitignore

@@ -1,5 +1,5 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 Pulumi.*.yaml

+ 0 - 0
devops/infrastructure/storage-node/Pulumi.yaml → devops/kubernetes/storage-node/Pulumi.yaml


+ 2 - 2
devops/infrastructure/storage-node/README.md → devops/kubernetes/storage-node/README.md

@@ -39,14 +39,14 @@ After cloning this repo, from this working directory, run these commands:
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
     --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
-    --plaintext isAnonymous=true
+    --plaintext isMinikube=true --plaintext isAnonymous=true
    ```
 
    If running for production use the below mentioned config
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false \
+    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false --plaintext isMinikube=false \
     --plaintext providerId=<ID> --plaintext keyFile=<PATH> --plaintext publicURL=<DOMAIN> --secret passphrase=<PASSPHRASE>
    ```
 

+ 56 - 28
devops/infrastructure/storage-node/index.ts → devops/kubernetes/storage-node/index.ts

@@ -1,6 +1,7 @@
 import * as awsx from '@pulumi/awsx'
 import * as aws from '@pulumi/aws'
 import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
 import * as k8s from '@pulumi/kubernetes'
 import * as pulumi from '@pulumi/pulumi'
 import { CaddyServiceDeployment } from 'pulumi-common'
@@ -15,37 +16,57 @@ const lbReady = config.get('isLoadBalancerReady') === 'true'
 const name = 'storage-node'
 const colossusPort = parseInt(config.get('colossusPort') || '3000')
 const storage = parseInt(config.get('storage') || '40')
+const isMinikube = config.getBoolean('isMinikube')
 
 let additionalParams: string[] | pulumi.Input<string>[] = []
 let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
 let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
 
-// Create a VPC for our cluster.
-const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+export let kubeconfig: pulumi.Output<any>
+export let colossusImage: pulumi.Output<string>
+let provider: k8s.Provider
 
-// Create an EKS cluster with the default configuration.
-const cluster = new eks.Cluster('eksctl-storage-node', {
-  vpcId: vpc.id,
-  subnetIds: vpc.publicSubnetIds,
-  instanceType: 't2.medium',
-  providerCredentialOpts: {
-    profileName: awsConfig.get('profile'),
-  },
-})
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+  // Create image from local app
+  colossusImage = new docker.Image('joystream/colossus', {
+    build: {
+      context: '../../../',
+      dockerfile: '../../../colossus.Dockerfile',
+    },
+    imageName: 'joystream/colossus:latest',
+    skipPush: true,
+  }).baseImageName
+  // colossusImage = pulumi.interpolate`joystream/colossus:latest`
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
 
-// Export the cluster's kubeconfig.
-export const kubeconfig = cluster.kubeconfig
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-storage-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
 
-// Create a repository
-const repo = new awsx.ecr.Repository('colossus-image')
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
 
-// Build an image and publish it to our ECR repository.
-export const colossusImage = repo.buildAndPushImage({
-  dockerfile: '../../../colossus.Dockerfile',
-  context: '../../../',
-})
+  // Create a repository
+  const repo = new awsx.ecr.Repository('colossus-image')
 
-const resourceOptions = { provider: cluster.provider }
+  // Build an image and publish it to our ECR repository.
+  colossusImage = repo.buildAndPushImage({
+    dockerfile: '../../../colossus.Dockerfile',
+    context: '../../../',
+  })
+}
+
+const resourceOptions = { provider: provider }
 
 // Create a Kubernetes Namespace
 const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
@@ -88,14 +109,19 @@ const caddyEndpoints = [
 }`,
 ]
 
-const caddy = new CaddyServiceDeployment(
-  'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, caddyEndpoints },
-  resourceOptions
-)
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
 
-export const endpoint1 = caddy.primaryEndpoint
-export const endpoint2 = caddy.secondaryEndpoint
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}
 
 export let appLink: pulumi.Output<string>
 
@@ -180,6 +206,7 @@ const deployment = new k8s.apps.v1.Deployment(
             {
               name: 'colossus',
               image: colossusImage,
+              imagePullPolicy: 'IfNotPresent',
               env: [
                 {
                   name: 'WS_PROVIDER_ENDPOINT_URI',
@@ -222,6 +249,7 @@ const service = new k8s.core.v1.Service(
       name: 'storage-node',
     },
     spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
       ports: [{ name: 'port-1', port: colossusPort }],
       selector: appLabels,
     },

+ 1 - 0
devops/infrastructure/storage-node/package.json → devops/kubernetes/storage-node/package.json

@@ -9,6 +9,7 @@
     "@pulumi/eks": "^0.31.0",
     "@pulumi/kubernetes": "^3.0.0",
     "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
     "pulumi-common": "file:../pulumi-common"
   }
 }

+ 0 - 0
devops/infrastructure/storage-node/tsconfig.json → devops/kubernetes/storage-node/tsconfig.json


+ 5 - 5
joystream-node-armv7.Dockerfile

@@ -1,9 +1,9 @@
 FROM rust:1.52.1-buster AS rust
 RUN rustup self update
-RUN rustup install nightly-2021-03-24 --force
-RUN rustup default nightly-2021-03-24
-RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-RUN rustup component add --toolchain nightly-2021-03-24 clippy
+RUN rustup install nightly-2021-02-20 --force
+RUN rustup default nightly-2021-02-20
+RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
+RUN rustup component add --toolchain nightly-2021-02-20 clippy
 RUN apt-get update && \
   apt-get install -y curl git gcc xz-utils sudo pkg-config unzip clang llvm libc6-dev
 
@@ -14,7 +14,7 @@ COPY . /joystream
 
 # Build all cargo crates
 # Ensure our tests and linter pass before actual build
-ENV WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+ENV WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 RUN apt-get install -y libprotobuf-dev protobuf-compiler
 RUN BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings && \
     cargo test --release --all && \

+ 5 - 5
joystream-node.Dockerfile

@@ -1,9 +1,9 @@
 FROM rust:1.52.1-buster AS rust
 RUN rustup self update
-RUN rustup install nightly-2021-03-24 --force
-RUN rustup default nightly-2021-03-24
-RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-RUN rustup component add --toolchain nightly-2021-03-24 clippy
+RUN rustup install nightly-2021-02-20 --force
+RUN rustup default nightly-2021-02-20
+RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
+RUN rustup component add --toolchain nightly-2021-02-20 clippy
 RUN apt-get update && \
   apt-get install -y curl git gcc xz-utils sudo pkg-config unzip clang llvm libc6-dev
 
@@ -14,7 +14,7 @@ COPY . /joystream
 
 # Build all cargo crates
 # Ensure our tests and linter pass before actual build
-ENV WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+ENV WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 RUN BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings && \
     cargo test --release --all && \
     cargo build --release

+ 2 - 2
node/README.md

@@ -26,7 +26,7 @@ cd joystream/
 Compile the node and runtime:
 
 ```bash
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo build --release
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 build --release
 ```
 
 This produces the binary in `./target/release/joystream-node`
@@ -79,7 +79,7 @@ If you are building a tagged release from `master` branch and want to install th
 This will install the executable `joystream-node` to your `~/.cargo/bin` folder, which you would normally have in your `$PATH` environment.
 
 ```bash
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo install joystream-node --path node/ --locked
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 install joystream-node --path node/ --locked
 ```
 
 Now you can run and connect to the testnet:

+ 1 - 1
pioneer/packages/joy-proposals/src/Proposal/Body.tsx

@@ -102,7 +102,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
   Text: (content) => [
     new ParsedParam(
       'Content',
-      <ReactMarkdown className='TextProposalContent' source={content.toString()} linkTarget='_blank' />,
+      <ReactMarkdown className='TextProposalContent' source={bytesToString(content)} linkTarget='_blank' />,
       true
     )
   ],

+ 2 - 1
pioneer/packages/joy-proposals/src/Proposal/ProposalDetails.tsx

@@ -15,6 +15,7 @@ import { Seat } from '@joystream/types/council';
 import ProposalDiscussion from './discussion/ProposalDiscussion';
 
 import styled from 'styled-components';
+import { bytesToString } from '@polkadot/joy-utils/functions/misc';
 
 const ProposalDetailsMain = styled.div`
   display: flex;
@@ -99,7 +100,7 @@ export function getExtendedStatus (proposal: ParsedProposal, bestNumber?: BlockN
         if (approvedStatus === 'ExecutionFailed') {
           const executionFailedStatus = proposalStatus.asType('Approved').asType('ExecutionFailed');
 
-          executionFailReason = executionFailedStatus.error.toString();
+          executionFailReason = bytesToString(executionFailedStatus.error);
         }
       }
     }

+ 2 - 2
pioneer/packages/joy-utils/src/transport/proposals.ts

@@ -120,8 +120,8 @@ export default class ProposalsTransport extends BaseTransport {
 
     return {
       id,
-      title: rawProposal.title.toString(),
-      description: rawProposal.description.toString(),
+      title: bytesToString(rawProposal.title),
+      description: bytesToString(rawProposal.description),
       parameters: rawProposal.parameters,
       votingResults: rawProposal.votingResults,
       proposerId: rawProposal.proposerId.toNumber(),

+ 284 - 0
query-node/mappings/src/content/channel.ts

@@ -0,0 +1,284 @@
+import { fixBlockTimestamp } from '../eventFix'
+import { SubstrateEvent } from '@dzlzv/hydra-common'
+import { DatabaseManager } from '@dzlzv/hydra-db-utils'
+import ISO6391 from 'iso-639-1'
+import { FindConditions, In } from 'typeorm'
+
+import { AccountId } from '@polkadot/types/interfaces'
+import { Option } from '@polkadot/types/codec'
+import { Content } from '../../../generated/types'
+import {
+  readProtobuf,
+  readProtobufWithAssets,
+  convertContentActorToChannelOwner,
+  convertContentActorToDataObjectOwner,
+} from './utils'
+import { disconnectDataObjectRelations } from '../storage'
+
+import { Channel, ChannelCategory, DataObject, AssetAvailability } from 'query-node'
+import { inconsistentState, logger } from '../common'
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export async function content_ChannelCreated(db: DatabaseManager, event: SubstrateEvent): Promise<void> {
+  // read event data
+  const { channelId, channelCreationParameters, contentActor } = new Content.ChannelCreatedEvent(event).data
+
+  // read metadata
+  const protobufContent = await readProtobufWithAssets(new Channel(), {
+    metadata: channelCreationParameters.meta,
+    db,
+    event,
+    assets: channelCreationParameters.assets,
+    contentOwner: convertContentActorToDataObjectOwner(contentActor, channelId.toNumber()),
+  })
+
+  // create entity
+  const channel = new Channel({
+    // main data
+    id: channelId.toString(),
+    isCensored: false,
+    videos: [],
+    createdInBlock: event.blockNumber,
+
+    // default values for properties that might or might not be filled by metadata
+    coverPhotoUrls: [],
+    coverPhotoAvailability: AssetAvailability.INVALID,
+    avatarPhotoUrls: [],
+    avatarPhotoAvailability: AssetAvailability.INVALID,
+
+    // fill in auto-generated fields
+    createdAt: new Date(fixBlockTimestamp(event.blockTimestamp).toNumber()),
+    updatedAt: new Date(fixBlockTimestamp(event.blockTimestamp).toNumber()),
+
+    // prepare channel owner (handles fields `ownerMember` and `ownerCuratorGroup`)
+    ...(await convertContentActorToChannelOwner(db, contentActor)),
+
+    // integrate metadata
+    ...protobufContent,
+  })
+
+  // save entity
+  await db.save<Channel>(channel)
+
+  // emit log event
+  logger.info('Channel has been created', { id: channel.id })
+}
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export async function content_ChannelUpdated(db: DatabaseManager, event: SubstrateEvent) {
+  // read event data
+  const { channelId, channelUpdateParameters, contentActor } = new Content.ChannelUpdatedEvent(event).data
+
+  // load channel
+  const channel = await db.get(Channel, { where: { id: channelId.toString() } as FindConditions<Channel> })
+
+  // ensure channel exists
+  if (!channel) {
+    return inconsistentState('Non-existing channel update requested', channelId)
+  }
+
+  // prepare changed metadata
+  const newMetadata = channelUpdateParameters.new_meta.unwrapOr(null)
+
+  //  update metadata if it was changed
+  if (newMetadata) {
+    const protobufContent = await readProtobufWithAssets(new Channel(), {
+      metadata: newMetadata,
+      db,
+      event,
+      assets: channelUpdateParameters.assets.unwrapOr([]),
+      contentOwner: convertContentActorToDataObjectOwner(contentActor, channelId.toNumber()),
+    })
+
+    // update all fields read from protobuf
+    for (const [key, value] of Object.entries(protobufContent)) {
+      channel[key] = value
+    }
+  }
+
+  // prepare changed reward account
+  const newRewardAccount = channelUpdateParameters.reward_account.unwrapOr(null)
+
+  // reward account change happened?
+  if (newRewardAccount) {
+    // this will change the `channel`!
+    handleChannelRewardAccountChange(channel, newRewardAccount)
+  }
+
+  // set last update time
+  channel.updatedAt = new Date(fixBlockTimestamp(event.blockTimestamp).toNumber())
+
+  // save channel
+  await db.save<Channel>(channel)
+
+  // emit log event
+  logger.info('Channel has been updated', { id: channel.id })
+}
+
+export async function content_ChannelAssetsRemoved(db: DatabaseManager, event: SubstrateEvent) {
+  // read event data
+  const { contentId: contentIds } = new Content.ChannelAssetsRemovedEvent(event).data
+
+  // load channel
+  const assets = await db.getMany(DataObject, {
+    where: {
+      id: In(contentIds.toArray().map((item) => item.toString())),
+    } as FindConditions<DataObject>,
+  })
+
+  // delete assets
+  for (const asset of assets) {
+    // ensure dataObject is nowhere used to prevent db constraint error
+    await disconnectDataObjectRelations(db, asset)
+
+    // remove data object
+    await db.remove<DataObject>(asset)
+  }
+
+  // emit log event
+  logger.info('Channel assets have been removed', { ids: contentIds })
+}
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export async function content_ChannelCensorshipStatusUpdated(db: DatabaseManager, event: SubstrateEvent) {
+  // read event data
+  const { channelId, isCensored } = new Content.ChannelCensorshipStatusUpdatedEvent(event).data
+
+  // load event
+  const channel = await db.get(Channel, { where: { id: channelId.toString() } as FindConditions<Channel> })
+
+  // ensure channel exists
+  if (!channel) {
+    return inconsistentState('Non-existing channel censoring requested', channelId)
+  }
+
+  // update channel
+  channel.isCensored = isCensored.isTrue
+
+  // set last update time
+  channel.updatedAt = new Date(fixBlockTimestamp(event.blockTimestamp).toNumber())
+
+  // save channel
+  await db.save<Channel>(channel)
+
+  // emit log event
+  logger.info('Channel censorship status has been updated', { id: channelId, isCensored: isCensored.isTrue })
+}
+
+/// ///////////////// ChannelCategory ////////////////////////////////////////////
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export async function content_ChannelCategoryCreated(db: DatabaseManager, event: SubstrateEvent) {
+  // read event data
+  const { channelCategoryCreationParameters, channelCategoryId } = new Content.ChannelCategoryCreatedEvent(event).data
+
+  // read metadata
+  const protobufContent = await readProtobuf(new ChannelCategory(), {
+    metadata: channelCategoryCreationParameters.meta,
+    db,
+    event,
+  })
+
+  // create new channel category
+  const channelCategory = new ChannelCategory({
+    // main data
+    id: channelCategoryId.toString(),
+    channels: [],
+    createdInBlock: event.blockNumber,
+
+    // fill in auto-generated fields
+    createdAt: new Date(fixBlockTimestamp(event.blockTimestamp).toNumber()),
+    updatedAt: new Date(fixBlockTimestamp(event.blockTimestamp).toNumber()),
+
+    // integrate metadata
+    ...protobufContent,
+  })
+
+  // save channel category
+  await db.save<ChannelCategory>(channelCategory)
+
+  // emit log event
+  logger.info('Channel category has been created', { id: channelCategory.id })
+}
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export async function content_ChannelCategoryUpdated(db: DatabaseManager, event: SubstrateEvent) {
+  // read event data
+  const { channelCategoryId, channelCategoryUpdateParameters } = new Content.ChannelCategoryUpdatedEvent(event).data
+
+  // load channel category
+  const channelCategory = await db.get(ChannelCategory, {
+    where: {
+      id: channelCategoryId.toString(),
+    } as FindConditions<ChannelCategory>,
+  })
+
+  // ensure channel category exists
+  if (!channelCategory) {
+    return inconsistentState('Non-existing channel category update requested', channelCategoryId)
+  }
+
+  // read metadata
+  const protobufContent = await readProtobuf(new ChannelCategory(), {
+    metadata: channelCategoryUpdateParameters.new_meta,
+    db,
+    event,
+  })
+
+  // update all fields read from protobuf
+  for (const [key, value] of Object.entries(protobufContent)) {
+    channelCategory[key] = value
+  }
+
+  // set last update time
+  channelCategory.updatedAt = new Date(fixBlockTimestamp(event.blockTimestamp).toNumber())
+
+  // save channel category
+  await db.save<ChannelCategory>(channelCategory)
+
+  // emit log event
+  logger.info('Channel category has been updated', { id: channelCategory.id })
+}
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export async function content_ChannelCategoryDeleted(db: DatabaseManager, event: SubstrateEvent) {
+  // read event data
+  const { channelCategoryId } = new Content.ChannelCategoryDeletedEvent(event).data
+
+  // load channel category
+  const channelCategory = await db.get(ChannelCategory, {
+    where: {
+      id: channelCategoryId.toString(),
+    } as FindConditions<ChannelCategory>,
+  })
+
+  // ensure channel category exists
+  if (!channelCategory) {
+    return inconsistentState('Non-existing channel category deletion requested', channelCategoryId)
+  }
+
+  // delete channel category
+  await db.remove<ChannelCategory>(channelCategory)
+
+  // emit log event
+  logger.info('Channel category has been deleted', { id: channelCategory.id })
+}
+
+/// ///////////////// Helpers ////////////////////////////////////////////////////
+
+function handleChannelRewardAccountChange(
+  channel: Channel, // will be modified inside of the function!
+  reward_account: Option<AccountId>
+) {
+  const rewardAccount = reward_account.unwrapOr(null)
+
+  // new different reward account set?
+  if (rewardAccount) {
+    channel.rewardAccount = rewardAccount.toString()
+    return
+  }
+
+  // reward account removed
+
+  channel.rewardAccount = undefined // plan deletion (will have effect when saved to db)
+}

+ 280 - 0
query-node/mappings/src/storage.ts

@@ -0,0 +1,280 @@
+import { fixBlockTimestamp } from './eventFix'
+import { SubstrateEvent } from '@dzlzv/hydra-common'
+import { DatabaseManager } from '@dzlzv/hydra-db-utils'
+import { FindConditions, In } from 'typeorm'
+
+import { inconsistentState, logger, prepareDataObject } from './common'
+
+import { DataDirectory } from '../../generated/types'
+import { ContentId, ContentParameters, StorageObjectOwner } from '@joystream/types/augment'
+
+import { ContentId as Custom_ContentId, ContentParameters as Custom_ContentParameters } from '@joystream/types/storage'
+import { registry } from '@joystream/types'
+
+import {
+  Channel,
+  Video,
+  AssetAvailability,
+  DataObject,
+  DataObjectOwner,
+  DataObjectOwnerMember,
+  DataObjectOwnerChannel,
+  DataObjectOwnerDao,
+  DataObjectOwnerCouncil,
+  DataObjectOwnerWorkingGroup,
+  LiaisonJudgement,
+  Worker,
+  WorkerType,
+} from 'query-node'
+
+export async function dataDirectory_ContentAdded(db: DatabaseManager, event: SubstrateEvent): Promise<void> {
+  // read event data
+  const { contentParameters, storageObjectOwner } = new DataDirectory.ContentAddedEvent(event).data
+
+  // save all content objects
+  for (const parameters of contentParameters) {
+    const owner = convertStorageObjectOwner(storageObjectOwner)
+    const dataObject = await prepareDataObject(db, parameters, event, owner)
+
+    // fill in auto-generated fields
+    dataObject.createdAt = new Date(fixBlockTimestamp(event.blockTimestamp).toNumber())
+    dataObject.updatedAt = new Date(fixBlockTimestamp(event.blockTimestamp).toNumber())
+
+    await db.save<DataObject>(dataObject)
+  }
+
+  // emit log event
+  logger.info('Storage content has been added', {
+    ids: contentParameters.map((item) => encodeContentId(item.content_id)),
+  })
+}
+
+export async function dataDirectory_ContentRemoved(db: DatabaseManager, event: SubstrateEvent): Promise<void> {
+  // read event data
+  const { contentId: contentIds } = new DataDirectory.ContentRemovedEvent(event).data
+
+  // load assets
+  const dataObjects = await db.getMany(DataObject, {
+    where: {
+      joystreamContentId: In(contentIds.map((item) => encodeContentId(item))),
+    } as FindConditions<DataObject>,
+  })
+
+  // store dataObject ids before they are deleted (for logging purposes)
+  const dataObjectIds = dataObjects.map((item) => item.id)
+
+  // remove assets from database
+  for (const item of dataObjects) {
+    // ensure dataObject is nowhere used to prevent db constraint error
+    await disconnectDataObjectRelations(db, item)
+
+    // remove data object
+    await db.remove<DataObject>(item)
+  }
+
+  // emit log event
+  logger.info('Storage content have been removed', { id: contentIds, dataObjectIds })
+}
+
+export async function dataDirectory_ContentAccepted(db: DatabaseManager, event: SubstrateEvent): Promise<void> {
+  // read event data
+  const { contentId, storageProviderId } = new DataDirectory.ContentAcceptedEvent(event).data
+  const encodedContentId = encodeContentId(contentId)
+
+  // load asset
+  const dataObject = await db.get(DataObject, {
+    where: { joystreamContentId: encodedContentId } as FindConditions<DataObject>,
+  })
+
+  // ensure object exists
+  if (!dataObject) {
+    return inconsistentState('Non-existing content acceptation requested', encodedContentId)
+  }
+
+  // load storage provider
+  const worker = await db.get(Worker, {
+    where: {
+      workerId: storageProviderId.toString(),
+      type: WorkerType.STORAGE,
+    } as FindConditions<Worker>,
+  })
+
+  // ensure object exists
+  if (!worker) {
+    return inconsistentState('Missing Storage Provider Id', storageProviderId)
+  }
+
+  // update object
+  dataObject.liaison = worker
+  dataObject.liaisonJudgement = LiaisonJudgement.ACCEPTED
+
+  // set last update time
+  dataObject.updatedAt = new Date(fixBlockTimestamp(event.blockTimestamp).toNumber())
+
+  // save object
+  await db.save<DataObject>(dataObject)
+
+  // emit log event
+  logger.info('Storage content has been accepted', { id: encodedContentId })
+
+  // update asset availability for all connected channels and videos
+  // this will not be needed after the redundant AssetAvailability field is removed (after some Hydra upgrades)
+  await updateConnectedAssets(db, dataObject)
+}
+
+/// ///////////////// Updating connected entities ////////////////////////////////
+
+async function updateConnectedAssets(db: DatabaseManager, dataObject: DataObject) {
+  await updateSingleConnectedAsset(db, new Channel(), 'avatarPhoto', dataObject)
+  await updateSingleConnectedAsset(db, new Channel(), 'coverPhoto', dataObject)
+
+  await updateSingleConnectedAsset(db, new Video(), 'thumbnailPhoto', dataObject)
+  await updateSingleConnectedAsset(db, new Video(), 'media', dataObject)
+}
+
+// async function updateSingleConnectedAsset(db: DatabaseManager, type: typeof Channel | typeof Video, propertyName: string, dataObject: DataObject) {
+async function updateSingleConnectedAsset<T extends Channel | Video>(
+  db: DatabaseManager,
+  type: T,
+  propertyName: string,
+  dataObject: DataObject
+) {
+  // prepare lookup condition
+  const condition = {
+    where: {
+      [propertyName + 'DataObject']: dataObject,
+    },
+  } // as FindConditions<T>
+
+  // NOTE: we don't need to retrieve multiple channels/videos via `db.getMany()` because dataObject
+  //       is allowed to be associated only with one channel/video in runtime
+
+  // in theory the following condition(s) can be generalized `... db.get(type, ...` but in practice it doesn't work :-\
+  const item = type instanceof Channel ? await db.get(Channel, condition) : await db.get(Video, condition)
+
+  // escape when no dataObject association found
+  if (!item) {
+    return
+  }
+
+  item[propertyName + 'Availability'] = AssetAvailability.ACCEPTED
+
+  if (type instanceof Channel) {
+    await db.save<Channel>(item)
+
+    // emit log event
+    logger.info('Channel using Content has been accepted', {
+      channelId: item.id.toString(),
+      joystreamContentId: dataObject.joystreamContentId,
+    })
+  } else {
+    await db.save<Video>(item)
+
+    // emit log event
+    logger.info('Video using Content has been accepted', {
+      videoId: item.id.toString(),
+      joystreamContentId: dataObject.joystreamContentId,
+    })
+  }
+}
+
+// removes connection between dataObject and other entities
+export async function disconnectDataObjectRelations(db: DatabaseManager, dataObject: DataObject) {
+  await disconnectSingleDataObjectRelation(db, new Channel(), 'avatarPhoto', dataObject)
+  await disconnectSingleDataObjectRelation(db, new Channel(), 'coverPhoto', dataObject)
+
+  await disconnectSingleDataObjectRelation(db, new Video(), 'thumbnailPhoto', dataObject)
+  await disconnectSingleDataObjectRelation(db, new Video(), 'media', dataObject)
+}
+
+async function disconnectSingleDataObjectRelation<T extends Channel | Video>(
+  db: DatabaseManager,
+  type: T,
+  propertyName: string,
+  dataObject: DataObject
+) {
+  // prepare lookup condition
+  const condition = {
+    where: {
+      [propertyName + 'DataObject']: dataObject,
+    },
+  } // as FindConditions<T>
+
+  // NOTE: we don't need to retrieve multiple channels/videos via `db.getMany()` because dataObject
+  //       is allowed to be associated only with one channel/video in runtime
+
+  // in theory the following condition(s) can be generalized `... db.get(type, ...` but in practice it doesn't work :-\
+  const item = type instanceof Channel ? await db.get(Channel, condition) : await db.get(Video, condition)
+
+  // escape when no dataObject association found
+  if (!item) {
+    return
+  }
+
+  item[propertyName + 'Availability'] = AssetAvailability.INVALID
+  item[propertyName + 'DataObject'] = null
+
+  if (type instanceof Channel) {
+    await db.save<Channel>(item)
+
+    // emit log event
+    logger.info('Content has been disconnected from Channel', {
+      channelId: item.id.toString(),
+      joystreamContentId: dataObject.joystreamContentId,
+    })
+  } else {
+    // type instanceof Video
+    await db.save<Video>(item)
+
+    // emit log event
+    logger.info('Content has been disconnected from Video', {
+      videoId: item.id.toString(),
+      joystreamContentId: dataObject.joystreamContentId,
+    })
+  }
+}
+
+/// ///////////////// Helpers ////////////////////////////////////////////////////
+
+function convertStorageObjectOwner(objectOwner: StorageObjectOwner): typeof DataObjectOwner {
+  if (objectOwner.isMember) {
+    const owner = new DataObjectOwnerMember()
+    owner.member = objectOwner.asMember.toNumber()
+
+    return owner
+  }
+
+  if (objectOwner.isChannel) {
+    const owner = new DataObjectOwnerChannel()
+    owner.channel = objectOwner.asChannel.toNumber()
+
+    return owner
+  }
+
+  if (objectOwner.isDao) {
+    const owner = new DataObjectOwnerDao()
+    owner.dao = objectOwner.asDao.toNumber()
+
+    return owner
+  }
+
+  if (objectOwner.isCouncil) {
+    return new DataObjectOwnerCouncil()
+  }
+
+  if (objectOwner.isWorkingGroup) {
+    const owner = new DataObjectOwnerWorkingGroup()
+    owner.workingGroup = objectOwner.asWorkingGroup.toNumber()
+
+    return owner
+  }
+
+  logger.error('Not implemented StorageObjectOwner type', { objectOwner: objectOwner.toString() })
+  throw new Error('Not implemented StorageObjectOwner type')
+}
+
+function encodeContentId(contentId: ContentId) {
+  const customContentId = new Custom_ContentId(registry, contentId)
+
+  return customContentId.encode()
+}

+ 2 - 2
scripts/cargo-build.sh

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
-cargo build --release
+cargo +nightly-2021-02-20 build --release

+ 2 - 2
scripts/cargo-tests-with-networking.sh

@@ -1,7 +1,7 @@
 #!/bin/sh
 set -e
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 echo 'running all cargo tests'
-cargo test --release --all -- --ignored
+cargo +nightly-2021-02-20 test --release --all -- --ignored

+ 1 - 1
scripts/raspberry-cross-build.sh

@@ -9,7 +9,7 @@
 export WORKSPACE_ROOT=`cargo metadata --offline --no-deps --format-version 1 | jq .workspace_root -r`
 
 docker run \
-    -e WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 \
+    -e WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 \
     --volume ${WORKSPACE_ROOT}/:/home/cross/project \
     --volume ${HOME}/.cargo/registry:/home/cross/.cargo/registry \
     joystream/rust-raspberry \

+ 4 - 4
scripts/run-dev-chain.sh

@@ -1,13 +1,13 @@
 #!/usr/bin/env bash
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 # Build release binary
-cargo build --release
+cargo +nightly-2021-02-20 build --release
 
 # Purge existing local chain
-yes | cargo run --release -- purge-chain --dev
+yes | cargo +nightly-2021-02-20 run --release -- purge-chain --dev
 
 # Run local development chain -
 # No need to specify `-p joystream-node` it is the default bin crate in the cargo workspace
-cargo run --release -- --dev
+cargo +nightly-2021-02-20 run --release -- --dev

+ 4 - 4
setup.sh

@@ -3,6 +3,8 @@
 set -e
 
 if [[ "$OSTYPE" == "linux-gnu" ]]; then
+    # Prevent interactive prompts that would interrupt the installation
+    export DEBIAN_FRONTEND=noninteractive
     # code build tools
     sudo apt-get update
     sudo apt-get install -y coreutils clang llvm jq curl gcc xz-utils sudo pkg-config unzip libc6-dev make libssl-dev python
@@ -27,10 +29,8 @@ curl https://getsubstrate.io -sSf | bash -s -- --fast
 
 source ~/.cargo/env
 
-rustup install nightly-2021-03-24
-rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-
-rustup default nightly-2021-03-24
+rustup install nightly-2021-02-20
+rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
 
 rustup component add rustfmt clippy
 

+ 7 - 0
storage-node/packages/helios/README.md

@@ -2,6 +2,13 @@
 
 A basic tool to scan the joystream storage network to get a birds eye view of the health of the storage providers and content replication status.
 
+## Setup
+
+```
+yarn
+yarn workspace @joystream/types build
+```
+
 ## Scanning
 
 ```

+ 5 - 0
tests/network-tests/.env

@@ -56,3 +56,8 @@ STAKE_DECREMENT = 3
 MINT_CAPACITY_INCREMENT = 1000
 # Storage node address to download content from
 STORAGE_NODE_URL = http://localhost:3001/asset/v0
+# Mini-secret or mnemonic used in SURI for deterministic key derivation
+SURI_MINI_SECRET = ""
+# The starting key id to use when running a scenario. This will allow scenario
+# to be able to use all accounts generated in a prior scenario run against the same chain
+START_KEY_ID = 0

+ 2 - 0
tests/network-tests/.gitignore

@@ -0,0 +1,2 @@
+output.json
+

+ 13 - 13
tests/network-tests/run-tests.sh

@@ -27,17 +27,17 @@ echo "{
 }" > ${DATA_PATH}/initial-balances.json
 
 # Make Alice a member
-echo '
-  [{
-    "member_id":0,
-    "root_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "controller_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "handle":"alice",
-    "avatar_uri":"https://alice.com/avatar.png",
-    "about":"Alice",
-    "registered_at_time":0
-  }]
-' > ${DATA_PATH}/initial-members.json
+# echo '
+#   [{
+#     "member_id":0,
+#     "root_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
+#     "controller_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
+#     "handle":"alice",
+#     "avatar_uri":"https://alice.com/avatar.png",
+#     "about":"Alice",
+#     "registered_at_time":0
+#   }]
+# ' > ${DATA_PATH}/initial-members.json
 
 # Create a chain spec file
 docker run --rm -v ${DATA_PATH}:/data --entrypoint ./chain-spec-builder joystream/node:${RUNTIME} \
@@ -46,8 +46,8 @@ docker run --rm -v ${DATA_PATH}:/data --entrypoint ./chain-spec-builder joystrea
   --sudo-account  5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY \
   --deployment dev \
   --chain-spec-path /data/chain-spec.json \
-  --initial-balances-path /data/initial-balances.json \
-  --initial-members-path /data/initial-members.json
+  --initial-balances-path /data/initial-balances.json
+# --initial-members-path /data/initial-members.json
 
 # Convert the chain spec file to a raw chainspec file
 docker run --rm -v ${DATA_PATH}:/data joystream/node:${RUNTIME} build-spec \

+ 228 - 34
tests/network-tests/src/Api.ts

@@ -29,26 +29,42 @@ import {
   OpeningId,
 } from '@joystream/types/hiring'
 import { FillOpeningParameters, ProposalId } from '@joystream/types/proposals'
-import { v4 as uuid } from 'uuid'
 import { ContentId, DataObject } from '@joystream/types/storage'
 import { extendDebug } from './Debugger'
 import { InvertedPromise } from './InvertedPromise'
+import { VideoId } from '@joystream/types/content'
+import { ChannelId } from '@joystream/types/common'
+import { ChannelCategoryMetadata, VideoCategoryMetadata } from '@joystream/content-metadata-protobuf'
+import { assert } from 'chai'
 
 export enum WorkingGroups {
   StorageWorkingGroup = 'storageWorkingGroup',
   ContentDirectoryWorkingGroup = 'contentDirectoryWorkingGroup',
 }
 
+type AnyMetadata = {
+  serializeBinary(): Uint8Array
+}
+
 export class ApiFactory {
   private readonly api: ApiPromise
   private readonly keyring: Keyring
+  // number used as part of key derivation path
+  private keyId = 0
+  // mapping from account address to key id.
+  // To be able to re-derive keypair externally when mini-secret is known.
+  readonly addressesToKeyId: Map<string, number> = new Map()
+  // mini secret used in SURI key derivation path
+  private readonly miniSecret: string
+
   // source of funds for all new accounts
   private readonly treasuryAccount: string
 
   public static async create(
     provider: WsProvider,
     treasuryAccountUri: string,
-    sudoAccountUri: string
+    sudoAccountUri: string,
+    miniSecret: string
   ): Promise<ApiFactory> {
     const debug = extendDebug('api-factory')
     let connectAttempts = 0
@@ -65,7 +81,7 @@ export class ApiFactory {
         // Give it a few seconds to be ready.
         await Utils.wait(5000)
 
-        return new ApiFactory(api, treasuryAccountUri, sudoAccountUri)
+        return new ApiFactory(api, treasuryAccountUri, sudoAccountUri, miniSecret)
       } catch (err) {
         if (connectAttempts === 3) {
           throw new Error('Unable to connect to chain')
@@ -75,32 +91,60 @@ export class ApiFactory {
     }
   }
 
-  constructor(api: ApiPromise, treasuryAccountUri: string, sudoAccountUri: string) {
+  constructor(api: ApiPromise, treasuryAccountUri: string, sudoAccountUri: string, miniSecret: string) {
     this.api = api
     this.keyring = new Keyring({ type: 'sr25519' })
     this.treasuryAccount = this.keyring.addFromUri(treasuryAccountUri).address
     this.keyring.addFromUri(sudoAccountUri)
+    this.miniSecret = miniSecret
+    this.addressesToKeyId = new Map()
+    this.keyId = 0
   }
 
   public getApi(label: string): Api {
-    return new Api(this.api, this.treasuryAccount, this.keyring, label)
+    return new Api(this, this.api, this.treasuryAccount, this.keyring, label)
+  }
+
+  public createKeyPairs(n: number): { key: KeyringPair; id: number }[] {
+    const keys: { key: KeyringPair; id: number }[] = []
+    for (let i = 0; i < n; i++) {
+      const id = this.keyId++
+      const key = this.createCustomKeyPair(`${id}`)
+      keys.push({ key, id })
+      this.addressesToKeyId.set(key.address, id)
+    }
+    return keys
+  }
+
+  public createCustomKeyPair(customPath: string): KeyringPair {
+    const uri = `${this.miniSecret}//testing//${customPath}`
+    return this.keyring.addFromUri(uri)
+  }
+
+  public keyGenInfo(): { start: number; final: number } {
+    const start = 0
+    const final = this.keyId
+    return {
+      start,
+      final,
+    }
   }
 
-  // public close(): void {
-  //   this.api.disconnect()
-  // }
+  public getAllGeneratedAccounts(): { [k: string]: number } {
+    return Object.fromEntries(this.addressesToKeyId)
+  }
 }
 
 export class Api {
+  private readonly factory: ApiFactory
   private readonly api: ApiPromise
   private readonly sender: Sender
-  private readonly keyring: Keyring
   // source of funds for all new accounts
   private readonly treasuryAccount: string
 
-  constructor(api: ApiPromise, treasuryAccount: string, keyring: Keyring, label: string) {
+  constructor(factory: ApiFactory, api: ApiPromise, treasuryAccount: string, keyring: Keyring, label: string) {
+    this.factory = factory
     this.api = api
-    this.keyring = keyring
     this.treasuryAccount = treasuryAccount
     this.sender = new Sender(api, keyring, label)
   }
@@ -113,12 +157,24 @@ export class Api {
     this.sender.setLogLevel(LogLevel.Verbose)
   }
 
-  public createKeyPairs(n: number): KeyringPair[] {
-    const nKeyPairs: KeyringPair[] = []
-    for (let i = 0; i < n; i++) {
-      nKeyPairs.push(this.keyring.addFromUri(i + uuid().substring(0, 8)))
-    }
-    return nKeyPairs
+  public createKeyPairs(n: number): { key: KeyringPair; id: number }[] {
+    return this.factory.createKeyPairs(n)
+  }
+
+  public createCustomKeyPair(path: string): KeyringPair {
+    return this.factory.createCustomKeyPair(path)
+  }
+
+  public keyGenInfo(): { start: number; final: number } {
+    return this.factory.keyGenInfo()
+  }
+
+  public getAllgeneratedAccounts(): { [k: string]: number } {
+    return this.factory.getAllGeneratedAccounts()
+  }
+
+  public encodeMetadata(metadata: AnyMetadata): Bytes {
+    return this.api.createType('Bytes', '0x' + Buffer.from(metadata.serializeBinary()).toString('hex'))
   }
 
   // Well known WorkingGroup enum defined in runtime
@@ -138,6 +194,11 @@ export class Api {
     return this.sender.signAndSend(this.api.tx.sudo.sudo(tx), sudo)
   }
 
+  public async makeSudoAsCall(who: string, tx: SubmittableExtrinsic<'promise'>): Promise<ISubmittableResult> {
+    const sudo = await this.api.query.sudo.key()
+    return this.sender.signAndSend(this.api.tx.sudo.sudoAs(who, tx), sudo)
+  }
+
   public createPaidTermId(value: BN): PaidTermId {
     return this.api.createType('PaidTermId', value)
   }
@@ -149,8 +210,18 @@ export class Api {
     )
   }
 
-  public getMemberIds(address: string): Promise<MemberId[]> {
-    return this.api.query.members.memberIdsByControllerAccountId<Vec<MemberId>>(address)
+  // Many calls in the testing framework take an account id instead of a member id when an action
+  // is intended to be in the context of the member. This function is used to do a reverse lookup.
+  // There is an underlying assumption that each member has a unique controller account even
+  // though the runtime does not place that constraint. But for the purpose of the tests we throw
+  // if that condition is found to be false to esnure the tests do not fail. As long as all memberships
+  // are created through the membership fixture this should not happen.
+  public async getMemberId(address: string): Promise<MemberId> {
+    const ids = await this.api.query.members.memberIdsByControllerAccountId<Vec<MemberId>>(address)
+    if (ids.length > 1) {
+      throw new Error('More than one member with same controller account was detected')
+    }
+    return ids[0]
   }
 
   public async getBalance(address: string): Promise<Balance> {
@@ -631,7 +702,7 @@ export class Api {
     description: string,
     runtime: Bytes | string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createRuntimeUpgradeProposal(memberId, name, description, stake, runtime),
       account
@@ -645,7 +716,7 @@ export class Api {
     description: string,
     text: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createTextProposal(memberId, name, description, stake, text),
       account
@@ -660,7 +731,7 @@ export class Api {
     balance: BN,
     destination: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSpendingProposal(memberId, title, description, stake, balance, destination),
       account
@@ -674,7 +745,7 @@ export class Api {
     stake: BN,
     validatorCount: BN
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetValidatorCountProposal(memberId, title, description, stake, validatorCount),
       account
@@ -695,7 +766,7 @@ export class Api {
     minCouncilStake: BN,
     minVotingStake: BN
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetElectionParametersProposal(memberId, title, description, stake, {
         announcing_period: announcingPeriod,
@@ -719,7 +790,7 @@ export class Api {
     openingId: OpeningId,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createBeginReviewWorkingGroupLeaderApplicationsProposal(
         memberId,
@@ -741,7 +812,7 @@ export class Api {
     const councilAccounts = await this.getCouncilAccounts()
     return Promise.all(
       councilAccounts.map(async (account) => {
-        const memberId: MemberId = (await this.getMemberIds(account))[0]
+        const memberId: MemberId = await this.getMemberId(account)
         return this.approveProposal(account, memberId, proposal)
       })
     )
@@ -1156,7 +1227,7 @@ export class Api {
       ),
     })
 
-    const memberId: MemberId = (await this.getMemberIds(leaderOpening.account))[0]
+    const memberId: MemberId = await this.getMemberId(leaderOpening.account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createAddWorkingGroupLeaderOpeningProposal(
         memberId,
@@ -1186,7 +1257,7 @@ export class Api {
     payoutInterval: BN
     workingGroup: string
   }): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(fillOpening.account))[0]
+    const memberId: MemberId = await this.getMemberId(fillOpening.account)
 
     const fillOpeningParameters: FillOpeningParameters = this.api.createType('FillOpeningParameters', {
       opening_id: fillOpening.openingId,
@@ -1221,7 +1292,7 @@ export class Api {
     slash: boolean,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createTerminateWorkingGroupLeaderRoleProposal(
         memberId,
@@ -1248,7 +1319,7 @@ export class Api {
     rewardAmount: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetWorkingGroupLeaderRewardProposal(
         memberId,
@@ -1272,7 +1343,7 @@ export class Api {
     rewardAmount: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createDecreaseWorkingGroupLeaderStakeProposal(
         memberId,
@@ -1296,7 +1367,7 @@ export class Api {
     rewardAmount: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSlashWorkingGroupLeaderStakeProposal(
         memberId,
@@ -1319,7 +1390,7 @@ export class Api {
     mintCapacity: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetWorkingGroupMintCapacityProposal(
         memberId,
@@ -1372,7 +1443,7 @@ export class Api {
     text: string,
     module: WorkingGroups
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx[module].applyOnOpening(memberId, openingId, roleAccountAddress, roleStake, applicantStake, text),
       account
@@ -1703,4 +1774,127 @@ export class Api {
     const dataObject = await this.api.query.dataDirectory.dataByContentId<Option<DataObject>>(contentId)
     return dataObject.unwrapOr(null)
   }
+
+  async getMemberControllerAccount(memberId: number): Promise<string | undefined> {
+    return (await this.api.query.members.membershipById(memberId))?.controller_account.toString()
+  }
+
+  async createMockChannel(memberId: number, memberControllerAccount?: string): Promise<ChannelId | null> {
+    memberControllerAccount = memberControllerAccount || (await this.getMemberControllerAccount(memberId))
+
+    if (!memberControllerAccount) {
+      throw new Error('invalid member id')
+    }
+
+    // Create a channel without any assets
+    const tx = this.api.tx.content.createChannel(
+      { Member: memberId },
+      {
+        assets: [],
+        meta: null,
+        reward_account: null,
+      }
+    )
+
+    const result = await this.sender.signAndSend(tx, memberControllerAccount)
+
+    const record = this.findEventRecord(result.events, 'content', 'ChannelCreated')
+    if (record) {
+      return record.event.data[1] as ChannelId
+    }
+
+    return null
+  }
+
+  async createMockVideo(
+    memberId: number,
+    channelId: number,
+    memberControllerAccount?: string
+  ): Promise<VideoId | null> {
+    memberControllerAccount = memberControllerAccount || (await this.getMemberControllerAccount(memberId))
+
+    if (!memberControllerAccount) {
+      throw new Error('invalid member id')
+    }
+
+    // Create a video without any assets
+    const tx = this.api.tx.content.createVideo({ Member: memberId }, channelId, {
+      assets: [],
+      meta: null,
+    })
+
+    const result = await this.sender.signAndSend(tx, memberControllerAccount)
+
+    const record = this.findEventRecord(result.events, 'content', 'VideoCreated')
+    if (record) {
+      return record.event.data[2] as VideoId
+    }
+
+    return null
+  }
+
+  async createChannelCategoryAsLead(name: string): Promise<ISubmittableResult> {
+    const lead = await this.getGroupLead(WorkingGroups.ContentDirectoryWorkingGroup)
+
+    if (!lead) {
+      throw new Error('No Content Lead asigned, cannot create channel category')
+    }
+
+    const account = lead?.role_account_id
+    const meta = new ChannelCategoryMetadata()
+    meta.setName(name)
+    return this.sender.signAndSend(
+      this.api.tx.content.createChannelCategory({ Lead: null }, { meta: this.encodeMetadata(meta) }),
+      account?.toString()
+    )
+  }
+
+  async createVideoCategoryAsLead(name: string): Promise<ISubmittableResult> {
+    const lead = await this.getGroupLead(WorkingGroups.ContentDirectoryWorkingGroup)
+
+    if (!lead) {
+      throw new Error('No Content Lead asigned, cannot create channel category')
+    }
+
+    const account = lead?.role_account_id
+    const meta = new VideoCategoryMetadata()
+    meta.setName(name)
+    return this.sender.signAndSend(
+      this.api.tx.content.createVideoCategory({ Lead: null }, { meta: this.encodeMetadata(meta) }),
+      account?.toString()
+    )
+  }
+
+  async assignWorkerRoleAccount(
+    group: WorkingGroups,
+    workerId: WorkerId,
+    account: string
+  ): Promise<ISubmittableResult> {
+    if (!(await this.isWorker(workerId, group))) {
+      throw new Error('Worker not found')
+    }
+    const worker = await this.getWorkerById(workerId, group)
+
+    const memberController = await this.getMemberControllerAccount(worker.member_id.toNumber())
+    // there cannot be a worker associated with a member that does not exist
+    assert(memberController, 'Member controller not found')
+
+    // Expect the member controller key to already be added to the keyring.
+    // It is the responsibility of the caller to ensure this is the case!
+
+    const updateRoleAccountCall = this.api.tx[group].updateRoleAccount(workerId, account)
+    return this.makeSudoAsCall(memberController!, updateRoleAccountCall)
+  }
+
+  async assignWorkerWellknownAccount(group: WorkingGroups, workerId: WorkerId): Promise<ISubmittableResult> {
+    // path to append to base SURI
+    const uri = `worker//${this.getWorkingGroupString(group)}//${workerId.toNumber()}`
+    const account = this.createCustomKeyPair(uri).address
+    return this.assignWorkerRoleAccount(group, workerId, account)
+  }
+
+  async assignCouncil(accounts: string[]): Promise<ISubmittableResult> {
+    const setCouncilCall = this.api.tx.council.setCouncil(accounts)
+    return this.makeSudoCall(setCouncilCall)
+  }
 }

+ 30 - 4
tests/network-tests/src/Scenario.ts

@@ -9,6 +9,7 @@ import { Job } from './Job'
 import { JobManager } from './JobManager'
 import { ResourceManager } from './Resources'
 import fetch from 'cross-fetch'
+import fs from 'fs'
 
 export type ScenarioProps = {
   env: NodeJS.ProcessEnv
@@ -24,13 +25,22 @@ export async function scenario(scene: (props: ScenarioProps) => Promise<void>):
   // Connect api to the chain
   const nodeUrl: string = env.NODE_URL || 'ws://127.0.0.1:9944'
   const provider = new WsProvider(nodeUrl)
-
+  const miniSecret = env.SURI_MINI_SECRET || ''
   const apiFactory = await ApiFactory.create(
     provider,
     env.TREASURY_ACCOUNT_URI || '//Alice',
-    env.SUDO_ACCOUNT_URI || '//Alice'
+    env.SUDO_ACCOUNT_URI || '//Alice',
+    miniSecret
   )
 
+  const api = apiFactory.getApi('Key Generation')
+
+  // Generate all key ids before START_KEY_ID
+  const startKeyId = parseInt(env.START_KEY_ID || '0')
+  if (startKeyId) {
+    api.createKeyPairs(startKeyId)
+  }
+
   const queryNodeUrl: string = env.QUERY_NODE_URL || 'http://127.0.0.1:8081/graphql'
 
   const queryNodeProvider = new ApolloClient({
@@ -49,18 +59,34 @@ export async function scenario(scene: (props: ScenarioProps) => Promise<void>):
 
   const resources = new ResourceManager()
 
+  let exitCode = 0
+
   try {
     await jobs.run(resources)
   } catch (err) {
     console.error(err)
-    process.exit(-1)
+    exitCode = -1
   }
 
+  // mapping of generated accounts to their key ids
+  const accounts = api.getAllgeneratedAccounts()
+
+  // first and last key id used to generate keys in this scenario
+  const keyIds = api.keyGenInfo()
+
+  const output = {
+    accounts,
+    keyIds,
+    miniSecret,
+  }
+
+  fs.writeFileSync('output.json', JSON.stringify(output, undefined, 2))
+
   // Note: disconnecting and then reconnecting to the chain in the same process
   // doesn't seem to work!
   // Disconnecting is causing error to be thrown:
   // RPC-CORE: getStorage(key: StorageKey, at?: BlockHash): StorageData:: disconnected from ws://127.0.0.1:9944: 1000:: Normal connection closure
  // Are there subscriptions somewhere?
   // apiFactory.close()
-  process.exit()
+  process.exit(exitCode)
 }

+ 25 - 0
tests/network-tests/src/fixtures/councilAssignment.ts

@@ -0,0 +1,25 @@
+import { assert } from 'chai'
+import { Api } from '../Api'
+import { BaseFixture } from '../Fixture'
+
+export class AssignCouncilFixture extends BaseFixture {
+  private members: string[]
+
+  public constructor(api: Api, members: string[]) {
+    super(api)
+    this.members = members
+  }
+
+  public async execute(): Promise<void> {
+    // Assert no council exists
+    if ((await this.api.getCouncil()).length) {
+      return this.error(new Error('Council assignment fixture expects no council seats to be filled'))
+    }
+
+    await this.api.assignCouncil(this.members)
+
+    // Assert council was set
+    const councilSize = (await this.api.getCouncil()).length
+    assert.equal(councilSize, this.members.length, 'Not Expected council size after assignment')
+  }
+}

+ 9 - 4
tests/network-tests/src/fixtures/membershipModule.ts

@@ -33,6 +33,8 @@ export class BuyMembershipHappyCaseFixture extends BaseFixture {
 
     this.api.treasuryTransferBalanceToAccounts(this.accounts, membershipTransactionFee.add(new BN(membershipFee)))
 
+    // Note: Member alias is derived from the account, so if it is not unique the member registration
+    // will fail with a HandleAlreadyRegistered error
     this.memberIds = (
       await Promise.all(
         this.accounts.map((account) =>
@@ -46,6 +48,7 @@ export class BuyMembershipHappyCaseFixture extends BaseFixture {
     this.debug(`Registered ${this.memberIds.length} new members`)
 
     assert.equal(this.memberIds.length, this.accounts.length)
+    // log the member id and corresponding key id
   }
 }
 
@@ -60,10 +63,12 @@ export class BuyMembershipWithInsufficienFundsFixture extends BaseFixture {
   }
 
   async execute(): Promise<void> {
-    // Assertions
-    const membership = await this.api.getMemberIds(this.account)
-
-    assert(membership.length === 0, 'Account must not be associated with a member')
+    try {
+      await this.api.getMemberId(this.account)
+      assert(false, 'Account must not be associated with a member')
+    } catch (err) {
+      // member id not found
+    }
 
     // Fee estimation and transfer
     const membershipFee: BN = await this.api.getMembershipFee(this.paidTerms)

+ 1 - 1
tests/network-tests/src/fixtures/proposalsModule.ts

@@ -551,7 +551,7 @@ export class SpendingProposalFixture extends BaseFixture {
 
     await this.api.sudoSetCouncilMintCapacity(this.mintCapacity)
 
-    const fundingRecipient = this.api.createKeyPairs(1)[0].address
+    const fundingRecipient = this.api.createKeyPairs(1)[0].key.address
 
     // Proposal creation
     const result = await this.api.proposeSpending(

+ 2 - 2
tests/network-tests/src/fixtures/workingGroupModule.ts

@@ -485,7 +485,7 @@ export class UpdateRewardAccountFixture extends BaseFixture {
     this.api.treasuryTransferBalance(workerRoleAccount, updateRewardAccountFee)
 
     // Update reward account
-    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0]
+    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0].key
     await this.api.updateRewardAccount(workerRoleAccount, this.workerId, createdAccount.address, this.module)
     const newRewardAccount: string = await this.api.getWorkerRewardAccount(this.workerId, this.module)
     assert(
@@ -514,7 +514,7 @@ export class UpdateRoleAccountFixture extends BaseFixture {
     this.api.treasuryTransferBalance(workerRoleAccount, updateRoleAccountFee)
 
     // Update role account
-    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0]
+    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0].key
     await this.api.updateRoleAccount(workerRoleAccount, this.workerId, createdAccount.address, this.module)
     const newRoleAccount: string = (await this.api.getWorkerById(this.workerId, this.module)).role_account_id.toString()
     assert(

+ 48 - 0
tests/network-tests/src/flows/council/assign.ts

@@ -0,0 +1,48 @@
+import BN from 'bn.js'
+import { PaidTermId } from '@joystream/types/members'
+import { FlowProps } from '../../Flow'
+import { AssignCouncilFixture } from '../../fixtures/councilAssignment'
+import { BuyMembershipHappyCaseFixture } from '../../fixtures/membershipModule'
+import { extendDebug } from '../../Debugger'
+import { FixtureRunner } from '../../Fixture'
+import { Resource } from '../../Resources'
+
+export default function createAssignCouncil(size = 1) {
+  return async function (props: FlowProps): Promise<void> {
+    return assignCouncil(props, size)
+  }
+}
+
+async function assignCouncil({ api, env, lock }: FlowProps, size: number): Promise<void> {
+  const label = 'assignCouncil'
+  const debug = extendDebug(`flow:${label}`)
+
+  debug('Started')
+
+  await lock(Resource.Council)
+
+  // Skip creating council if already elected
+  if ((await api.getCouncil()).length) {
+    return debug('Skipping council setup. A Council is already elected')
+  }
+
+  const councilSize = size || (await api.getCouncilSize()).toNumber()
+
+  debug('Assigning new council of size', councilSize)
+
+  const council = []
+
+  for (let i = 0; i < councilSize; i++) {
+    council.push(api.createCustomKeyPair(`CouncilMember//${i}`).address)
+  }
+
+  const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
+
+  const createMembersFixture = new BuyMembershipHappyCaseFixture(api, council, paidTerms)
+  await new FixtureRunner(createMembersFixture).run()
+
+  const councilAssignment = new AssignCouncilFixture(api, council)
+  await new FixtureRunner(councilAssignment).run()
+
+  debug('Done')
+}

+ 2 - 2
tests/network-tests/src/flows/council/setup.ts

@@ -23,8 +23,8 @@ export default async function councilSetup({ api, env, lock }: FlowProps): Promi
   debug('Electing new council')
 
   const numberOfApplicants = (await api.getCouncilSize()).toNumber() * 2
-  const applicants = api.createKeyPairs(numberOfApplicants).map((key) => key.address)
-  const voters = api.createKeyPairs(5).map((key) => key.address)
+  const applicants = api.createKeyPairs(numberOfApplicants).map(({ key }) => key.address)
+  const voters = api.createKeyPairs(5).map(({ key }) => key.address)
 
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
   const K: number = +env.COUNCIL_ELECTION_K!

+ 2 - 2
tests/network-tests/src/flows/membership/creatingMemberships.ts

@@ -15,8 +15,8 @@ export default async function membershipCreation({ api, env }: FlowProps): Promi
 
   const N: number = +env.MEMBERSHIP_CREATION_N!
   assert(N > 0)
-  const nAccounts = api.createKeyPairs(N).map((key) => key.address)
-  const aAccount = api.createKeyPairs(1)[0].address
+  const nAccounts = api.createKeyPairs(N).map(({ key }) => key.address)
+  const aAccount = api.createKeyPairs(1)[0].key.address
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
 
   // Assert membership can be bought if sufficient funds are available

+ 1 - 1
tests/network-tests/src/flows/proposals/manageLeaderRole.ts

@@ -36,7 +36,7 @@ async function manageLeaderRole(api: Api, env: NodeJS.ProcessEnv, group: Working
   debug('Started')
   await lock(Resource.Proposals)
 
-  const leaderAccount = api.createKeyPairs(1)[0].address
+  const leaderAccount = api.createKeyPairs(1)[0].key.address
 
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
   const applicationStake: BN = new BN(env.WORKING_GROUP_APPLICATION_STAKE!)

+ 1 - 1
tests/network-tests/src/flows/proposals/updateRuntime.ts

@@ -28,7 +28,7 @@ export default async function updateRuntime({ api, env, lock }: FlowProps): Prom
   // Some tests after runtime update
   const createMembershipsFixture = new BuyMembershipHappyCaseFixture(
     api,
-    api.createKeyPairs(1).map((key) => key.address),
+    api.createKeyPairs(1).map(({ key }) => key.address),
     paidTerms
   )
   await new FixtureRunner(createMembershipsFixture).run()

+ 1 - 1
tests/network-tests/src/flows/workingGroup/leaderSetup.ts

@@ -25,7 +25,7 @@ async function leaderSetup(api: Api, env: NodeJS.ProcessEnv, group: WorkingGroup
   const existingLead = await api.getGroupLead(group)
   assert.equal(existingLead, undefined, 'Lead is already set')
 
-  const leadKeyPair = api.createKeyPairs(1)[0]
+  const leadKeyPair = api.createKeyPairs(1)[0].key
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
   const applicationStake: BN = new BN(env.WORKING_GROUP_APPLICATION_STAKE!)
   const roleStake: BN = new BN(env.WORKING_GROUP_ROLE_STAKE!)

Nem az összes módosított fájl került megjelenítésre, mert túl sok fájl változott