
storage-node: Fix files format.

Shamil Gadelshin · 4 years ago · parent commit 70901582ec
42 changed files with 4305 additions and 4313 deletions
  1. storage-node/packages/cli/bin/cli.js (+183 -183)
  2. storage-node/packages/cli/bin/dev.js (+72 -72)
  3. storage-node/packages/colossus/bin/cli.js (+187 -187)
  4. storage-node/packages/colossus/lib/app.js (+28 -28)
  5. storage-node/packages/colossus/lib/discovery.js (+27 -27)
  6. storage-node/packages/colossus/lib/middleware/file_uploads.js (+18 -18)
  7. storage-node/packages/colossus/lib/middleware/validate_responses.js (+35 -35)
  8. storage-node/packages/colossus/lib/sync.js (+70 -70)
  9. storage-node/packages/colossus/paths/asset/v0/{id}.js (+313 -317)
  10. storage-node/packages/colossus/paths/discover/v0/{id}.js (+75 -75)
  11. storage-node/packages/discovery/discover.js (+139 -139)
  12. storage-node/packages/discovery/example.js (+22 -22)
  13. storage-node/packages/discovery/index.js (+2 -2)
  14. storage-node/packages/discovery/publish.js (+39 -39)
  15. storage-node/packages/helios/bin/cli.js (+173 -175)
  16. storage-node/packages/runtime-api/assets.js (+150 -150)
  17. storage-node/packages/runtime-api/balances.js (+43 -43)
  18. storage-node/packages/runtime-api/discovery.js (+54 -54)
  19. storage-node/packages/runtime-api/identities.js (+198 -198)
  20. storage-node/packages/runtime-api/index.js (+259 -259)
  21. storage-node/packages/runtime-api/test/assets.js (+19 -19)
  22. storage-node/packages/runtime-api/test/balances.js (+21 -21)
  23. storage-node/packages/runtime-api/test/identities.js (+53 -53)
  24. storage-node/packages/runtime-api/test/index.js (+4 -4)
  25. storage-node/packages/runtime-api/workers.js (+254 -256)
  26. storage-node/packages/storage/filter.js (+72 -72)
  27. storage-node/packages/storage/index.js (+1 -1)
  28. storage-node/packages/storage/storage.js (+311 -311)
  29. storage-node/packages/storage/test/storage.js (+179 -179)
  30. storage-node/packages/util/externalPromise.js (+7 -7)
  31. storage-node/packages/util/fs/resolve.js (+23 -23)
  32. storage-node/packages/util/fs/walk.js (+98 -98)
  33. storage-node/packages/util/lru.js (+75 -75)
  34. storage-node/packages/util/pagination.js (+118 -118)
  35. storage-node/packages/util/ranges.js (+374 -374)
  36. storage-node/packages/util/stripEndingSlash.js (+4 -4)
  37. storage-node/packages/util/test/fs/resolve.js (+31 -31)
  38. storage-node/packages/util/test/fs/walk.js (+25 -25)
  39. storage-node/packages/util/test/lru.js (+93 -93)
  40. storage-node/packages/util/test/pagination.js (+86 -86)
  41. storage-node/packages/util/test/ranges.js (+364 -364)
  42. storage-node/packages/util/test/stripEndingSlash.js (+6 -6)
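
The diff that follows is a pure reformatting pass: tab indentation becomes two-space indentation, and single-parameter arrow functions drop their parentheses ((flags) => becomes flags =>). A Prettier configuration along these lines would produce exactly this style; the concrete values below are inferred from the diff rather than taken from the repository, so treat them as a sketch:

    // .prettierrc.js — a sketch inferred from this diff, not the repo's actual config
    module.exports = {
      useTabs: false, // every '+' line indents with spaces
      tabWidth: 2, // one level of nesting = 2 spaces
      semi: false, // no statement-terminating semicolons on either side
      singleQuote: true, // string literals use single quotes throughout
      trailingComma: 'es5', // multi-line literals keep their trailing commas
      arrowParens: 'avoid', // '(flags) =>' became 'flags =>'
      printWidth: 120, // long lines stay unwrapped up to roughly 120 columns
    }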

+ 183 - 183
storage-node/packages/cli/bin/cli.js

@@ -30,11 +30,11 @@ const dev = require('./dev')
 
 // Parse CLI
 const FLAG_DEFINITIONS = {
-	// TODO
+  // TODO
 }
 
 const cli = meow(
-	`
+  `
   Usage:
     $ storage-cli command [arguments..] [key_file] [passphrase]
 
@@ -55,199 +55,199 @@ const cli = meow(
    dev-init          Set up chain with Alice as lead and storage provider.
    dev-check         Check the chain is set up with Alice as lead and storage provider.
   `,
-	{ flags: FLAG_DEFINITIONS }
+  { flags: FLAG_DEFINITIONS }
 )
 
 function assertFile(name, filename) {
-	assert(filename, `Need a ${name} parameter to proceed!`)
-	assert(fs.statSync(filename).isFile(), `Path "${filename}" is not a file, aborting!`)
+  assert(filename, `Need a ${name} parameter to proceed!`)
+  assert(fs.statSync(filename).isFile(), `Path "${filename}" is not a file, aborting!`)
 }
 
 function load_identity(api, filename, passphrase) {
-	if (filename) {
-		assertFile('keyfile', filename)
-		api.identities.loadUnlock(filename, passphrase)
-	} else {
-		debug('Loading Alice as identity')
-		api.identities.useKeyPair(dev.aliceKeyPair(api))
-	}
+  if (filename) {
+    assertFile('keyfile', filename)
+    api.identities.loadUnlock(filename, passphrase)
+  } else {
+    debug('Loading Alice as identity')
+    api.identities.useKeyPair(dev.aliceKeyPair(api))
+  }
 }
 
 const commands = {
-	// add Alice well known account as storage provider
-	'dev-init': async (api) => {
-		// dev accounts are automatically loaded, no need to add explicitly to keyring
-		// load_identity(api)
-		const dev = require('./dev')
-		return dev.init(api)
-	},
-	// Checks that the setup done by dev-init command was successful.
-	'dev-check': async (api) => {
-		// dev accounts are automatically loaded, no need to add explicitly to keyring
-		// load_identity(api)
-		const dev = require('./dev')
-		return dev.check(api)
-	},
-	// The upload method is not correctly implemented:
-	// it needs to get the liaison after creating a data object,
-	// resolve the ipns id to the asset put api url of the storage-node
-	// before uploading..
-	upload: async (api, url, filename, doTypeId, keyfile, passphrase) => {
-		load_identity(keyfile, passphrase)
-		// Check parameters
-		assertFile('file', filename)
-
-		const size = fs.statSync(filename).size
-		debug(`File "${filename}" is ${chalk.green(size)} Bytes.`)
-
-		if (!doTypeId) {
-			doTypeId = 1
-		}
-
-		debug('Data Object Type ID is: ' + chalk.green(doTypeId))
-
-		// Generate content ID
-		// FIXME this require path is like this because of
-		// https://github.com/Joystream/apps/issues/207
-		const { ContentId } = require('@joystream/types/media')
-		let cid = ContentId.generate()
-		cid = cid.encode().toString()
-		debug('Generated content ID: ' + chalk.green(cid))
-
-		// Create Data Object
-		await api.assets.createDataObject(api.identities.key.address, cid, doTypeId, size)
-		debug('Data object created.')
-
-		// TODO in future, optionally contact liaison here?
-		const request = require('request')
-		url = `${url}asset/v0/${cid}`
-		debug('Uploading to URL', chalk.green(url))
-
-		const f = fs.createReadStream(filename)
-		const opts = {
-			url,
-			headers: {
-				'content-type': '',
-				'content-length': `${size}`,
-			},
-			json: true,
-		}
-		return new Promise((resolve, reject) => {
-			const r = request.put(opts, (error, response, body) => {
-				if (error) {
-					reject(error)
-					return
-				}
-
-				if (response.statusCode / 100 !== 2) {
-					reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
-					return
-				}
-				debug('Upload successful:', body.message)
-				resolve()
-			})
-			f.pipe(r)
-		})
-	},
-	// needs to be updated to take a content id and resolve it to a potential set
-	// of providers that have it, and select one (possibly try more than one provider)
-	// to fetch it from the get api url of a provider..
-	download: async (api, url, contentId, filename) => {
-		const request = require('request')
-		url = `${url}asset/v0/${contentId}`
-		debug('Downloading URL', chalk.green(url), 'to', chalk.green(filename))
-
-		const f = fs.createWriteStream(filename)
-		const opts = {
-			url,
-			json: true,
-		}
-		return new Promise((resolve, reject) => {
-			const r = request.get(opts, (error, response, body) => {
-				if (error) {
-					reject(error)
-					return
-				}
-
-				debug(
-					'Downloading',
-					chalk.green(response.headers['content-type']),
-					'of size',
-					chalk.green(response.headers['content-length']),
-					'...'
-				)
-
-				f.on('error', (err) => {
-					reject(err)
-				})
-
-				f.on('finish', () => {
-					if (response.statusCode / 100 !== 2) {
-						reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
-						return
-					}
-					debug('Download completed.')
-					resolve()
-				})
-			})
-			r.pipe(f)
-		})
-	},
-	// similar to 'download' function
-	head: async (api, url, contentId) => {
-		const request = require('request')
-		url = `${url}asset/v0/${contentId}`
-		debug('Checking URL', chalk.green(url), '...')
-
-		const opts = {
-			url,
-			json: true,
-		}
-		return new Promise((resolve, reject) => {
-			request.head(opts, (error, response, body) => {
-				if (error) {
-					reject(error)
-					return
-				}
-
-				if (response.statusCode / 100 !== 2) {
-					reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
-					return
-				}
-
-				for (const propname in response.headers) {
-					debug(`  ${chalk.yellow(propname)}: ${response.headers[propname]}`)
-				}
-
-				resolve()
-			})
-		})
-	},
+  // add Alice well known account as storage provider
+  'dev-init': async api => {
+    // dev accounts are automatically loaded, no need to add explicitly to keyring
+    // load_identity(api)
+    const dev = require('./dev')
+    return dev.init(api)
+  },
+  // Checks that the setup done by dev-init command was successful.
+  'dev-check': async api => {
+    // dev accounts are automatically loaded, no need to add explicitly to keyring
+    // load_identity(api)
+    const dev = require('./dev')
+    return dev.check(api)
+  },
+  // The upload method is not correctly implemented:
+  // it needs to get the liaison after creating a data object,
+  // resolve the ipns id to the asset put api url of the storage-node
+  // before uploading..
+  upload: async (api, url, filename, doTypeId, keyfile, passphrase) => {
+    load_identity(keyfile, passphrase)
+    // Check parameters
+    assertFile('file', filename)
+
+    const size = fs.statSync(filename).size
+    debug(`File "${filename}" is ${chalk.green(size)} Bytes.`)
+
+    if (!doTypeId) {
+      doTypeId = 1
+    }
+
+    debug('Data Object Type ID is: ' + chalk.green(doTypeId))
+
+    // Generate content ID
+    // FIXME this require path is like this because of
+    // https://github.com/Joystream/apps/issues/207
+    const { ContentId } = require('@joystream/types/media')
+    let cid = ContentId.generate()
+    cid = cid.encode().toString()
+    debug('Generated content ID: ' + chalk.green(cid))
+
+    // Create Data Object
+    await api.assets.createDataObject(api.identities.key.address, cid, doTypeId, size)
+    debug('Data object created.')
+
+    // TODO in future, optionally contact liaison here?
+    const request = require('request')
+    url = `${url}asset/v0/${cid}`
+    debug('Uploading to URL', chalk.green(url))
+
+    const f = fs.createReadStream(filename)
+    const opts = {
+      url,
+      headers: {
+        'content-type': '',
+        'content-length': `${size}`,
+      },
+      json: true,
+    }
+    return new Promise((resolve, reject) => {
+      const r = request.put(opts, (error, response, body) => {
+        if (error) {
+          reject(error)
+          return
+        }
+
+        if (response.statusCode / 100 !== 2) {
+          reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
+          return
+        }
+        debug('Upload successful:', body.message)
+        resolve()
+      })
+      f.pipe(r)
+    })
+  },
+  // needs to be updated to take a content id and resolve it to a potential set
+  // of providers that have it, and select one (possibly try more than one provider)
+  // to fetch it from the get api url of a provider..
+  download: async (api, url, contentId, filename) => {
+    const request = require('request')
+    url = `${url}asset/v0/${contentId}`
+    debug('Downloading URL', chalk.green(url), 'to', chalk.green(filename))
+
+    const f = fs.createWriteStream(filename)
+    const opts = {
+      url,
+      json: true,
+    }
+    return new Promise((resolve, reject) => {
+      const r = request.get(opts, (error, response, body) => {
+        if (error) {
+          reject(error)
+          return
+        }
+
+        debug(
+          'Downloading',
+          chalk.green(response.headers['content-type']),
+          'of size',
+          chalk.green(response.headers['content-length']),
+          '...'
+        )
+
+        f.on('error', err => {
+          reject(err)
+        })
+
+        f.on('finish', () => {
+          if (response.statusCode / 100 !== 2) {
+            reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
+            return
+          }
+          debug('Download completed.')
+          resolve()
+        })
+      })
+      r.pipe(f)
+    })
+  },
+  // similar to 'download' function
+  head: async (api, url, contentId) => {
+    const request = require('request')
+    url = `${url}asset/v0/${contentId}`
+    debug('Checking URL', chalk.green(url), '...')
+
+    const opts = {
+      url,
+      json: true,
+    }
+    return new Promise((resolve, reject) => {
+      request.head(opts, (error, response, body) => {
+        if (error) {
+          reject(error)
+          return
+        }
+
+        if (response.statusCode / 100 !== 2) {
+          reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
+          return
+        }
+
+        for (const propname in response.headers) {
+          debug(`  ${chalk.yellow(propname)}: ${response.headers[propname]}`)
+        }
+
+        resolve()
+      })
+    })
+  },
 }
 
 async function main() {
-	const api = await RuntimeApi.create()
-
-	// Simple CLI commands
-	const command = cli.input[0]
-	if (!command) {
-		throw new Error('Need a command to run!')
-	}
-
-	if (commands.hasOwnProperty(command)) {
-		// Command recognized
-		const args = _.clone(cli.input).slice(1)
-		await commands[command](api, ...args)
-	} else {
-		throw new Error(`Command "${command}" not recognized, aborting!`)
-	}
+  const api = await RuntimeApi.create()
+
+  // Simple CLI commands
+  const command = cli.input[0]
+  if (!command) {
+    throw new Error('Need a command to run!')
+  }
+
+  if (commands.hasOwnProperty(command)) {
+    // Command recognized
+    const args = _.clone(cli.input).slice(1)
+    await commands[command](api, ...args)
+  } else {
+    throw new Error(`Command "${command}" not recognized, aborting!`)
+  }
 }
 
 main()
-	.then(() => {
-		process.exit(0)
-	})
-	.catch((err) => {
-		console.error(chalk.red(err.stack))
-		process.exit(-1)
-	})
+  .then(() => {
+    process.exit(0)
+  })
+  .catch(err => {
+    console.error(chalk.red(err.stack))
+    process.exit(-1)
+  })
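
Two pre-existing bugs are visible in this file and survive the reformatting: upload calls load_identity(keyfile, passphrase) even though the function is declared as load_identity(api, filename, passphrase), so the api argument is silently dropped; and the success test response.statusCode / 100 !== 2 uses plain division, so any 2xx status other than exactly 200 (a 201, say) is rejected. A sketch of an integer-safe check, written in the file's new style:

    // 201 / 100 === 2.01, so the original test treats 201 as a failure.
    // Math.floor makes the whole 2xx range count as success.
    const isSuccess = statusCode => Math.floor(statusCode / 100) === 2

    if (!isSuccess(response.statusCode)) {
      reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
      return
    }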

+ 72 - 72
storage-node/packages/cli/bin/dev.js

@@ -10,118 +10,118 @@ const ALICE_URI = '//Alice'
 const ROLE_ACCOUNT_URI = '//Colossus'
 
 function aliceKeyPair(api) {
-	return api.identities.keyring.addFromUri(ALICE_URI, null, 'sr25519')
+  return api.identities.keyring.addFromUri(ALICE_URI, null, 'sr25519')
 }
 
 function roleKeyPair(api) {
-	return api.identities.keyring.addFromUri(ROLE_ACCOUNT_URI, null, 'sr25519')
+  return api.identities.keyring.addFromUri(ROLE_ACCOUNT_URI, null, 'sr25519')
 }
 
 function developmentPort() {
-	return 3001
+  return 3001
 }
 
-const check = async (api) => {
-	const roleAccountId = roleKeyPair(api).address
-	const providerId = await api.workers.findProviderIdByRoleAccount(roleAccountId)
+const check = async api => {
+  const roleAccountId = roleKeyPair(api).address
+  const providerId = await api.workers.findProviderIdByRoleAccount(roleAccountId)
 
-	if (providerId === null) {
-		throw new Error('Dev storage provider not found on chain!')
-	}
+  if (providerId === null) {
+    throw new Error('Dev storage provider not found on chain!')
+  }
 
-	console.log(`
+  console.log(`
   Chain is set up with Dev storage provider:
     providerId = ${providerId}
     roleAccountId = ${roleAccountId}
     roleKey = ${ROLE_ACCOUNT_URI}
   `)
 
-	return providerId
+  return providerId
 }
 
 // Setup Alice account on a development chain as
 // a member, storage lead, and a storage provider using a deterministic
 // development key for the role account
-const init = async (api) => {
-	try {
-		await check(api)
-		return
-	} catch (err) {
-		// We didn't find a storage provider with expected role account
-	}
+const init = async api => {
+  try {
+    await check(api)
+    return
+  } catch (err) {
+    // We didn't find a storage provider with expected role account
+  }
 
-	const alice = aliceKeyPair(api).address
-	const roleAccount = roleKeyPair(api).address
+  const alice = aliceKeyPair(api).address
+  const roleAccount = roleKeyPair(api).address
 
-	debug(`Ensuring Alice is sudo`)
+  debug(`Ensuring Alice is sudo`)
 
-	// make sure alice is sudo - indirectly checking this is a dev chain
-	const sudo = await api.identities.getSudoAccount()
+  // make sure alice is sudo - indirectly checking this is a dev chain
+  const sudo = await api.identities.getSudoAccount()
 
-	if (!sudo.eq(alice)) {
-		throw new Error('Setup requires Alice to be sudo. Are you sure you are running a devchain?')
-	}
+  if (!sudo.eq(alice)) {
+    throw new Error('Setup requires Alice to be sudo. Are you sure you are running a devchain?')
+  }
 
-	console.log('Running setup')
+  console.log('Running setup')
 
-	// set localhost colossus as discovery provider
-	// assuming pioneer dev server is running on port 3000 we should run
-	// the storage dev server on a different port than the default for colossus which is also
-	// 3000
-	debug('Setting Local development node as bootstrap endpoint')
-	await api.discovery.setBootstrapEndpoints(alice, [`http://localhost:${developmentPort()}/`])
+  // set localhost colossus as discovery provider
+  // assuming pioneer dev server is running on port 3000 we should run
+  // the storage dev server on a different port than the default for colossus which is also
+  // 3000
+  debug('Setting Local development node as bootstrap endpoint')
+  await api.discovery.setBootstrapEndpoints(alice, [`http://localhost:${developmentPort()}/`])
 
-	debug('Transferring tokens to storage role account')
-	// Give role account some tokens to work with
-	api.balances.transfer(alice, roleAccount, 100000)
+  debug('Transferring tokens to storage role account')
+  // Give role account some tokens to work with
+  api.balances.transfer(alice, roleAccount, 100000)
 
-	debug('Ensuring Alice is a member..')
-	let aliceMemberId = await api.identities.firstMemberIdOf(alice)
+  debug('Ensuring Alice is a member..')
+  let aliceMemberId = await api.identities.firstMemberIdOf(alice)
 
-	if (aliceMemberId === undefined) {
-		debug('Registering Alice as member..')
-		aliceMemberId = await api.identities.registerMember(alice, {
-			handle: 'alice',
-		})
-	} else {
-		debug('Alice is already a member')
-	}
+  if (aliceMemberId === undefined) {
+    debug('Registering Alice as member..')
+    aliceMemberId = await api.identities.registerMember(alice, {
+      handle: 'alice',
+    })
+  } else {
+    debug('Alice is already a member')
+  }
 
-	// Make alice the storage lead
-	debug('Making Alice the storage Lead')
-	const leadOpeningId = await api.workers.devAddStorageLeadOpening()
-	const leadApplicationId = await api.workers.devApplyOnOpening(leadOpeningId, aliceMemberId, alice, alice)
-	api.workers.devBeginLeadOpeningReview(leadOpeningId)
-	await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)
+  // Make alice the storage lead
+  debug('Making Alice the storage Lead')
+  const leadOpeningId = await api.workers.devAddStorageLeadOpening()
+  const leadApplicationId = await api.workers.devApplyOnOpening(leadOpeningId, aliceMemberId, alice, alice)
+  api.workers.devBeginLeadOpeningReview(leadOpeningId)
+  await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)
 
-	const leadAccount = await api.workers.getLeadRoleAccount()
-	if (!leadAccount.eq(alice)) {
-		throw new Error('Setting alice as lead failed')
-	}
+  const leadAccount = await api.workers.getLeadRoleAccount()
+  if (!leadAccount.eq(alice)) {
+    throw new Error('Setting alice as lead failed')
+  }
 
-	// Create a storage opening, apply, start review, and fill opening
-	debug(`Making ${ROLE_ACCOUNT_URI} account a storage provider`)
+  // Create a storage opening, apply, start review, and fill opening
+  debug(`Making ${ROLE_ACCOUNT_URI} account a storage provider`)
 
-	const openingId = await api.workers.devAddStorageOpening()
-	debug(`created new storage opening: ${openingId}`)
+  const openingId = await api.workers.devAddStorageOpening()
+  debug(`created new storage opening: ${openingId}`)
 
-	const applicationId = await api.workers.devApplyOnOpening(openingId, aliceMemberId, alice, roleAccount)
-	debug(`applied with application id: ${applicationId}`)
+  const applicationId = await api.workers.devApplyOnOpening(openingId, aliceMemberId, alice, roleAccount)
+  debug(`applied with application id: ${applicationId}`)
 
-	api.workers.devBeginStorageOpeningReview(openingId)
+  api.workers.devBeginStorageOpeningReview(openingId)
 
-	debug(`filling storage opening`)
-	const providerId = await api.workers.devFillStorageOpening(openingId, applicationId)
+  debug(`filling storage opening`)
+  const providerId = await api.workers.devFillStorageOpening(openingId, applicationId)
 
-	debug(`Assigned storage provider id: ${providerId}`)
+  debug(`Assigned storage provider id: ${providerId}`)
 
-	return check(api)
+  return check(api)
 }
 
 module.exports = {
-	init,
-	check,
-	aliceKeyPair,
-	roleKeyPair,
-	developmentPort,
+  init,
+  check,
+  aliceKeyPair,
+  roleKeyPair,
+  developmentPort,
 }
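
Note that init fires api.balances.transfer(...), api.workers.devBeginLeadOpeningReview(...), and api.workers.devBeginStorageOpeningReview(...) without await, unlike every other runtime call in the function. Assuming those methods return promises that settle when the extrinsic lands (as their awaited siblings such as devFillLeadOpening suggest), the later steps can race them; a hedged sketch of the stricter ordering:

    // Sketch only: assumes these runtime-api methods return promises that
    // resolve once the extrinsic is accepted, like their awaited siblings.
    await api.balances.transfer(alice, roleAccount, 100000)
    // ...
    await api.workers.devBeginLeadOpeningReview(leadOpeningId)
    await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)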

+ 187 - 187
storage-node/packages/colossus/bin/cli.js

@@ -22,42 +22,42 @@ const SYNC_PERIOD_MS = 300000 // 5min
 
 // Parse CLI
 const FLAG_DEFINITIONS = {
-	port: {
-		type: 'number',
-		alias: 'p',
-		default: 3000,
-	},
-	keyFile: {
-		type: 'string',
-		isRequired: (flags) => {
-			return !flags.dev
-		},
-	},
-	publicUrl: {
-		type: 'string',
-		alias: 'u',
-		isRequired: (flags) => {
-			return !flags.dev
-		},
-	},
-	passphrase: {
-		type: 'string',
-	},
-	wsProvider: {
-		type: 'string',
-		default: 'ws://localhost:9944',
-	},
-	providerId: {
-		type: 'number',
-		alias: 'i',
-		isRequired: (flags) => {
-			return !flags.dev
-		},
-	},
+  port: {
+    type: 'number',
+    alias: 'p',
+    default: 3000,
+  },
+  keyFile: {
+    type: 'string',
+    isRequired: flags => {
+      return !flags.dev
+    },
+  },
+  publicUrl: {
+    type: 'string',
+    alias: 'u',
+    isRequired: flags => {
+      return !flags.dev
+    },
+  },
+  passphrase: {
+    type: 'string',
+  },
+  wsProvider: {
+    type: 'string',
+    default: 'ws://localhost:9944',
+  },
+  providerId: {
+    type: 'number',
+    alias: 'i',
+    isRequired: flags => {
+      return !flags.dev
+    },
+  },
 }
 
 const cli = meow(
-	`
+  `
   Usage:
     $ colossus [command] [arguments]
 
@@ -77,225 +77,225 @@ const cli = meow(
     --port=PORT, -p PORT    Port number to listen on, defaults to 3000.
     --ws-provider WS_URL    Joystream-node websocket provider, defaults to ws://localhost:9944
   `,
-	{ flags: FLAG_DEFINITIONS }
+  { flags: FLAG_DEFINITIONS }
 )
 
 // All-important banner!
 function banner() {
-	console.log(chalk.blue(figlet.textSync('joystream', 'Speed')))
+  console.log(chalk.blue(figlet.textSync('joystream', 'Speed')))
 }
 
 function startExpressApp(app, port) {
-	const http = require('http')
-	const server = http.createServer(app)
-
-	return new Promise((resolve, reject) => {
-		server.on('error', reject)
-		server.on('close', (...args) => {
-			console.log('Server closed, shutting down...')
-			resolve(...args)
-		})
-		server.on('listening', () => {
-			console.log('API server started.', server.address())
-		})
-		server.listen(port, '::')
-		console.log('Starting API server...')
-	})
+  const http = require('http')
+  const server = http.createServer(app)
+
+  return new Promise((resolve, reject) => {
+    server.on('error', reject)
+    server.on('close', (...args) => {
+      console.log('Server closed, shutting down...')
+      resolve(...args)
+    })
+    server.on('listening', () => {
+      console.log('API server started.', server.address())
+    })
+    server.listen(port, '::')
+    console.log('Starting API server...')
+  })
 }
 
 // Start app
 function start_all_services({ store, api, port }) {
-	const app = require('../lib/app')(PROJECT_ROOT, store, api) // reduce flags to only needed values
-	return startExpressApp(app, port)
+  const app = require('../lib/app')(PROJECT_ROOT, store, api) // reduce flags to only needed values
+  return startExpressApp(app, port)
 }
 
 // Start discovery service app only
 function start_discovery_service({ api, port }) {
-	const app = require('../lib/discovery')(PROJECT_ROOT, api) // reduce flags to only needed values
-	return startExpressApp(app, port)
+  const app = require('../lib/discovery')(PROJECT_ROOT, api) // reduce flags to only needed values
+  return startExpressApp(app, port)
 }
 
 // Get an initialized storage instance
 function get_storage(runtimeApi) {
-	// TODO at some point, we can figure out what backend-specific connection
-	// options make sense. For now, just don't use any configuration.
-	const { Storage } = require('@joystream/storage-node-backend')
-
-	const options = {
-		resolve_content_id: async (contentId) => {
-			// Resolve via API
-			const obj = await runtimeApi.assets.getDataObject(contentId)
-			if (!obj || obj.isNone) {
-				return
-			}
-			// if obj.liaison_judgement !== Accepted .. throw ?
-			return obj.unwrap().ipfs_content_id.toString()
-		},
-	}
-
-	return Storage.create(options)
+  // TODO at some point, we can figure out what backend-specific connection
+  // options make sense. For now, just don't use any configuration.
+  const { Storage } = require('@joystream/storage-node-backend')
+
+  const options = {
+    resolve_content_id: async contentId => {
+      // Resolve via API
+      const obj = await runtimeApi.assets.getDataObject(contentId)
+      if (!obj || obj.isNone) {
+        return
+      }
+      // if obj.liaison_judgement !== Accepted .. throw ?
+      return obj.unwrap().ipfs_content_id.toString()
+    },
+  }
+
+  return Storage.create(options)
 }
 
 async function init_api_production({ wsProvider, providerId, keyFile, passphrase }) {
-	// Load key information
-	const { RuntimeApi } = require('@joystream/storage-runtime-api')
+  // Load key information
+  const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
-	if (!keyFile) {
-		throw new Error('Must specify a --key-file argument for running a storage node.')
-	}
+  if (!keyFile) {
+    throw new Error('Must specify a --key-file argument for running a storage node.')
+  }
 
-	if (providerId === undefined) {
-		throw new Error('Must specify a --provider-id argument for running a storage node')
-	}
+  if (providerId === undefined) {
+    throw new Error('Must specify a --provider-id argument for running a storage node')
+  }
 
-	const api = await RuntimeApi.create({
-		account_file: keyFile,
-		passphrase,
-		provider_url: wsProvider,
-		storageProviderId: providerId,
-	})
+  const api = await RuntimeApi.create({
+    account_file: keyFile,
+    passphrase,
+    provider_url: wsProvider,
+    storageProviderId: providerId,
+  })
 
-	if (!api.identities.key) {
-		throw new Error('Failed to unlock storage provider account')
-	}
+  if (!api.identities.key) {
+    throw new Error('Failed to unlock storage provider account')
+  }
 
-	if (!(await api.workers.isRoleAccountOfStorageProvider(api.storageProviderId, api.identities.key.address))) {
-		throw new Error('storage provider role account and storageProviderId are not associated with a worker')
-	}
+  if (!(await api.workers.isRoleAccountOfStorageProvider(api.storageProviderId, api.identities.key.address))) {
+    throw new Error('storage provider role account and storageProviderId are not associated with a worker')
+  }
 
-	return api
+  return api
 }
 
 async function init_api_development() {
-	// Load key information
-	const { RuntimeApi } = require('@joystream/storage-runtime-api')
+  // Load key information
+  const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
-	const wsProvider = 'ws://localhost:9944'
+  const wsProvider = 'ws://localhost:9944'
 
-	const api = await RuntimeApi.create({
-		provider_url: wsProvider,
-	})
+  const api = await RuntimeApi.create({
+    provider_url: wsProvider,
+  })
 
-	const dev = require('../../cli/bin/dev')
+  const dev = require('../../cli/bin/dev')
 
-	api.identities.useKeyPair(dev.roleKeyPair(api))
+  api.identities.useKeyPair(dev.roleKeyPair(api))
 
-	api.storageProviderId = await dev.check(api)
+  api.storageProviderId = await dev.check(api)
 
-	return api
+  return api
 }
 
 function get_service_information(publicUrl) {
-	// For now assume we run all services on the same endpoint
-	return {
-		asset: {
-			version: 1, // spec version
-			endpoint: publicUrl,
-		},
-		discover: {
-			version: 1, // spec version
-			endpoint: publicUrl,
-		},
-	}
+  // For now assume we run all services on the same endpoint
+  return {
+    asset: {
+      version: 1, // spec version
+      endpoint: publicUrl,
+    },
+    discover: {
+      version: 1, // spec version
+      endpoint: publicUrl,
+    },
+  }
 }
 
 async function announce_public_url(api, publicUrl) {
-	// re-announce in future
-	const reannounce = function (timeoutMs) {
-		setTimeout(announce_public_url, timeoutMs, api, publicUrl)
-	}
+  // re-announce in future
+  const reannounce = function (timeoutMs) {
+    setTimeout(announce_public_url, timeoutMs, api, publicUrl)
+  }
 
-	debug('announcing public url')
-	const { publish } = require('@joystream/service-discovery')
+  debug('announcing public url')
+  const { publish } = require('@joystream/service-discovery')
 
-	try {
-		const serviceInformation = get_service_information(publicUrl)
+  try {
+    const serviceInformation = get_service_information(publicUrl)
 
-		const keyId = await publish.publish(serviceInformation)
+    const keyId = await publish.publish(serviceInformation)
 
-		await api.discovery.setAccountInfo(keyId)
+    await api.discovery.setAccountInfo(keyId)
 
-		debug('publishing complete, scheduling next update')
+    debug('publishing complete, scheduling next update')
 
-		// >> sometimes after tx is finalized.. we are not reaching here!
+    // >> sometimes after tx is finalized.. we are not reaching here!
 
-		// Reannounce before expiry. Here we are concerned primarily
-		// with keeping the account information refreshed and 'available' in
-		// the ipfs network. our record on chain is valid for 24hr
-		reannounce(50 * 60 * 1000) // in 50 minutes
-	} catch (err) {
-		debug(`announcing public url failed: ${err.stack}`)
+    // Reannounce before expiry. Here we are concerned primarily
+    // with keeping the account information refreshed and 'available' in
+    // the ipfs network. our record on chain is valid for 24hr
+    reannounce(50 * 60 * 1000) // in 50 minutes
+  } catch (err) {
+    debug(`announcing public url failed: ${err.stack}`)
 
-		// On failure retry sooner
-		debug(`announcing failed, retrying in: 2 minutes`)
-		reannounce(120 * 1000)
-	}
+    // On failure retry sooner
+    debug(`announcing failed, retrying in: 2 minutes`)
+    reannounce(120 * 1000)
+  }
 }
 
 // Simple CLI commands
 let command = cli.input[0]
 if (!command) {
-	command = 'server'
+  command = 'server'
 }
 
 async function start_colossus({ api, publicUrl, port, flags }) {
-	// TODO: check valid url, and valid port number
-	const store = get_storage(api)
-	banner()
-	const { startSyncing } = require('../lib/sync')
-	startSyncing(api, { syncPeriod: SYNC_PERIOD_MS }, store)
-	announce_public_url(api, publicUrl)
-	return start_all_services({ store, api, port, flags }) // don't pass all flags, only required values
+  // TODO: check valid url, and valid port number
+  const store = get_storage(api)
+  banner()
+  const { startSyncing } = require('../lib/sync')
+  startSyncing(api, { syncPeriod: SYNC_PERIOD_MS }, store)
+  announce_public_url(api, publicUrl)
+  return start_all_services({ store, api, port, flags }) // don't pass all flags, only required values
 }
 
 const commands = {
-	server: async () => {
-		let publicUrl, port, api
-
-		if (cli.flags.dev) {
-			const dev = require('../../cli/bin/dev')
-			api = await init_api_development()
-			port = dev.developmentPort()
-			publicUrl = `http://localhost:${port}/`
-		} else {
-			api = await init_api_production(cli.flags)
-			publicUrl = cli.flags.publicUrl
-			port = cli.flags.port
-		}
-
-		return start_colossus({ api, publicUrl, port })
-	},
-	discovery: async () => {
-		debug('Starting Joystream Discovery Service')
-		const { RuntimeApi } = require('@joystream/storage-runtime-api')
-		const wsProvider = cli.flags.wsProvider
-		const api = await RuntimeApi.create({ provider_url: wsProvider })
-		const port = cli.flags.port
-		await start_discovery_service({ api, port })
-	},
+  server: async () => {
+    let publicUrl, port, api
+
+    if (cli.flags.dev) {
+      const dev = require('../../cli/bin/dev')
+      api = await init_api_development()
+      port = dev.developmentPort()
+      publicUrl = `http://localhost:${port}/`
+    } else {
+      api = await init_api_production(cli.flags)
+      publicUrl = cli.flags.publicUrl
+      port = cli.flags.port
+    }
+
+    return start_colossus({ api, publicUrl, port })
+  },
+  discovery: async () => {
+    debug('Starting Joystream Discovery Service')
+    const { RuntimeApi } = require('@joystream/storage-runtime-api')
+    const wsProvider = cli.flags.wsProvider
+    const api = await RuntimeApi.create({ provider_url: wsProvider })
+    const port = cli.flags.port
+    await start_discovery_service({ api, port })
+  },
 }
 
 async function main() {
-	// Simple CLI commands
-	let command = cli.input[0]
-	if (!command) {
-		command = 'server'
-	}
-
-	if (commands.hasOwnProperty(command)) {
-		// Command recognized
-		const args = _.clone(cli.input).slice(1)
-		await commands[command](...args)
-	} else {
-		throw new Error(`Command '${command}' not recognized, aborting!`)
-	}
+  // Simple CLI commands
+  let command = cli.input[0]
+  if (!command) {
+    command = 'server'
+  }
+
+  if (commands.hasOwnProperty(command)) {
+    // Command recognized
+    const args = _.clone(cli.input).slice(1)
+    await commands[command](...args)
+  } else {
+    throw new Error(`Command '${command}' not recognized, aborting!`)
+  }
 }
 
 main()
-	.then(() => {
-		process.exit(0)
-	})
-	.catch((err) => {
-		console.error(chalk.red(err.stack))
-		process.exit(-1)
-	})
+  .then(() => {
+    process.exit(0)
+  })
+  .catch(err => {
+    console.error(chalk.red(err.stack))
+    process.exit(-1)
+  })
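
Two cleanups suggested by this file, outside the scope of the formatting commit: the module-level let command = cli.input[0] block just above start_colossus duplicates identical logic inside main() and its result is never read, so it is dead code; and commands.hasOwnProperty(command) is the pattern ESLint's no-prototype-builtins rule warns about. A sketch of the safer lookup:

    // Calling hasOwnProperty via the prototype keeps the check working even
    // for objects without Object.prototype (e.g. Object.create(null)).
    if (Object.prototype.hasOwnProperty.call(commands, command)) {
      const args = _.clone(cli.input).slice(1)
      await commands[command](...args)
    }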

+ 28 - 28
storage-node/packages/colossus/lib/app.js

@@ -36,39 +36,39 @@ const pagination = require('@joystream/storage-utils/pagination')
 
 // Configure app
 function create_app(projectRoot, storage, runtime) {
-	const app = express()
-	app.use(cors())
-	app.use(bodyParser.json())
-	// FIXME app.use(bodyParser.urlencoded({ extended: true }));
+  const app = express()
+  app.use(cors())
+  app.use(bodyParser.json())
+  // FIXME app.use(bodyParser.urlencoded({ extended: true }));
 
-	// Load & extend/configure API docs
-	let api = yaml.safeLoad(fs.readFileSync(path.resolve(projectRoot, 'api-base.yml')))
-	api['x-express-openapi-additional-middleware'] = [validateResponses]
-	api['x-express-openapi-validation-strict'] = true
+  // Load & extend/configure API docs
+  let api = yaml.safeLoad(fs.readFileSync(path.resolve(projectRoot, 'api-base.yml')))
+  api['x-express-openapi-additional-middleware'] = [validateResponses]
+  api['x-express-openapi-validation-strict'] = true
 
-	api = pagination.openapi(api)
+  api = pagination.openapi(api)
 
-	openapi.initialize({
-		apiDoc: api,
-		app,
-		paths: path.resolve(projectRoot, 'paths'),
-		docsPath: '/swagger.json',
-		consumesMiddleware: {
-			'multipart/form-data': fileUploads,
-		},
-		dependencies: {
-			storage,
-			runtime,
-		},
-	})
+  openapi.initialize({
+    apiDoc: api,
+    app,
+    paths: path.resolve(projectRoot, 'paths'),
+    docsPath: '/swagger.json',
+    consumesMiddleware: {
+      'multipart/form-data': fileUploads,
+    },
+    dependencies: {
+      storage,
+      runtime,
+    },
+  })
 
-	// If no other handler gets triggered (errors), respond with the
-	// error serialized to JSON.
-	app.use(function (err, req, res) {
-		res.status(err.status).json(err)
-	})
+  // If no other handler gets triggered (errors), respond with the
+  // error serialized to JSON.
+  app.use(function (err, req, res) {
+    res.status(err.status).json(err)
+  })
 
-	return app
+  return app
 }
 
 module.exports = create_app
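
One caveat while touching this file (the same block recurs in lib/discovery.js below): Express recognizes error-handling middleware by arity, so a handler must declare exactly four parameters. The app.use(function (err, req, res) { ... }) above declares three, which Express registers as ordinary middleware that never receives errors. A sketch of the four-argument form; the || 500 fallback is an addition, since err.status may be undefined:

    // Express dispatches errors only to middleware with the
    // (err, req, res, next) signature, even if 'next' is unused.
    app.use(function (err, req, res, next) {
      res.status(err.status || 500).json(err)
    })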

+ 27 - 27
storage-node/packages/colossus/lib/discovery.js

@@ -34,37 +34,37 @@ const validateResponses = require('./middleware/validate_responses')
 
 // Configure app
 function create_app(projectRoot, runtime) {
-	const app = express()
-	app.use(cors())
-	app.use(bodyParser.json())
-	// FIXME app.use(bodyParser.urlencoded({ extended: true }));
+  const app = express()
+  app.use(cors())
+  app.use(bodyParser.json())
+  // FIXME app.use(bodyParser.urlencoded({ extended: true }));
 
-	// Load & extend/configure API docs
-	const api = yaml.safeLoad(fs.readFileSync(path.resolve(projectRoot, 'api-base.yml')))
-	api['x-express-openapi-additional-middleware'] = [validateResponses]
-	api['x-express-openapi-validation-strict'] = true
+  // Load & extend/configure API docs
+  const api = yaml.safeLoad(fs.readFileSync(path.resolve(projectRoot, 'api-base.yml')))
+  api['x-express-openapi-additional-middleware'] = [validateResponses]
+  api['x-express-openapi-validation-strict'] = true
 
-	openapi.initialize({
-		apiDoc: api,
-		app,
-		// paths: path.resolve(projectRoot, 'discovery_app_paths'),
-		paths: {
-			path: '/discover/v0/{id}',
-			module: require('../paths/discover/v0/{id}'),
-		},
-		docsPath: '/swagger.json',
-		dependencies: {
-			runtime,
-		},
-	})
+  openapi.initialize({
+    apiDoc: api,
+    app,
+    // paths: path.resolve(projectRoot, 'discovery_app_paths'),
+    paths: {
+      path: '/discover/v0/{id}',
+      module: require('../paths/discover/v0/{id}'),
+    },
+    docsPath: '/swagger.json',
+    dependencies: {
+      runtime,
+    },
+  })
 
-	// If no other handler gets triggered (errors), respond with the
-	// error serialized to JSON.
-	app.use(function (err, req, res) {
-		res.status(err.status).json(err)
-	})
+  // If no other handler gets triggered (errors), respond with the
+  // error serialized to JSON.
+  app.use(function (err, req, res) {
+    res.status(err.status).json(err)
+  })
 
-	return app
+  return app
 }
 
 module.exports = create_app

+ 18 - 18
storage-node/packages/colossus/lib/middleware/file_uploads.js

@@ -22,22 +22,22 @@ const multer = require('multer')
 
 // Taken from express-openapi examples
 module.exports = function (req, res, next) {
-	multer().any()(req, res, function (err) {
-		if (err) {
-			return next(err)
-		}
-		// Handle both single and multiple files
-		const filesMap = req.files.reduce(
-			(acc, f) =>
-				Object.assign(acc, {
-					[f.fieldname]: (acc[f.fieldname] || []).concat(f),
-				}),
-			{}
-		)
-		Object.keys(filesMap).forEach((fieldname) => {
-			const files = filesMap[fieldname]
-			req.body[fieldname] = files.length > 1 ? files.map(() => '') : ''
-		})
-		return next()
-	})
+  multer().any()(req, res, function (err) {
+    if (err) {
+      return next(err)
+    }
+    // Handle both single and multiple files
+    const filesMap = req.files.reduce(
+      (acc, f) =>
+        Object.assign(acc, {
+          [f.fieldname]: (acc[f.fieldname] || []).concat(f),
+        }),
+      {}
+    )
+    Object.keys(filesMap).forEach(fieldname => {
+      const files = filesMap[fieldname]
+      req.body[fieldname] = files.length > 1 ? files.map(() => '') : ''
+    })
+    return next()
+  })
 }

+ 35 - 35
storage-node/packages/colossus/lib/middleware/validate_responses.js

@@ -22,40 +22,40 @@ const debug = require('debug')('joystream:middleware:validate')
 
 // Function taken directly from https://github.com/kogosoftwarellc/open-api/tree/master/packages/express-openapi
 module.exports = function (req, res, next) {
-	const strictValidation = !!req.apiDoc['x-express-openapi-validation-strict']
-	if (typeof res.validateResponse === 'function') {
-		const send = res.send
-		res.send = function expressOpenAPISend(...args) {
-			const onlyWarn = !strictValidation
-			if (res.get('x-express-openapi-validation-error-for') !== undefined) {
-				return send.apply(res, args)
-			}
-			if (res.get('x-express-openapi-validation-for') !== undefined) {
-				return send.apply(res, args)
-			}
+  const strictValidation = !!req.apiDoc['x-express-openapi-validation-strict']
+  if (typeof res.validateResponse === 'function') {
+    const send = res.send
+    res.send = function expressOpenAPISend(...args) {
+      const onlyWarn = !strictValidation
+      if (res.get('x-express-openapi-validation-error-for') !== undefined) {
+        return send.apply(res, args)
+      }
+      if (res.get('x-express-openapi-validation-for') !== undefined) {
+        return send.apply(res, args)
+      }
 
-			const body = args[0]
-			let validation = res.validateResponse(res.statusCode, body)
-			let validationMessage
-			if (validation === undefined) {
-				validation = { message: undefined, errors: undefined }
-			}
-			if (validation.errors) {
-				const errorList = Array.from(validation.errors)
-					.map((_) => _.message)
-					.join(',')
-				validationMessage = `Invalid response for status code ${res.statusCode}: ${errorList}`
-				debug(validationMessage)
-				// Set to avoid a loop, and to provide the original status code
-				res.set('x-express-openapi-validation-error-for', res.statusCode.toString())
-			}
-			if ((onlyWarn || !validation.errors) && res.statusCode) {
-				res.set('x-express-openapi-validation-for', res.statusCode.toString())
-				return send.apply(res, args)
-			}
-			res.status(500)
-			return res.json({ error: validationMessage })
-		}
-	}
-	next()
+      const body = args[0]
+      let validation = res.validateResponse(res.statusCode, body)
+      let validationMessage
+      if (validation === undefined) {
+        validation = { message: undefined, errors: undefined }
+      }
+      if (validation.errors) {
+        const errorList = Array.from(validation.errors)
+          .map(_ => _.message)
+          .join(',')
+        validationMessage = `Invalid response for status code ${res.statusCode}: ${errorList}`
+        debug(validationMessage)
+        // Set to avoid a loop, and to provide the original status code
+        res.set('x-express-openapi-validation-error-for', res.statusCode.toString())
+      }
+      if ((onlyWarn || !validation.errors) && res.statusCode) {
+        res.set('x-express-openapi-validation-for', res.statusCode.toString())
+        return send.apply(res, args)
+      }
+      res.status(500)
+      return res.json({ error: validationMessage })
+    }
+  }
+  next()
 }

+ 70 - 70
storage-node/packages/colossus/lib/sync.js

@@ -21,90 +21,90 @@
 const debug = require('debug')('joystream:sync')
 
 async function syncCallback(api, storage) {
-	// The first step is to gather all data objects from chain.
-	// TODO: in future, limit to a configured tranche
-	// FIXME this isn't actually on chain yet, so we'll fake it.
-	const knownContentIds = (await api.assets.getKnownContentIds()) || []
+  // The first step is to gather all data objects from chain.
+  // TODO: in future, limit to a configured tranche
+  // FIXME this isn't actually on chain yet, so we'll fake it.
+  const knownContentIds = (await api.assets.getKnownContentIds()) || []
 
-	const roleAddress = api.identities.key.address
-	const providerId = api.storageProviderId
+  const roleAddress = api.identities.key.address
+  const providerId = api.storageProviderId
 
-	// Iterate over all sync objects, and ensure they're synced.
-	const allChecks = knownContentIds.map(async (contentId) => {
-		/* eslint-disable prefer-const */
-		let { relationship, relationshipId } = await api.assets.getStorageRelationshipAndId(providerId, contentId)
+  // Iterate over all sync objects, and ensure they're synced.
+  const allChecks = knownContentIds.map(async contentId => {
+    /* eslint-disable prefer-const */
+    let { relationship, relationshipId } = await api.assets.getStorageRelationshipAndId(providerId, contentId)
 
-		// get the data object
-		// make sure the data object was Accepted by the liaison,
-		// don't just blindly attempt to fetch them
+    // get the data object
+    // make sure the data object was Accepted by the liaison,
+    // don't just blindly attempt to fetch them
 
-		let fileLocal
-		try {
-			// check if we have content or not
-			const stats = await storage.stat(contentId)
-			fileLocal = stats.local
-		} catch (err) {
-			// on stat error or timeout
-			debug(err.message)
-			// we don't have content if we can't stat it
-			fileLocal = false
-		}
+    let fileLocal
+    try {
+      // check if we have content or not
+      const stats = await storage.stat(contentId)
+      fileLocal = stats.local
+    } catch (err) {
+      // on stat error or timeout
+      debug(err.message)
+      // we don't have content if we can't stat it
+      fileLocal = false
+    }
 
-		if (!fileLocal) {
-			try {
-				await storage.synchronize(contentId)
-			} catch (err) {
-				// duplicate logging
-				// debug(err.message)
-				return
-			}
-			// why are we returning, if we synced the file
-			return
-		}
+    if (!fileLocal) {
+      try {
+        await storage.synchronize(contentId)
+      } catch (err) {
+        // duplicate logging
+        // debug(err.message)
+        return
+      }
+      // why are we returning, if we synced the file
+      return
+    }
 
-		if (!relationship) {
-			// create relationship
-			debug(`Creating new storage relationship for ${contentId.encode()}`)
-			try {
-				relationshipId = await api.assets.createAndReturnStorageRelationship(roleAddress, providerId, contentId)
-				await api.assets.toggleStorageRelationshipReady(roleAddress, providerId, relationshipId, true)
-			} catch (err) {
-				debug(`Error creating new storage relationship ${contentId.encode()}: ${err.stack}`)
-				return
-			}
-		} else if (!relationship.ready) {
-			debug(`Updating storage relationship to ready for ${contentId.encode()}`)
-			// update to ready. (Why would there be a relationship set to ready: false?)
-			try {
-				await api.assets.toggleStorageRelationshipReady(roleAddress, providerId, relationshipId, true)
-			} catch (err) {
-				debug(`Error setting relationship ready ${contentId.encode()}: ${err.stack}`)
-			}
-		} else {
-			// we already have content and a ready relationship set. No need to do anything
-			// debug(`content already stored locally ${contentId.encode()}`);
-		}
-	})
+    if (!relationship) {
+      // create relationship
+      debug(`Creating new storage relationship for ${contentId.encode()}`)
+      try {
+        relationshipId = await api.assets.createAndReturnStorageRelationship(roleAddress, providerId, contentId)
+        await api.assets.toggleStorageRelationshipReady(roleAddress, providerId, relationshipId, true)
+      } catch (err) {
+        debug(`Error creating new storage relationship ${contentId.encode()}: ${err.stack}`)
+        return
+      }
+    } else if (!relationship.ready) {
+      debug(`Updating storage relationship to ready for ${contentId.encode()}`)
+      // update to ready. (Why would there be a relationship set to ready: false?)
+      try {
+        await api.assets.toggleStorageRelationshipReady(roleAddress, providerId, relationshipId, true)
+      } catch (err) {
+        debug(`Error setting relationship ready ${contentId.encode()}: ${err.stack}`)
+      }
+    } else {
+      // we already have content and a ready relationship set. No need to do anything
+      // debug(`content already stored locally ${contentId.encode()}`);
+    }
+  })
 
-	return Promise.all(allChecks)
+  return Promise.all(allChecks)
 }
 
 async function syncPeriodic(api, flags, storage) {
-	try {
-		debug('Starting sync run...')
-		await syncCallback(api, storage)
-		debug('sync run complete')
-	} catch (err) {
-		debug(`Error in syncPeriodic ${err.stack}`)
-	}
-	// always try again
-	setTimeout(syncPeriodic, flags.syncPeriod, api, flags, storage)
+  try {
+    debug('Starting sync run...')
+    await syncCallback(api, storage)
+    debug('sync run complete')
+  } catch (err) {
+    debug(`Error in syncPeriodic ${err.stack}`)
+  }
+  // always try again
+  setTimeout(syncPeriodic, flags.syncPeriod, api, flags, storage)
 }
 
 function startSyncing(api, flags, storage) {
-	syncPeriodic(api, flags, storage)
+  syncPeriodic(api, flags, storage)
 }
 
 module.exports = {
-	startSyncing,
+  startSyncing,
 }
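
On the in-code question "why are we returning, if we synced the file": the early return means a freshly synchronized object gets its storage relationship created only on a later pass of the 5-minute sync loop, once stat reports it as local. If storage.synchronize resolves only after the content is actually local (an assumption — it may merely start a background fetch), the code could fall through instead:

    // Sketch, assuming synchronize() resolves once the content is local.
    if (!fileLocal) {
      try {
        await storage.synchronize(contentId)
      } catch (err) {
        return // fetch failed; retry on the next sync run
      }
      // fall through to the relationship bookkeeping below
    }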

+ 313 - 317
storage-node/packages/colossus/paths/asset/v0/{id}.js

@@ -26,324 +26,320 @@ const utilRanges = require('@joystream/storage-utils/ranges')
 const filter = require('@joystream/storage-node-backend/filter')
 
 function errorHandler(response, err, code) {
-	debug(err)
-	response.status(err.code || code || 500).send({ message: err.toString() })
+  debug(err)
+  response.status(err.code || code || 500).send({ message: err.toString() })
 }
 
 module.exports = function (storage, runtime) {
-	const doc = {
-		// parameters for all operations in this path
-		parameters: [
-			{
-				name: 'id',
-				in: 'path',
-				required: true,
-				description: 'Joystream Content ID',
-				schema: {
-					type: 'string',
-				},
-			},
-		],
-
-		// Head: report that ranges are OK
-		async head(req, res) {
-			const id = req.params.id
-
-			// Open file
-			try {
-				const size = await storage.size(id)
-				const stream = await storage.open(id, 'r')
-				const type = stream.fileInfo.mimeType
-
-				// Close the stream; we don't need to fetch the file (if we haven't
-				// already). Then return result.
-				stream.destroy()
-
-				res.status(200)
-				res.contentType(type)
-				res.header('Content-Disposition', 'inline')
-				res.header('Content-Transfer-Encoding', 'binary')
-				res.header('Accept-Ranges', 'bytes')
-				if (size > 0) {
-					res.header('Content-Length', size)
-				}
-				res.send()
-			} catch (err) {
-				errorHandler(res, err, err.code)
-			}
-		},
-
-		// Put for uploads
-		async put(req, res) {
-			const id = req.params.id // content id
-
-			// First check if we're the liaison for the name, otherwise we can bail
-			// out already.
-			const roleAddress = runtime.identities.key.address
-			const providerId = runtime.storageProviderId
-			let dataObject
-			try {
-				debug('calling checkLiaisonForDataObject')
-				dataObject = await runtime.assets.checkLiaisonForDataObject(providerId, id)
-				debug('called checkLiaisonForDataObject')
-			} catch (err) {
-				errorHandler(res, err, 403)
-				return
-			}
-
-			// We'll open a write stream to the backend, but reserve the right to
-			// abort upload if the filters don't smell right.
-			let stream
-			try {
-				stream = await storage.open(id, 'w')
-
-				// We don't know whether the filtering occurs before or after the
-				// stream was finished, and can only commit if both passed.
-				let finished = false
-				let accepted = false
-				const possiblyCommit = () => {
-					if (finished && accepted) {
-						debug('Stream is finished and passed filters; committing.')
-						stream.commit()
-					}
-				}
-
-				stream.on('fileInfo', async (info) => {
-					try {
-						debug('Detected file info:', info)
-
-						// Filter
-						const filterResult = filter({}, req.headers, info.mimeType)
-						if (200 !== filterResult.code) {
-							debug('Rejecting content', filterResult.message)
-							stream.end()
-							res.status(filterResult.code).send({ message: filterResult.message })
-
-							// Reject the content
-							await runtime.assets.rejectContent(roleAddress, providerId, id)
-							return
-						}
-						debug('Content accepted.')
-						accepted = true
-
-						// We may have to commit the stream.
-						possiblyCommit()
-					} catch (err) {
-						errorHandler(res, err)
-					}
-				})
-
-				stream.on('finish', () => {
-					try {
-						finished = true
-						possiblyCommit()
-					} catch (err) {
-						errorHandler(res, err)
-					}
-				})
-
-				stream.on('committed', async (hash) => {
-					console.log('committed', dataObject)
-					try {
-						if (hash !== dataObject.ipfs_content_id.toString()) {
-							debug('Rejecting content. IPFS hash does not match value in objectId')
-							await runtime.assets.rejectContent(roleAddress, providerId, id)
-							res.status(400).send({ message: "Uploaded content doesn't match IPFS hash" })
-							return
-						}
-
-						debug('accepting Content')
-						await runtime.assets.acceptContent(roleAddress, providerId, id)
-
-						debug('creating storage relationship for newly uploaded content')
-						// Create storage relationship and flip it to ready.
-						const dosrId = await runtime.assets.createAndReturnStorageRelationship(
-							roleAddress,
-							providerId,
-							id
-						)
-
-						debug('toggling storage relationship for newly uploaded content')
-						await runtime.assets.toggleStorageRelationshipReady(roleAddress, providerId, dosrId, true)
-
-						debug('Sending OK response.')
-						res.status(200).send({ message: 'Asset uploaded.' })
-					} catch (err) {
-						debug(`${err.message}`)
-						errorHandler(res, err)
-					}
-				})
-
-				stream.on('error', (err) => errorHandler(res, err))
-				req.pipe(stream)
-			} catch (err) {
-				errorHandler(res, err)
-				return
-			}
-		},
-
-		// Get content
-		async get(req, res) {
-			const id = req.params.id
-			const download = req.query.download
-
-			// Parse range header
-			let ranges
-			if (!download) {
-				try {
-					const rangeHeader = req.headers.range
-					ranges = utilRanges.parse(rangeHeader)
-				} catch (err) {
-					// Do nothing; it's ok to ignore malformed ranges and respond with the
-					// full content according to https://www.rfc-editor.org/rfc/rfc7233.txt
-				}
-				if (ranges && ranges.unit !== 'bytes') {
-					// Ignore ranges that are not byte units.
-					ranges = undefined
-				}
-			}
-			debug('Requested range(s) is/are', ranges)
-
-			// Open file
-			try {
-				const size = await storage.size(id)
-				const stream = await storage.open(id, 'r')
-
-				// Add a file extension to download requests if necessary. If the file
-				// already contains an extension, don't add one.
-				let sendName = id
-				const type = stream.fileInfo.mimeType
-				if (download) {
-					let ext = path.extname(sendName)
-					if (!ext) {
-						ext = stream.fileInfo.ext
-						if (ext) {
-							sendName = `${sendName}.${ext}`
-						}
-					}
-				}
-
-				const opts = {
-					name: sendName,
-					type,
-					size,
-					ranges,
-					download,
-				}
-				utilRanges.send(res, stream, opts)
-			} catch (err) {
-				errorHandler(res, err, err.code)
-			}
-		},
-	}
-
-	// OpenAPI specs
-	doc.get.apiDoc = {
-		description: 'Download an asset.',
-		operationId: 'assetData',
-		tags: ['asset', 'data'],
-		parameters: [
-			{
-				name: 'download',
-				in: 'query',
-				description: 'Download instead of streaming inline.',
-				required: false,
-				allowEmptyValue: true,
-				schema: {
-					type: 'boolean',
-					default: false,
-				},
-			},
-		],
-		responses: {
-			200: {
-				description: 'Asset download.',
-				content: {
-					default: {
-						schema: {
-							type: 'string',
-							format: 'binary',
-						},
-					},
-				},
-			},
-			default: {
-				description: 'Unexpected error',
-				content: {
-					'application/json': {
-						schema: {
-							$ref: '#/components/schemas/Error',
-						},
-					},
-				},
-			},
-		},
-	}
-
-	doc.put.apiDoc = {
-		description: 'Asset upload.',
-		operationId: 'assetUpload',
-		tags: ['asset', 'data'],
-		requestBody: {
-			content: {
-				'*/*': {
-					schema: {
-						type: 'string',
-						format: 'binary',
-					},
-				},
-			},
-		},
-		responses: {
-			200: {
-				description: 'Asset upload.',
-				content: {
-					'application/json': {
-						schema: {
-							type: 'object',
-							required: ['message'],
-							properties: {
-								message: {
-									type: 'string',
-								},
-							},
-						},
-					},
-				},
-			},
-			default: {
-				description: 'Unexpected error',
-				content: {
-					'application/json': {
-						schema: {
-							$ref: '#/components/schemas/Error',
-						},
-					},
-				},
-			},
-		},
-	}
-
-	doc.head.apiDoc = {
-		description: 'Asset download information.',
-		operationId: 'assetInfo',
-		tags: ['asset', 'metadata'],
-		responses: {
-			200: {
-				description: 'Asset info.',
-			},
-			default: {
-				description: 'Unexpected error',
-				content: {
-					'application/json': {
-						schema: {
-							$ref: '#/components/schemas/Error',
-						},
-					},
-				},
-			},
-		},
-	}
-
-	return doc
+  const doc = {
+    // parameters for all operations in this path
+    parameters: [
+      {
+        name: 'id',
+        in: 'path',
+        required: true,
+        description: 'Joystream Content ID',
+        schema: {
+          type: 'string',
+        },
+      },
+    ],
+
+    // Head: report that ranges are OK
+    async head(req, res) {
+      const id = req.params.id
+
+      // Open file
+      try {
+        const size = await storage.size(id)
+        const stream = await storage.open(id, 'r')
+        const type = stream.fileInfo.mimeType
+
+        // Close the stream; we don't need to fetch the file (if we haven't
+        // already). Then return result.
+        stream.destroy()
+
+        res.status(200)
+        res.contentType(type)
+        res.header('Content-Disposition', 'inline')
+        res.header('Content-Transfer-Encoding', 'binary')
+        res.header('Accept-Ranges', 'bytes')
+        if (size > 0) {
+          res.header('Content-Length', size)
+        }
+        res.send()
+      } catch (err) {
+        errorHandler(res, err, err.code)
+      }
+    },
+
+    // Put for uploads
+    async put(req, res) {
+      const id = req.params.id // content id
+
+      // First check if we're the liaison for the name, otherwise we can bail
+      // out already.
+      const roleAddress = runtime.identities.key.address
+      const providerId = runtime.storageProviderId
+      let dataObject
+      try {
+        debug('calling checkLiaisonForDataObject')
+        dataObject = await runtime.assets.checkLiaisonForDataObject(providerId, id)
+        debug('called checkLiaisonForDataObject')
+      } catch (err) {
+        errorHandler(res, err, 403)
+        return
+      }
+
+      // We'll open a write stream to the backend, but reserve the right to
+      // abort upload if the filters don't smell right.
+      let stream
+      try {
+        stream = await storage.open(id, 'w')
+
+        // We don't know whether filtering completes before or after the
+        // stream finishes, so we can only commit once both have passed.
+        let finished = false
+        let accepted = false
+        const possiblyCommit = () => {
+          if (finished && accepted) {
+            debug('Stream is finished and passed filters; committing.')
+            stream.commit()
+          }
+        }
+
+        stream.on('fileInfo', async info => {
+          try {
+            debug('Detected file info:', info)
+
+            // Filter
+            const filterResult = filter({}, req.headers, info.mimeType)
+            if (200 !== filterResult.code) {
+              debug('Rejecting content', filterResult.message)
+              stream.end()
+              res.status(filterResult.code).send({ message: filterResult.message })
+
+              // Reject the content
+              await runtime.assets.rejectContent(roleAddress, providerId, id)
+              return
+            }
+            debug('Content accepted.')
+            accepted = true
+
+            // We may have to commit the stream.
+            possiblyCommit()
+          } catch (err) {
+            errorHandler(res, err)
+          }
+        })
+
+        stream.on('finish', () => {
+          try {
+            finished = true
+            possiblyCommit()
+          } catch (err) {
+            errorHandler(res, err)
+          }
+        })
+
+        stream.on('committed', async hash => {
+          debug('committed', dataObject)
+          try {
+            if (hash !== dataObject.ipfs_content_id.toString()) {
+              debug('Rejecting content. IPFS hash does not match value in data object')
+              await runtime.assets.rejectContent(roleAddress, providerId, id)
+              res.status(400).send({ message: "Uploaded content doesn't match IPFS hash" })
+              return
+            }
+
+            debug('Accepting content')
+            await runtime.assets.acceptContent(roleAddress, providerId, id)
+
+            debug('creating storage relationship for newly uploaded content')
+            // Create storage relationship and flip it to ready.
+            const dosrId = await runtime.assets.createAndReturnStorageRelationship(roleAddress, providerId, id)
+
+            debug('toggling storage relationship for newly uploaded content')
+            await runtime.assets.toggleStorageRelationshipReady(roleAddress, providerId, dosrId, true)
+
+            debug('Sending OK response.')
+            res.status(200).send({ message: 'Asset uploaded.' })
+          } catch (err) {
+            debug(`${err.message}`)
+            errorHandler(res, err)
+          }
+        })
+
+        stream.on('error', err => errorHandler(res, err))
+        req.pipe(stream)
+      } catch (err) {
+        errorHandler(res, err)
+        return
+      }
+    },
+
+    // Get content
+    async get(req, res) {
+      const id = req.params.id
+      const download = req.query.download
+
+      // Parse range header
+      let ranges
+      if (!download) {
+        try {
+          const rangeHeader = req.headers.range
+          ranges = utilRanges.parse(rangeHeader)
+        } catch (err) {
+          // Do nothing; it's ok to ignore malformed ranges and respond with the
+          // full content according to https://www.rfc-editor.org/rfc/rfc7233.txt
+        }
+        if (ranges && ranges.unit !== 'bytes') {
+          // Ignore ranges that are not byte units.
+          ranges = undefined
+        }
+      }
+      debug('Requested range(s) is/are', ranges)
+
+      // Open file
+      try {
+        const size = await storage.size(id)
+        const stream = await storage.open(id, 'r')
+
+        // Add a file extension to download requests if necessary. If the file
+        // already contains an extension, don't add one.
+        let sendName = id
+        const type = stream.fileInfo.mimeType
+        if (download) {
+          let ext = path.extname(sendName)
+          if (!ext) {
+            ext = stream.fileInfo.ext
+            if (ext) {
+              sendName = `${sendName}.${ext}`
+            }
+          }
+        }
+
+        const opts = {
+          name: sendName,
+          type,
+          size,
+          ranges,
+          download,
+        }
+        utilRanges.send(res, stream, opts)
+      } catch (err) {
+        errorHandler(res, err, err.code)
+      }
+    },
+  }
+
+  // OpenAPI specs
+  doc.get.apiDoc = {
+    description: 'Download an asset.',
+    operationId: 'assetData',
+    tags: ['asset', 'data'],
+    parameters: [
+      {
+        name: 'download',
+        in: 'query',
+        description: 'Download instead of streaming inline.',
+        required: false,
+        allowEmptyValue: true,
+        schema: {
+          type: 'boolean',
+          default: false,
+        },
+      },
+    ],
+    responses: {
+      200: {
+        description: 'Asset download.',
+        content: {
+          default: {
+            schema: {
+              type: 'string',
+              format: 'binary',
+            },
+          },
+        },
+      },
+      default: {
+        description: 'Unexpected error',
+        content: {
+          'application/json': {
+            schema: {
+              $ref: '#/components/schemas/Error',
+            },
+          },
+        },
+      },
+    },
+  }
+
+  doc.put.apiDoc = {
+    description: 'Asset upload.',
+    operationId: 'assetUpload',
+    tags: ['asset', 'data'],
+    requestBody: {
+      content: {
+        '*/*': {
+          schema: {
+            type: 'string',
+            format: 'binary',
+          },
+        },
+      },
+    },
+    responses: {
+      200: {
+        description: 'Asset upload.',
+        content: {
+          'application/json': {
+            schema: {
+              type: 'object',
+              required: ['message'],
+              properties: {
+                message: {
+                  type: 'string',
+                },
+              },
+            },
+          },
+        },
+      },
+      default: {
+        description: 'Unexpected error',
+        content: {
+          'application/json': {
+            schema: {
+              $ref: '#/components/schemas/Error',
+            },
+          },
+        },
+      },
+    },
+  }
+
+  doc.head.apiDoc = {
+    description: 'Asset download information.',
+    operationId: 'assetInfo',
+    tags: ['asset', 'metadata'],
+    responses: {
+      200: {
+        description: 'Asset info.',
+      },
+      default: {
+        description: 'Unexpected error',
+        content: {
+          'application/json': {
+            schema: {
+              $ref: '#/components/schemas/Error',
+            },
+          },
+        },
+      },
+    },
+  }
+
+  return doc
 }
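
The get handler above honours byte-range requests per RFC 7233 and silently falls back to the full body when the Range header is malformed. A minimal client-side sketch exercising that behaviour (host, port and content ID are placeholders, not values from this repository):

  const axios = require('axios')

  // Request only the first kilobyte of an asset. A well-formed Range
  // header yields a 206 Partial Content slice; a malformed one is
  // ignored and the full body comes back with status 200.
  async function fetchFirstKilobyte(endpoint, contentId) {
    const res = await axios.get(`${endpoint}/asset/v0/${contentId}`, {
      headers: { Range: 'bytes=0-1023' },
      responseType: 'arraybuffer',
    })
    console.log(res.status, res.data.byteLength)
  }

  fetchFirstKilobyte('http://localhost:3000', 'placeholder-content-id')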

+ 75 - 75
storage-node/packages/colossus/paths/discover/v0/{id}.js

@@ -5,86 +5,86 @@ const MAX_CACHE_AGE = 30 * 60 * 1000
 const USE_CACHE = true
 
 module.exports = function (runtime) {
-	const doc = {
-		// parameters for all operations in this path
-		parameters: [
-			{
-				name: 'id',
-				in: 'path',
-				required: true,
-				description: 'Actor accountId',
-				schema: {
-					type: 'string', // integer ?
-				},
-			},
-		],
+  const doc = {
+    // parameters for all operations in this path
+    parameters: [
+      {
+        name: 'id',
+        in: 'path',
+        required: true,
+        description: 'Actor accountId',
+        schema: {
+          type: 'string', // integer ?
+        },
+      },
+    ],
 
-		// Resolve Service Information
-		async get(req, res) {
-			let parsedId
-			try {
-				parsedId = parseInt(req.params.id)
-			} catch (err) {
-				return res.status(400).end()
-			}
+    // Resolve Service Information
+    async get(req, res) {
+      // parseInt never throws; it returns NaN on bad input, so guard
+      // with Number.isNaN rather than try/catch.
+      const parsedId = parseInt(req.params.id, 10)
+      if (Number.isNaN(parsedId)) {
+        return res.status(400).end()
+      }
 
-			const id = parsedId
-			let cacheMaxAge = req.query.max_age
+      const id = parsedId
+      let cacheMaxAge = req.query.max_age
 
-			if (cacheMaxAge) {
-				try {
-					cacheMaxAge = parseInt(cacheMaxAge)
-				} catch (err) {
-					cacheMaxAge = MAX_CACHE_AGE
-				}
-			} else {
-				cacheMaxAge = 0
-			}
+      if (cacheMaxAge) {
+        // parseInt returns NaN rather than throwing on bad input.
+        cacheMaxAge = parseInt(cacheMaxAge, 10)
+        if (Number.isNaN(cacheMaxAge)) {
+          cacheMaxAge = MAX_CACHE_AGE
+        }
+      } else {
+        cacheMaxAge = 0
+      }
 
-			// todo - validate id before querying
+      // todo - validate id before querying
 
-			try {
-				debug(`resolving ${id}`)
-				const info = await discover.discover(id, runtime, USE_CACHE, cacheMaxAge)
-				if (info === null) {
-					debug('info not found')
-					res.status(404).end()
-				} else {
-					res.status(200).send(info)
-				}
-			} catch (err) {
-				debug(`${err}`)
-				res.status(404).end()
-			}
-		},
-	}
+      try {
+        debug(`resolving ${id}`)
+        const info = await discover.discover(id, runtime, USE_CACHE, cacheMaxAge)
+        if (info === null) {
+          debug('info not found')
+          res.status(404).end()
+        } else {
+          res.status(200).send(info)
+        }
+      } catch (err) {
+        debug(`${err}`)
+        res.status(404).end()
+      }
+    },
+  }
 
-	// OpenAPI specs
-	doc.get.apiDoc = {
-		description: 'Resolve Service Information',
-		operationId: 'discover',
-		// tags: ['asset', 'data'],
-		responses: {
-			200: {
-				description: 'Wrapped JSON Service Information',
-				content: {
-					'application/json': {
-						schema: {
-							required: ['serialized'],
-							properties: {
-								serialized: {
-									type: 'string',
-								},
-								signature: {
-									type: 'string',
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
+  // OpenAPI specs
+  doc.get.apiDoc = {
+    description: 'Resolve Service Information',
+    operationId: 'discover',
+    // tags: ['asset', 'data'],
+    responses: {
+      200: {
+        description: 'Wrapped JSON Service Information',
+        content: {
+          'application/json': {
+            schema: {
+              required: ['serialized'],
+              properties: {
+                serialized: {
+                  type: 'string',
+                },
+                signature: {
+                  type: 'string',
+                },
+              },
+            },
+          },
+        },
+      },
+    },
+  }
 
-	return doc
+  return doc
 }
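
The max_age query parameter above bounds cache staleness: a positive value (in milliseconds) forces a fresh lookup once the cached record is older than that, while omitting it accepts any cached record. A hedged client sketch (host and provider id are placeholders):

  const axios = require('axios')

  // Resolve a provider's published service information, tolerating a
  // cached record up to ten minutes old.
  async function resolveProvider(endpoint, providerId) {
    const url = `${endpoint}/discover/v0/${providerId}?max_age=${10 * 60 * 1000}`
    const { data } = await axios.get(url)
    // The response wraps the service info as a JSON string under
    // `serialized` (see the 200 schema above).
    return JSON.parse(data.serialized)
  }

  resolveProvider('http://localhost:3000', 0).then(console.log, console.error)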

+ 139 - 139
storage-node/packages/discovery/discover.js

@@ -11,7 +11,7 @@ const { newExternallyControlledPromise } = require('@joystream/storage-utils/ext
  * @return {boolean} returns result check.
  */
 function inBrowser() {
-	return typeof window !== 'undefined'
+  return typeof window !== 'undefined'
 }
 
 /**
@@ -40,15 +40,15 @@ const CACHE_TTL = 60 * 60 * 1000
  * @returns { Promise<string | null> } - ipns multiformat address
  */
 async function getIpnsIdentity(storageProviderId, runtimeApi) {
-	storageProviderId = new BN(storageProviderId)
-	// lookup ipns identity from chain corresponding to storageProviderId
-	const info = await runtimeApi.discovery.getAccountInfo(storageProviderId)
-
-	if (info === null) {
-		// no identity found on chain for account
-		return null
-	}
-	return info.identity.toString()
+  storageProviderId = new BN(storageProviderId)
+  // lookup ipns identity from chain corresponding to storageProviderId
+  const info = await runtimeApi.discovery.getAccountInfo(storageProviderId)
+
+  if (info === null) {
+    // no identity found on chain for account
+    return null
+  }
+  return info.identity.toString()
 }
 
 /**
@@ -62,27 +62,27 @@ async function getIpnsIdentity(storageProviderId, runtimeApi) {
  * @returns { Promise<object> } - the published service information
  */
 async function discover_over_ipfs_http_gateway(storageProviderId, runtimeApi, gateway = 'http://localhost:8080') {
-	storageProviderId = new BN(storageProviderId)
-	const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
+  storageProviderId = new BN(storageProviderId)
+  const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
 
-	if (!isProvider) {
-		throw new Error('Cannot discover non storage providers')
-	}
+  if (!isProvider) {
+    throw new Error('Cannot discover non storage providers')
+  }
 
-	const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
+  const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
 
-	if (identity === null) {
-		// don't waste time trying to resolve if no identity was found
-		throw new Error('no identity to resolve')
-	}
+  if (identity === null) {
+    // don't waste time trying to resolve if no identity was found
+    throw new Error('no identity to resolve')
+  }
 
-	gateway = stripEndingSlash(gateway)
+  gateway = stripEndingSlash(gateway)
 
-	const url = `${gateway}/ipns/${identity}`
+  const url = `${gateway}/ipns/${identity}`
 
-	const response = await axios.get(url)
+  const response = await axios.get(url)
 
-	return response.data
+  return response.data
 }
 
 /**
@@ -96,37 +96,37 @@ async function discover_over_ipfs_http_gateway(storageProviderId, runtimeApi, ga
  * @returns { Promise<object> } - the published service information
  */
 async function discover_over_joystream_discovery_service(storageProviderId, runtimeApi, discoverApiEndpoint) {
-	storageProviderId = new BN(storageProviderId)
-	const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
+  storageProviderId = new BN(storageProviderId)
+  const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
 
-	if (!isProvider) {
-		throw new Error('Cannot discover non storage providers')
-	}
+  if (!isProvider) {
+    throw new Error('Cannot discover non storage providers')
+  }
 
-	const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
+  const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
 
-	// don't waste time trying to resolve if no identity was found
-	if (identity === null) {
-		throw new Error('no identity to resolve')
-	}
+  // don't waste time trying to resolve if no identity was found
+  if (identity === null) {
+    throw new Error('no identity to resolve')
+  }
 
-	if (!discoverApiEndpoint) {
-		// Use bootstrap nodes
-		const discoveryBootstrapNodes = await runtimeApi.discovery.getBootstrapEndpoints()
+  if (!discoverApiEndpoint) {
+    // Use bootstrap nodes
+    const discoveryBootstrapNodes = await runtimeApi.discovery.getBootstrapEndpoints()
 
-		if (discoveryBootstrapNodes.length) {
-			discoverApiEndpoint = stripEndingSlash(discoveryBootstrapNodes[0].toString())
-		} else {
-			throw new Error('No known discovery bootstrap nodes found on network')
-		}
-	}
+    if (discoveryBootstrapNodes.length) {
+      discoverApiEndpoint = stripEndingSlash(discoveryBootstrapNodes[0].toString())
+    } else {
+      throw new Error('No known discovery bootstrap nodes found on network')
+    }
+  }
 
-	const url = `${discoverApiEndpoint}/discover/v0/${storageProviderId.toNumber()}`
+  const url = `${discoverApiEndpoint}/discover/v0/${storageProviderId.toNumber()}`
 
-	// should have parsed if data was json?
-	const response = await axios.get(url)
+  // should have parsed if data was json?
+  const response = await axios.get(url)
 
-	return response.data
+  return response.data
 }
 
 /**
@@ -138,37 +138,37 @@ async function discover_over_joystream_discovery_service(storageProviderId, runt
  * @returns { Promise<object> } - the published service information
  */
 async function discover_over_local_ipfs_node(storageProviderId, runtimeApi) {
-	storageProviderId = new BN(storageProviderId)
-	const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
+  storageProviderId = new BN(storageProviderId)
+  const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
 
-	if (!isProvider) {
-		throw new Error('Cannot discover non storage providers')
-	}
+  if (!isProvider) {
+    throw new Error('Cannot discover non storage providers')
+  }
 
-	const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
+  const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
 
-	if (identity === null) {
-		// don't waste time trying to resolve if no identity was found
-		throw new Error('no identity to resolve')
-	}
+  if (identity === null) {
+    // don't waste time trying to resolve if no identity was found
+    throw new Error('no identity to resolve')
+  }
 
-	const ipns_address = `/ipns/${identity}/`
+  const ipns_address = `/ipns/${identity}/`
 
-	debug('resolved ipns to ipfs object')
-	// Can this call hang forever!? can/should we set a timeout?
-	const ipfs_name = await ipfs.name.resolve(ipns_address, {
-		// don't recurse, there should only be one indirection to the service info file
-		recursive: false,
-		nocache: false,
-	})
+  debug('resolved ipns to ipfs object')
+  // Can this call hang forever!? can/should we set a timeout?
+  const ipfs_name = await ipfs.name.resolve(ipns_address, {
+    // don't recurse, there should only be one indirection to the service info file
+    recursive: false,
+    nocache: false,
+  })
 
-	debug('getting ipfs object', ipfs_name)
-	const data = await ipfs.get(ipfs_name) // this can sometimes hang forever!?! can we set a timeout?
+  debug('getting ipfs object', ipfs_name)
+  const data = await ipfs.get(ipfs_name) // this can sometimes hang forever!?! can we set a timeout?
 
-	// there should only be one file published under the resolved path
-	const content = data[0].content
+  // there should only be one file published under the resolved path
+  const content = data[0].content
 
-	return JSON.parse(content)
+  return JSON.parse(content)
 }
 
 /**
@@ -186,25 +186,25 @@ async function discover_over_local_ipfs_node(storageProviderId, runtimeApi) {
  * @returns { Promise<object | null> } - the published service information
  */
 async function discover(storageProviderId, runtimeApi, useCachedValue = false, maxCacheAge = 0) {
-	storageProviderId = new BN(storageProviderId)
-	const id = storageProviderId.toNumber()
-	const cached = accountInfoCache[id]
-
-	if (cached && useCachedValue) {
-		if (maxCacheAge > 0) {
-			// get latest value
-			if (Date.now() > cached.updated + maxCacheAge) {
-				return _discover(storageProviderId, runtimeApi)
-			}
-		}
-		// refresh cache if stale; a new value is returned on the next cached query
-		if (Date.now() > cached.updated + CACHE_TTL) {
-			_discover(storageProviderId, runtimeApi)
-		}
-		// return best known value
-		return cached.value
-	}
-	return _discover(storageProviderId, runtimeApi)
+  storageProviderId = new BN(storageProviderId)
+  const id = storageProviderId.toNumber()
+  const cached = accountInfoCache[id]
+
+  if (cached && useCachedValue) {
+    if (maxCacheAge > 0) {
+      // get latest value
+      if (Date.now() > cached.updated + maxCacheAge) {
+        return _discover(storageProviderId, runtimeApi)
+      }
+    }
+    // refresh cache if stale; a new value is returned on the next cached query
+    if (Date.now() > cached.updated + CACHE_TTL) {
+      _discover(storageProviderId, runtimeApi)
+    }
+    // return best known value
+    return cached.value
+  }
+  return _discover(storageProviderId, runtimeApi)
 }
 
 /**
@@ -216,57 +216,57 @@ async function discover(storageProviderId, runtimeApi, useCachedValue = false, m
  * @returns { Promise<object | null> } - the published service information
  */
 async function _discover(storageProviderId, runtimeApi) {
-	storageProviderId = new BN(storageProviderId)
-	const id = storageProviderId.toNumber()
-
-	const discoveryResult = activeDiscoveries[id]
-	if (discoveryResult) {
-		debug('discovery in progress waiting for result for', id)
-		return discoveryResult
-	}
-
-	debug('starting new discovery for', id)
-	const deferredDiscovery = newExternallyControlledPromise()
-	activeDiscoveries[id] = deferredDiscovery.promise
-
-	let result
-	try {
-		if (inBrowser()) {
-			result = await discover_over_joystream_discovery_service(storageProviderId, runtimeApi)
-		} else {
-			result = await discover_over_local_ipfs_node(storageProviderId, runtimeApi)
-		}
-
-		debug(result)
-		result = JSON.stringify(result)
-		accountInfoCache[id] = {
-			value: result,
-			updated: Date.now(),
-		}
-
-		deferredDiscovery.resolve(result)
-		delete activeDiscoveries[id]
-		return result
-	} catch (err) {
-		// we catch the error so we can update all callers
-		// and throw again to inform the first caller.
-		debug(err.message)
-		delete activeDiscoveries[id]
-		// deferredDiscovery.reject(err)
-		deferredDiscovery.resolve(null) // resolve to null until we figure out the issue below
-		// throw err // <-- throwing but this isn't being
-		// caught correctly in express server! Is it because there is an uncaught promise somewhere
-		// in the prior .reject() call ?
-		// I've only seen this behaviour when error is from ipfs-client
-		// ... is this unique to errors thrown from ipfs-client?
-		// Problem is its crashing the node so just return null for now
-		return null
-	}
+  storageProviderId = new BN(storageProviderId)
+  const id = storageProviderId.toNumber()
+
+  const discoveryResult = activeDiscoveries[id]
+  if (discoveryResult) {
+    debug('discovery in progress waiting for result for', id)
+    return discoveryResult
+  }
+
+  debug('starting new discovery for', id)
+  const deferredDiscovery = newExternallyControlledPromise()
+  activeDiscoveries[id] = deferredDiscovery.promise
+
+  let result
+  try {
+    if (inBrowser()) {
+      result = await discover_over_joystream_discovery_service(storageProviderId, runtimeApi)
+    } else {
+      result = await discover_over_local_ipfs_node(storageProviderId, runtimeApi)
+    }
+
+    debug(result)
+    result = JSON.stringify(result)
+    accountInfoCache[id] = {
+      value: result,
+      updated: Date.now(),
+    }
+
+    deferredDiscovery.resolve(result)
+    delete activeDiscoveries[id]
+    return result
+  } catch (err) {
+    // we catch the error so we can update all callers
+    // and throw again to inform the first caller.
+    debug(err.message)
+    delete activeDiscoveries[id]
+    // deferredDiscovery.reject(err)
+    deferredDiscovery.resolve(null) // resolve to null until we figure out the issue below
+    // throw err // <-- throwing but this isn't being
+    // caught correctly in express server! Is it because there is an uncaught promise somewhere
+    // in the prior .reject() call ?
+    // I've only seen this behaviour when error is from ipfs-client
+    // ... is this unique to errors thrown from ipfs-client?
+    // Problem is its crashing the node so just return null for now
+    return null
+  }
 }
 
 module.exports = {
-	discover,
-	discover_over_joystream_discovery_service,
-	discover_over_ipfs_http_gateway,
-	discover_over_local_ipfs_node,
+  discover,
+  discover_over_joystream_discovery_service,
+  discover_over_ipfs_http_gateway,
+  discover_over_local_ipfs_node,
 }
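
_discover above deduplicates concurrent lookups: every caller for the same provider id awaits one externally controlled promise, and on failure that promise is resolved with null rather than rejected so no waiter is left hanging. The pattern in isolation, with a hand-rolled deferred standing in for newExternallyControlledPromise (all names here are illustrative):

  // Concurrent calls for the same key share one in-flight promise
  // instead of issuing duplicate work.
  const inFlight = {}

  function newDeferred() {
    let resolve
    const promise = new Promise((res) => {
      resolve = res
    })
    return { promise, resolve }
  }

  async function dedupedLookup(key, doLookup) {
    if (inFlight[key]) return inFlight[key]

    const deferred = newDeferred()
    inFlight[key] = deferred.promise
    try {
      const result = await doLookup(key)
      deferred.resolve(result)
      return result
    } catch (err) {
      // Mirror discover.js: settle with null instead of rejecting, so
      // waiting callers are released without an unhandled rejection.
      deferred.resolve(null)
      return null
    } finally {
      delete inFlight[key]
    }
  }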

+ 22 - 22
storage-node/packages/discovery/example.js

@@ -3,35 +3,35 @@ const { RuntimeApi } = require('@joystream/storage-runtime-api')
 const { discover, publish } = require('./')
 
 async function main() {
-	// The assigned storage-provider id
-	const providerId = 0
+  // The assigned storage-provider id
+  const providerId = 0
 
-	const runtimeApi = await RuntimeApi.create({
-		// Path to the role account key file of the provider
-		account_file: '/path/to/role_account_key_file.json',
-		storageProviderId: providerId,
-	})
+  const runtimeApi = await RuntimeApi.create({
+    // Path to the role account key file of the provider
+    account_file: '/path/to/role_account_key_file.json',
+    storageProviderId: providerId,
+  })
 
-	const ipnsId = await publish.publish(
-		{
-			asset: {
-				version: 1,
-				endpoint: 'http://endpoint.com',
-			},
-		},
-		runtimeApi
-	)
+  const ipnsId = await publish.publish(
+    {
+      asset: {
+        version: 1,
+        endpoint: 'http://endpoint.com',
+      },
+    },
+    runtimeApi
+  )
 
-	console.log(ipnsId)
+  console.log(ipnsId)
 
-	// register ipnsId on chain
-	await runtimeApi.setAccountInfo(ipnsId)
+  // register ipnsId on chain
+  await runtimeApi.setAccountInfo(ipnsId)
 
-	const serviceInfo = await discover.discover(providerId, runtimeApi)
+  const serviceInfo = await discover.discover(providerId, runtimeApi)
 
-	console.log(serviceInfo)
+  console.log(serviceInfo)
 
-	runtimeApi.api.disconnect()
+  runtimeApi.api.disconnect()
 }
 
 main()

+ 2 - 2
storage-node/packages/discovery/index.js

@@ -1,4 +1,4 @@
 module.exports = {
-	discover: require('./discover'),
-	publish: require('./publish'),
+  discover: require('./discover'),
+  publish: require('./publish'),
 }

+ 39 - 39
storage-node/packages/discovery/publish.js

@@ -18,7 +18,7 @@ const PUBLISH_KEY = 'self'
  * @returns {Buffer} returns buffer from UTF-8 json
  */
 function bufferFrom(data) {
-	return Buffer.from(JSON.stringify(data), 'utf-8')
+  return Buffer.from(JSON.stringify(data), 'utf-8')
 }
 
 /**
@@ -28,9 +28,9 @@ function bufferFrom(data) {
  * @returns {Buffer} return buffer.
  */
 function encodeServiceInfo(info) {
-	return bufferFrom({
-		serialized: JSON.stringify(info),
-	})
+  return bufferFrom({
+    serialized: JSON.stringify(info),
+  })
 }
 
 /**
@@ -41,48 +41,48 @@ function encodeServiceInfo(info) {
  * @returns {string} - the ipns id
  */
 async function publish(serviceInfo) {
-	const keys = await ipfs.key.list()
-	let servicesKey = keys.find((key) => key.name === PUBLISH_KEY)
+  const keys = await ipfs.key.list()
+  let servicesKey = keys.find(key => key.name === PUBLISH_KEY)
 
-	// An ipfs node will always have the self key.
-	// If the publish key is specified as anything else and it doesn't exist
-	// we create it.
-	if (PUBLISH_KEY !== 'self' && !servicesKey) {
-		debug('generating ipns services key')
-		servicesKey = await ipfs.key.gen(PUBLISH_KEY, {
-			type: 'rsa',
-			size: 2048,
-		})
-	}
+  // An ipfs node will always have the self key.
+  // If the publish key is specified as anything else and it doesn't exist
+  // we create it.
+  if (PUBLISH_KEY !== 'self' && !servicesKey) {
+    debug('generating ipns services key')
+    servicesKey = await ipfs.key.gen(PUBLISH_KEY, {
+      type: 'rsa',
+      size: 2048,
+    })
+  }
 
-	if (!servicesKey) {
-		throw new Error('No IPFS publishing key available!')
-	}
+  if (!servicesKey) {
+    throw new Error('No IPFS publishing key available!')
+  }
 
-	debug('adding service info file to node')
-	const files = await ipfs.add(encodeServiceInfo(serviceInfo))
+  debug('adding service info file to node')
+  const files = await ipfs.add(encodeServiceInfo(serviceInfo))
 
-	debug('publishing...')
-	const published = await ipfs.name.publish(files[0].hash, {
-		key: PUBLISH_KEY,
-		resolve: false,
-		// lifetime: // string - Time duration of the record. Default: 24h
-		// ttl:      // string - Time duration this record should be cached
-	})
+  debug('publishing...')
+  const published = await ipfs.name.publish(files[0].hash, {
+    key: PUBLISH_KEY,
+    resolve: false,
+    // lifetime: // string - Time duration of the record. Default: 24h
+    // ttl:      // string - Time duration this record should be cached
+  })
 
-	// The name and ipfs hash of the published service information file, eg.
-	// {
-	//   name: 'QmUNQCkaU1TRnc1WGixqEP3Q3fazM8guSdFRsdnSJTN36A',
-	//   value: '/ipfs/QmcSjtVMfDSSNYCxNAb9PxNpEigCw7h1UZ77gip3ghfbnA'
-	// }
-	// .. The name is equivalent to the key id that was used.
-	debug(published)
+  // The name and ipfs hash of the published service information file, eg.
+  // {
+  //   name: 'QmUNQCkaU1TRnc1WGixqEP3Q3fazM8guSdFRsdnSJTN36A',
+  //   value: '/ipfs/QmcSjtVMfDSSNYCxNAb9PxNpEigCw7h1UZ77gip3ghfbnA'
+  // }
+  // .. The name is equivalent to the key id that was used.
+  debug(published)
 
-	// Return the key id under which the content was published, which is used
-	// to look up the actual ipfs content id of the published service information.
-	return servicesKey.id
+  // Return the key id under which the content was published, which is used
+  // to look up the actual ipfs content id of the published service information.
+  return servicesKey.id
 }
 
 module.exports = {
-	publish,
+  publish,
 }
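
A hedged usage sketch for publish, assuming a local IPFS daemon is reachable with the module's default client configuration; the package import name is assumed from the repo's naming convention and the endpoint value is a placeholder:

  const { publish } = require('@joystream/storage-discovery') // name assumed

  async function announce() {
    // publish() adds the encoded service info file and returns the
    // IPNS key id it was published under (the value registered on
    // chain), not the IPFS content hash itself.
    const ipnsId = await publish.publish({
      asset: { version: 1, endpoint: 'http://localhost:3000' },
    })
    console.log('published under IPNS id', ipnsId)
  }

  announce().catch(console.error)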

+ 173 - 175
storage-node/packages/helios/bin/cli.js

@@ -7,194 +7,192 @@ const axios = require('axios')
 const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')
 
 async function main() {
-	const runtime = await RuntimeApi.create()
-	const { api } = runtime
-
-	// get current block height
-	const currentHeader = await api.rpc.chain.getHeader()
-	const currentHeight = currentHeader.number.toBn()
-
-	// get all providers
-	const { ids: storageProviders } = await runtime.workers.getAllProviders()
-	console.log(`Found ${storageProviders.length} staked providers`)
-
-	const storageProviderAccountInfos = await Promise.all(
-		storageProviders.map(async (providerId) => {
-			return {
-				providerId,
-				info: await runtime.discovery.getAccountInfo(providerId),
-			}
-		})
-	)
-
-	// providers that have updated their account info and published ipfs id
-	// are considered live if the record hasn't expired yet
-	const liveProviders = storageProviderAccountInfos.filter(({ info }) => {
-		return info && info.expires_at.gte(currentHeight)
-	})
-
-	const downProviders = storageProviderAccountInfos.filter(({ info }) => {
-		return info === null
-	})
-
-	const expiredTtlProviders = storageProviderAccountInfos.filter(({ info }) => {
-		return info && currentHeight.gte(info.expires_at)
-	})
-
-	const providersStatuses = mapInfoToStatus(liveProviders, currentHeight)
-	console.log('\n== Live Providers\n', providersStatuses)
-
-	const expiredProviderStatuses = mapInfoToStatus(expiredTtlProviders, currentHeight)
-	console.log('\n== Expired Providers\n', expiredProviderStatuses)
-
-	console.log(
-		'\n== Down Providers!\n',
-		downProviders.map((provider) => {
-			return {
-				providerId: provider.providerId,
-			}
-		})
-	)
-
-	// Resolve IPNS identities of providers
-	console.log('\nResolving live provider API Endpoints...')
-	const endpoints = await Promise.all(
-		providersStatuses.map(async ({ providerId }) => {
-			try {
-				const serviceInfo = await discover.discover_over_joystream_discovery_service(providerId, runtime)
-
-				if (serviceInfo === null) {
-					console.log(`provider ${providerId} has not published service information`)
-					return { providerId, endpoint: null }
-				}
-
-				const info = JSON.parse(serviceInfo.serialized)
-				console.log(`${providerId} -> ${info.asset.endpoint}`)
-				return { providerId, endpoint: info.asset.endpoint }
-			} catch (err) {
-				console.log('resolve failed for id', providerId, err.message)
-				return { providerId, endpoint: null }
-			}
-		})
-	)
-
-	console.log('\nChecking API Endpoints are online')
-	await Promise.all(
-		endpoints.map(async (provider) => {
-			if (!provider.endpoint) {
-			console.log('skipping', provider.providerId)
-				return
-			}
-			const swaggerUrl = `${stripEndingSlash(provider.endpoint)}/swagger.json`
-			let error
-			try {
-				await axios.get(swaggerUrl)
-				// maybe print out api version information to detect which version of colossus is running?
-				// or add another api endpoint for diagnostics information
-			} catch (err) {
-				error = err
-			}
-			console.log(`${provider.endpoint} - ${error ? error.message : 'OK'}`)
-		})
-	)
-
-	const knownContentIds = await runtime.assets.getKnownContentIds()
-	console.log(`\nData Directory has ${knownContentIds.length} assets`)
-
-	// Check which providers are reporting a ready relationship for each asset
-	await Promise.all(
-		knownContentIds.map(async (contentId) => {
-			const [relationshipsCount, judgement] = await assetRelationshipState(api, contentId, storageProviders)
-			console.log(
-				`${encodeAddress(contentId)} replication ${relationshipsCount}/${
-					storageProviders.length
-				} - ${judgement}`
-			)
-		})
-	)
-
-	// Interestingly, disconnect doesn't work unless an explicit provider was
-	// created for the underlying api instance.
-	// We no longer need a connection to the chain.
-	api.disconnect()
-
-	console.log(`\nChecking available assets on providers (this can take some time)...`)
-	endpoints.forEach(async ({ providerId, endpoint }) => {
-		if (!endpoint) {
-			return
-		}
-		const total = knownContentIds.length
-		const { found } = await countContentAvailability(knownContentIds, endpoint)
-		console.log(`provider ${providerId}: has ${found} out of ${total}`)
-	})
+  const runtime = await RuntimeApi.create()
+  const { api } = runtime
+
+  // get current block height
+  const currentHeader = await api.rpc.chain.getHeader()
+  const currentHeight = currentHeader.number.toBn()
+
+  // get all providers
+  const { ids: storageProviders } = await runtime.workers.getAllProviders()
+  console.log(`Found ${storageProviders.length} staked providers`)
+
+  const storageProviderAccountInfos = await Promise.all(
+    storageProviders.map(async providerId => {
+      return {
+        providerId,
+        info: await runtime.discovery.getAccountInfo(providerId),
+      }
+    })
+  )
+
+  // providers that have updated their account info and published ipfs id
+  // are considered live if the record hasn't expired yet
+  const liveProviders = storageProviderAccountInfos.filter(({ info }) => {
+    return info && info.expires_at.gte(currentHeight)
+  })
+
+  const downProviders = storageProviderAccountInfos.filter(({ info }) => {
+    return info === null
+  })
+
+  const expiredTtlProviders = storageProviderAccountInfos.filter(({ info }) => {
+    return info && currentHeight.gte(info.expires_at)
+  })
+
+  const providersStatuses = mapInfoToStatus(liveProviders, currentHeight)
+  console.log('\n== Live Providers\n', providersStatuses)
+
+  const expiredProviderStatuses = mapInfoToStatus(expiredTtlProviders, currentHeight)
+  console.log('\n== Expired Providers\n', expiredProviderStatuses)
+
+  console.log(
+    '\n== Down Providers!\n',
+    downProviders.map(provider => {
+      return {
+        providerId: provider.providerId,
+      }
+    })
+  )
+
+  // Resolve IPNS identities of providers
+  console.log('\nResolving live provider API Endpoints...')
+  const endpoints = await Promise.all(
+    providersStatuses.map(async ({ providerId }) => {
+      try {
+        const serviceInfo = await discover.discover_over_joystream_discovery_service(providerId, runtime)
+
+        if (serviceInfo === null) {
+          console.log(`provider ${providerId} has not published service information`)
+          return { providerId, endpoint: null }
+        }
+
+        const info = JSON.parse(serviceInfo.serialized)
+        console.log(`${providerId} -> ${info.asset.endpoint}`)
+        return { providerId, endpoint: info.asset.endpoint }
+      } catch (err) {
+        console.log('resolve failed for id', providerId, err.message)
+        return { providerId, endpoint: null }
+      }
+    })
+  )
+
+  console.log('\nChecking API Endpoints are online')
+  await Promise.all(
+    endpoints.map(async provider => {
+      if (!provider.endpoint) {
+        console.log('skipping', provider.providerId)
+        return
+      }
+      const swaggerUrl = `${stripEndingSlash(provider.endpoint)}/swagger.json`
+      let error
+      try {
+        await axios.get(swaggerUrl)
+        // maybe print out api version information to detect which version of colossus is running?
+        // or add another api endpoint for diagnostics information
+      } catch (err) {
+        error = err
+      }
+      console.log(`${provider.endpoint} - ${error ? error.message : 'OK'}`)
+    })
+  )
+
+  const knownContentIds = await runtime.assets.getKnownContentIds()
+  console.log(`\nData Directory has ${knownContentIds.length} assets`)
+
+  // Check which providers are reporting a ready relationship for each asset
+  await Promise.all(
+    knownContentIds.map(async contentId => {
+      const [relationshipsCount, judgement] = await assetRelationshipState(api, contentId, storageProviders)
+      console.log(
+        `${encodeAddress(contentId)} replication ${relationshipsCount}/${storageProviders.length} - ${judgement}`
+      )
+    })
+  )
+
+  // Interestingly, disconnect doesn't work unless an explicit provider was
+  // created for the underlying api instance.
+  // We no longer need a connection to the chain.
+  api.disconnect()
+
+  console.log(`\nChecking available assets on providers (this can take some time)...`)
+  endpoints.forEach(async ({ providerId, endpoint }) => {
+    if (!endpoint) {
+      return
+    }
+    const total = knownContentIds.length
+    const { found } = await countContentAvailability(knownContentIds, endpoint)
+    console.log(`provider ${providerId}: has ${found} out of ${total}`)
+  })
 }
 
 function mapInfoToStatus(providers, currentHeight) {
-	return providers.map(({ providerId, info }) => {
-		if (info) {
-			return {
-				providerId,
-				identity: info.identity.toString(),
-				expiresIn: info.expires_at.sub(currentHeight).toNumber(),
-				expired: currentHeight.gte(info.expires_at),
-			}
-		}
-		return {
-			providerId,
-			identity: null,
-			status: 'down',
-		}
-	})
+  return providers.map(({ providerId, info }) => {
+    if (info) {
+      return {
+        providerId,
+        identity: info.identity.toString(),
+        expiresIn: info.expires_at.sub(currentHeight).toNumber(),
+        expired: currentHeight.gte(info.expires_at),
+      }
+    }
+    return {
+      providerId,
+      identity: null,
+      status: 'down',
+    }
+  })
 }
 
 // HTTP HEAD all known content ids on each provider with axios
 async function countContentAvailability(contentIds, source) {
-	const content = {}
-	let found = 0
-	let missing = 0
-	for (let i = 0; i < contentIds.length; i++) {
-		const assetUrl = makeAssetUrl(contentIds[i], source)
-		try {
-			const info = await axios.head(assetUrl)
-			content[encodeAddress(contentIds[i])] = {
-				type: info.headers['content-type'],
-				bytes: info.headers['content-length'],
-			}
-			// TODO: cross check against dataobject size
-			found++
-		} catch (err) {
-			missing++
-		}
-	}
-
-	return { found, missing, content }
+  const content = {}
+  let found = 0
+  let missing = 0
+  for (let i = 0; i < contentIds.length; i++) {
+    const assetUrl = makeAssetUrl(contentIds[i], source)
+    try {
+      const info = await axios.head(assetUrl)
+      content[encodeAddress(contentIds[i])] = {
+        type: info.headers['content-type'],
+        bytes: info.headers['content-length'],
+      }
+      // TODO: cross check against dataobject size
+      found++
+    } catch (err) {
+      missing++
+    }
+  }
+
+  return { found, missing, content }
 }
 
 function makeAssetUrl(contentId, source) {
-	source = stripEndingSlash(source)
-	return `${source}/asset/v0/${encodeAddress(contentId)}`
+  source = stripEndingSlash(source)
+  return `${source}/asset/v0/${encodeAddress(contentId)}`
 }
 
 async function assetRelationshipState(api, contentId, providers) {
-	const dataObject = await api.query.dataDirectory.dataObjectByContentId(contentId)
-
-	const relationshipIds = await api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
-
-	// how many relationships associated with active providers and in ready state
-	const activeRelationships = await Promise.all(
-		relationshipIds.map(async (id) => {
-			let relationship = await api.query.dataObjectStorageRegistry.relationships(id)
-			relationship = relationship.unwrap()
-			// only interested in ready relationships
-			if (!relationship.ready) {
-				return undefined
-			}
-			// Does the relationship belong to an active provider ?
-			return providers.find((provider) => relationship.storage_provider.eq(provider))
-		})
-	)
-
-	return [activeRelationships.filter((active) => active).length, dataObject.unwrap().liaison_judgement]
+  const dataObject = await api.query.dataDirectory.dataObjectByContentId(contentId)
+
+  const relationshipIds = await api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
+
+  // how many relationships associated with active providers and in ready state
+  const activeRelationships = await Promise.all(
+    relationshipIds.map(async id => {
+      let relationship = await api.query.dataObjectStorageRegistry.relationships(id)
+      relationship = relationship.unwrap()
+      // only interested in ready relationships
+      if (!relationship.ready) {
+        return undefined
+      }
+      // Does the relationship belong to an active provider ?
+      return providers.find(provider => relationship.storage_provider.eq(provider))
+    })
+  )
+
+  return [activeRelationships.filter(active => active).length, dataObject.unwrap().liaison_judgement]
 }
 
 main()
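
countContentAvailability above issues one HEAD request per asset with no timeout, so a single stalled endpoint can hold up the whole scan. A hedged variant of the probe with a bounded wait (the 5000 ms value is arbitrary):

  const axios = require('axios')

  async function probeAsset(assetUrl) {
    try {
      // axios aborts after `timeout` milliseconds and rejects, so a
      // dead endpoint counts as missing instead of stalling the scan.
      const info = await axios.head(assetUrl, { timeout: 5000 })
      return {
        type: info.headers['content-type'],
        bytes: info.headers['content-length'],
      }
    } catch (err) {
      return null
    }
  }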

+ 150 - 150
storage-node/packages/runtime-api/assets.js

@@ -4,163 +4,163 @@ const debug = require('debug')('joystream:runtime:assets')
 const { decodeAddress } = require('@polkadot/keyring')
 
 function parseContentId(contentId) {
-	try {
-		return decodeAddress(contentId)
-	} catch (err) {
-		return contentId
-	}
+  try {
+    return decodeAddress(contentId)
+  } catch (err) {
+    return contentId
+  }
 }
 
 /*
  * Add asset related functionality to the substrate API.
  */
 class AssetsApi {
-	static async create(base) {
-		const ret = new AssetsApi()
-		ret.base = base
-		await AssetsApi.init()
-		return ret
-	}
-
-	static async init() {
-		debug('Init')
-	}
-
-	/*
-	 * Create and return a data object.
-	 */
-	async createDataObject(accountId, memberId, contentId, doTypeId, size, ipfsCid) {
-		contentId = parseContentId(contentId)
-		const tx = this.base.api.tx.dataDirectory.addContent(memberId, contentId, doTypeId, size, ipfsCid)
-		await this.base.signAndSend(accountId, tx)
-
-		// If the data object was constructed properly, we should now be able to return
-		// the data object from the state.
-		return this.getDataObject(contentId)
-	}
-
-	/*
-	 * Return the Data Object for a contentId
-	 */
-	async getDataObject(contentId) {
-		contentId = parseContentId(contentId)
-		return this.base.api.query.dataDirectory.dataObjectByContentId(contentId)
-	}
-
-	/*
-	 * Verify the liaison state for a DataObject:
-	 * - Check the content ID has a DataObject
-	 * - Check the storageProviderId is the liaison
-	 * - Check the liaison state is Pending
-	 *
-	 * Each failure errors out, success returns the data object.
-	 */
-	async checkLiaisonForDataObject(storageProviderId, contentId) {
-		contentId = parseContentId(contentId)
-
-		let obj = await this.getDataObject(contentId)
-
-		if (obj.isNone) {
-			throw new Error(`No DataObject created for content ID: ${contentId}`)
-		}
-
-		obj = obj.unwrap()
-
-		if (!obj.liaison.eq(storageProviderId)) {
-			throw new Error(`This storage node is not liaison for the content ID: ${contentId}`)
-		}
-
-		if (obj.liaison_judgement.type !== 'Pending') {
-			throw new Error(`Expected Pending judgement, but found: ${obj.liaison_judgement.type}`)
-		}
-
-		return obj
-	}
-
-	/*
-	 * Sets the data object liaison judgement to Accepted
-	 */
-	async acceptContent(providerAccountId, storageProviderId, contentId) {
-		contentId = parseContentId(contentId)
-		const tx = this.base.api.tx.dataDirectory.acceptContent(storageProviderId, contentId)
-		return this.base.signAndSend(providerAccountId, tx)
-	}
-
-	/*
-	 * Sets the data object liaison judgement to Rejected
-	 */
-	async rejectContent(providerAccountId, storageProviderId, contentId) {
-		contentId = parseContentId(contentId)
-		const tx = this.base.api.tx.dataDirectory.rejectContent(storageProviderId, contentId)
-		return this.base.signAndSend(providerAccountId, tx)
-	}
-
-	/*
-	 * Creates storage relationship for a data object and provider
-	 */
-	async createStorageRelationship(providerAccountId, storageProviderId, contentId, callback) {
-		contentId = parseContentId(contentId)
-		const tx = this.base.api.tx.dataObjectStorageRegistry.addRelationship(storageProviderId, contentId)
-
-		const subscribed = [['dataObjectStorageRegistry', 'DataObjectStorageRelationshipAdded']]
-		return this.base.signAndSend(providerAccountId, tx, 3, subscribed, callback)
-	}
-
-	/*
-	 * Gets storage relationship for contentId for the given provider
-	 */
-	async getStorageRelationshipAndId(storageProviderId, contentId) {
-		contentId = parseContentId(contentId)
-		const rids = await this.base.api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
-
-		while (rids.length) {
-			const relationshipId = rids.shift()
-			let relationship = await this.base.api.query.dataObjectStorageRegistry.relationships(relationshipId)
-			relationship = relationship.unwrap()
-			if (relationship.storage_provider.eq(storageProviderId)) {
-				return { relationship, relationshipId }
-			}
-		}
-
-		return {}
-	}
-
-	/*
-	 * Creates storage relationship for a data object and provider and returns the relationship id
-	 */
-	async createAndReturnStorageRelationship(providerAccountId, storageProviderId, contentId) {
-		contentId = parseContentId(contentId)
-		return new Promise(async (resolve, reject) => {
-			try {
-				await this.createStorageRelationship(providerAccountId, storageProviderId, contentId, (events) => {
-					events.forEach((event) => {
-						resolve(event[1].DataObjectStorageRelationshipId)
-					})
-				})
-			} catch (err) {
-				reject(err)
-			}
-		})
-	}
-
-	/*
-	 * Set the ready state for a data object storage relationship to the new value
-	 */
-	async toggleStorageRelationshipReady(providerAccountId, storageProviderId, dosrId, ready) {
-		const tx = ready
-			? this.base.api.tx.dataObjectStorageRegistry.setRelationshipReady(storageProviderId, dosrId)
-			: this.base.api.tx.dataObjectStorageRegistry.unsetRelationshipReady(storageProviderId, dosrId)
-		return this.base.signAndSend(providerAccountId, tx)
-	}
-
-	/*
-	 * Returns an array of known content ids
-	 */
-	async getKnownContentIds() {
-		return this.base.api.query.dataDirectory.knownContentIds()
-	}
+  static async create(base) {
+    const ret = new AssetsApi()
+    ret.base = base
+    await AssetsApi.init()
+    return ret
+  }
+
+  static async init() {
+    debug('Init')
+  }
+
+  /*
+   * Create and return a data object.
+   */
+  async createDataObject(accountId, memberId, contentId, doTypeId, size, ipfsCid) {
+    contentId = parseContentId(contentId)
+    const tx = this.base.api.tx.dataDirectory.addContent(memberId, contentId, doTypeId, size, ipfsCid)
+    await this.base.signAndSend(accountId, tx)
+
+    // If the data object was constructed properly, we should now be able to return
+    // the data object from the state.
+    return this.getDataObject(contentId)
+  }
+
+  /*
+   * Return the Data Object for a contentId
+   */
+  async getDataObject(contentId) {
+    contentId = parseContentId(contentId)
+    return this.base.api.query.dataDirectory.dataObjectByContentId(contentId)
+  }
+
+  /*
+   * Verify the liaison state for a DataObject:
+   * - Check the content ID has a DataObject
+   * - Check the storageProviderId is the liaison
+   * - Check the liaison state is Pending
+   *
+   * Each failure errors out, success returns the data object.
+   */
+  async checkLiaisonForDataObject(storageProviderId, contentId) {
+    contentId = parseContentId(contentId)
+
+    let obj = await this.getDataObject(contentId)
+
+    if (obj.isNone) {
+      throw new Error(`No DataObject created for content ID: ${contentId}`)
+    }
+
+    obj = obj.unwrap()
+
+    if (!obj.liaison.eq(storageProviderId)) {
+      throw new Error(`This storage node is not liaison for the content ID: ${contentId}`)
+    }
+
+    if (obj.liaison_judgement.type !== 'Pending') {
+      throw new Error(`Expected Pending judgement, but found: ${obj.liaison_judgement.type}`)
+    }
+
+    return obj
+  }
+
+  /*
+   * Sets the data object liaison judgement to Accepted
+   */
+  async acceptContent(providerAccountId, storageProviderId, contentId) {
+    contentId = parseContentId(contentId)
+    const tx = this.base.api.tx.dataDirectory.acceptContent(storageProviderId, contentId)
+    return this.base.signAndSend(providerAccountId, tx)
+  }
+
+  /*
+   * Sets the data object liaison judgement to Rejected
+   */
+  async rejectContent(providerAccountId, storageProviderId, contentId) {
+    contentId = parseContentId(contentId)
+    const tx = this.base.api.tx.dataDirectory.rejectContent(storageProviderId, contentId)
+    return this.base.signAndSend(providerAccountId, tx)
+  }
+
+  /*
+   * Creates storage relationship for a data object and provider
+   */
+  async createStorageRelationship(providerAccountId, storageProviderId, contentId, callback) {
+    contentId = parseContentId(contentId)
+    const tx = this.base.api.tx.dataObjectStorageRegistry.addRelationship(storageProviderId, contentId)
+
+    const subscribed = [['dataObjectStorageRegistry', 'DataObjectStorageRelationshipAdded']]
+    return this.base.signAndSend(providerAccountId, tx, 3, subscribed, callback)
+  }
+
+  /*
+   * Gets storage relationship for contentId for the given provider
+   */
+  async getStorageRelationshipAndId(storageProviderId, contentId) {
+    contentId = parseContentId(contentId)
+    const rids = await this.base.api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
+
+    while (rids.length) {
+      const relationshipId = rids.shift()
+      let relationship = await this.base.api.query.dataObjectStorageRegistry.relationships(relationshipId)
+      relationship = relationship.unwrap()
+      if (relationship.storage_provider.eq(storageProviderId)) {
+        return { relationship, relationshipId }
+      }
+    }
+
+    return {}
+  }
+
+  /*
+   * Creates storage relationship for a data object and provider and returns the relationship id
+   */
+  async createAndReturnStorageRelationship(providerAccountId, storageProviderId, contentId) {
+    contentId = parseContentId(contentId)
+    return new Promise(async (resolve, reject) => {
+      try {
+        await this.createStorageRelationship(providerAccountId, storageProviderId, contentId, events => {
+          events.forEach(event => {
+            resolve(event[1].DataObjectStorageRelationshipId)
+          })
+        })
+      } catch (err) {
+        reject(err)
+      }
+    })
+  }
+
+  /*
+   * Set the ready state for a data object storage relationship to the new value
+   */
+  async toggleStorageRelationshipReady(providerAccountId, storageProviderId, dosrId, ready) {
+    const tx = ready
+      ? this.base.api.tx.dataObjectStorageRegistry.setRelationshipReady(storageProviderId, dosrId)
+      : this.base.api.tx.dataObjectStorageRegistry.unsetRelationshipReady(storageProviderId, dosrId)
+    return this.base.signAndSend(providerAccountId, tx)
+  }
+
+  /*
+   * Returns an array of known content ids
+   */
+  async getKnownContentIds() {
+    return this.base.api.query.dataDirectory.knownContentIds()
+  }
 }
 
 module.exports = {
-	AssetsApi,
+  AssetsApi,
 }
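
The methods above compose into the accept path that the colossus PUT handler drives. A condensed sketch, assuming an initialised runtime API instance (runtime) and content that has already been uploaded and hash-verified:

  // Accept uploaded content and flip a storage relationship to ready.
  async function acceptUpload(runtime, contentId) {
    const roleAddress = runtime.identities.key.address
    const providerId = runtime.storageProviderId

    // Throws unless this node is the pending liaison for the content.
    await runtime.assets.checkLiaisonForDataObject(providerId, contentId)

    await runtime.assets.acceptContent(roleAddress, providerId, contentId)

    const dosrId = await runtime.assets.createAndReturnStorageRelationship(roleAddress, providerId, contentId)
    await runtime.assets.toggleStorageRelationshipReady(roleAddress, providerId, dosrId, true)
  }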

+ 43 - 43
storage-node/packages/runtime-api/balances.js

@@ -24,56 +24,56 @@ const debug = require('debug')('joystream:runtime:balances')
  * Bundle API calls related to account balances.
  */
 class BalancesApi {
-	static async create(base) {
-		const ret = new BalancesApi()
-		ret.base = base
-		await BalancesApi.init()
-		return ret
-	}
+  static async create(base) {
+    const ret = new BalancesApi()
+    ret.base = base
+    await BalancesApi.init()
+    return ret
+  }
 
-	static async init() {
-		debug('Init')
-	}
+  static async init() {
+    debug('Init')
+  }
 
-	/*
-	 * Return true/false if the account has the minimum balance given.
-	 */
-	async hasMinimumBalanceOf(accountId, min) {
-		const balance = await this.freeBalance(accountId)
-		if (typeof min === 'number') {
-			return balance.cmpn(min) >= 0
-		}
-		return balance.cmp(min) >= 0
-	}
+  /*
+   * Return true/false if the account has the minimum balance given.
+   */
+  async hasMinimumBalanceOf(accountId, min) {
+    const balance = await this.freeBalance(accountId)
+    if (typeof min === 'number') {
+      return balance.cmpn(min) >= 0
+    }
+    return balance.cmp(min) >= 0
+  }
 
-	/*
-	 * Return the account's current free balance.
-	 */
-	async freeBalance(accountId) {
-		const decoded = this.base.identities.keyring.decodeAddress(accountId, true)
-		return this.base.api.query.balances.freeBalance(decoded)
-	}
+  /*
+   * Return the account's current free balance.
+   */
+  async freeBalance(accountId) {
+    const decoded = this.base.identities.keyring.decodeAddress(accountId, true)
+    return this.base.api.query.balances.freeBalance(decoded)
+  }
 
-	/*
-	 * Return the base transaction fee.
-	 */
-	baseTransactionFee() {
-		return this.base.api.consts.transactionPayment.transactionBaseFee
-	}
+  /*
+   * Return the base transaction fee.
+   */
+  baseTransactionFee() {
+    return this.base.api.consts.transactionPayment.transactionBaseFee
+  }
 
-	/*
-	 * Transfer amount currency from one address to another. The sending
-	 * address must be an unlocked key pair!
-	 */
-	async transfer(from, to, amount) {
-		const decode = require('@polkadot/keyring').decodeAddress
-		const toDecoded = decode(to, true)
+  /*
+   * Transfer amount currency from one address to another. The sending
+   * address must be an unlocked key pair!
+   */
+  async transfer(from, to, amount) {
+    const decode = require('@polkadot/keyring').decodeAddress
+    const toDecoded = decode(to, true)
 
-		const tx = this.base.api.tx.balances.transfer(toDecoded, amount)
-		return this.base.signAndSend(from, tx)
-	}
+    const tx = this.base.api.tx.balances.transfer(toDecoded, amount)
+    return this.base.signAndSend(from, tx)
+  }
 }
 
 module.exports = {
-	BalancesApi,
+  BalancesApi,
 }
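
A hedged usage sketch of the balance helpers, assuming runtime is a created RuntimeApi instance and from is an address whose key pair is unlocked in its keyring; the 100-unit threshold is arbitrary:

  // Top up an account to a minimum working balance.
  async function fundIfNeeded(runtime, from, to, min = 100) {
    const enough = await runtime.balances.hasMinimumBalanceOf(to, min)
    if (!enough) {
      await runtime.balances.transfer(from, to, min)
    }
  }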

+ 54 - 54
storage-node/packages/runtime-api/discovery.js

@@ -6,67 +6,67 @@ const debug = require('debug')('joystream:runtime:discovery')
  * Add discovery related functionality to the substrate API.
  */
 class DiscoveryApi {
-	static async create(base) {
-		const ret = new DiscoveryApi()
-		ret.base = base
-		await DiscoveryApi.init()
-		return ret
-	}
+  static async create(base) {
+    const ret = new DiscoveryApi()
+    ret.base = base
+    await DiscoveryApi.init()
+    return ret
+  }
 
-	static async init() {
-		debug('Init')
-	}
+  static async init() {
+    debug('Init')
+  }
 
-	/*
-	 * Get Bootstrap endpoints
-	 */
-	async getBootstrapEndpoints() {
-		return this.base.api.query.discovery.bootstrapEndpoints()
-	}
+  /*
+   * Get Bootstrap endpoints
+   */
+  async getBootstrapEndpoints() {
+    return this.base.api.query.discovery.bootstrapEndpoints()
+  }
 
-	/*
-	 * Set Bootstrap endpoints, requires the sudo account to be provided and unlocked
-	 */
-	async setBootstrapEndpoints(sudoAccount, endpoints) {
-		const tx = this.base.api.tx.discovery.setBootstrapEndpoints(endpoints)
-		// make sudo call
-		return this.base.signAndSend(sudoAccount, this.base.api.tx.sudo.sudo(tx))
-	}
+  /*
+   * Set Bootstrap endpoints, requires the sudo account to be provided and unlocked
+   */
+  async setBootstrapEndpoints(sudoAccount, endpoints) {
+    const tx = this.base.api.tx.discovery.setBootstrapEndpoints(endpoints)
+    // make sudo call
+    return this.base.signAndSend(sudoAccount, this.base.api.tx.sudo.sudo(tx))
+  }
 
-	/*
-	 * Get AccountInfo of a storage provider
-	 */
-	async getAccountInfo(storageProviderId) {
-		const info = await this.base.api.query.discovery.accountInfoByStorageProviderId(storageProviderId)
-		// Not an Option so we use default value check to know if info was found
-		return info.expires_at.eq(0) ? null : info
-	}
+  /*
+   * Get AccountInfo of a storage provider
+   */
+  async getAccountInfo(storageProviderId) {
+    const info = await this.base.api.query.discovery.accountInfoByStorageProviderId(storageProviderId)
+    // Not an Option, so we use a default-value check to know whether info was found
+    return info.expires_at.eq(0) ? null : info
+  }
 
-	/*
-	 * Set AccountInfo of our storage provider
-	 */
-	async setAccountInfo(ipnsId) {
-		const roleAccountId = this.base.identities.key.address
-		const storageProviderId = this.base.storageProviderId
-		const isProvider = await this.base.workers.isStorageProvider(storageProviderId)
-		if (isProvider) {
-			const tx = this.base.api.tx.discovery.setIpnsId(storageProviderId, ipnsId)
-			return this.base.signAndSend(roleAccountId, tx)
-		}
-		throw new Error('Cannot set AccountInfo, id is not a storage provider')
-	}
+  /*
+   * Set AccountInfo of our storage provider
+   */
+  async setAccountInfo(ipnsId) {
+    const roleAccountId = this.base.identities.key.address
+    const storageProviderId = this.base.storageProviderId
+    const isProvider = await this.base.workers.isStorageProvider(storageProviderId)
+    if (isProvider) {
+      const tx = this.base.api.tx.discovery.setIpnsId(storageProviderId, ipnsId)
+      return this.base.signAndSend(roleAccountId, tx)
+    }
+    throw new Error('Cannot set AccountInfo, id is not a storage provider')
+  }
 
-	/*
-	 * Clear AccountInfo of our storage provider
-	 */
-	async unsetAccountInfo() {
-		const roleAccountId = this.base.identities.key.address
-		const storageProviderId = this.base.storageProviderId
-		const tx = this.base.api.tx.discovery.unsetIpnsId(storageProviderId)
-		return this.base.signAndSend(roleAccountId, tx)
-	}
+  /*
+   * Clear AccountInfo of our storage provider
+   */
+  async unsetAccountInfo() {
+    const roleAccountId = this.base.identities.key.address
+    const storageProviderId = this.base.storageProviderId
+    const tx = this.base.api.tx.discovery.unsetIpnsId(storageProviderId)
+    return this.base.signAndSend(roleAccountId, tx)
+  }
 }
 
 module.exports = {
-	DiscoveryApi,
+  DiscoveryApi,
 }
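
A similar sketch for the DiscoveryApi, assuming the configured storageProviderId belongs to an enrolled provider and the role key file is unlocked; the id, file path, and IPNS id are illustrative only:

const { RuntimeApi } = require('@joystream/storage-runtime-api')

async function main() {
  const api = await RuntimeApi.create({
    storageProviderId: 1, // hypothetical provider id
    account_file: 'role-key.json', // hypothetical unlocked role key
  })

  // Read-only query, no signing required
  const endpoints = await api.discovery.getBootstrapEndpoints()
  console.log('bootstrap endpoints:', endpoints.toString())

  // Throws unless storageProviderId is an enrolled storage provider
  try {
    await api.discovery.setAccountInfo('QmExampleIpnsId') // placeholder IPNS id
  } catch (err) {
    console.error('setAccountInfo failed:', err.message)
  }

  api.disconnect()
}

main().catch(console.error)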

+ 198 - 198
storage-node/packages/runtime-api/identities.js

@@ -30,205 +30,205 @@ const utilCrypto = require('@polkadot/util-crypto')
  * This loosely groups: accounts, key management, and membership.
  */
 class IdentitiesApi {
-	static async create(base, { accountFile, passphrase, canPromptForPassphrase }) {
-		const ret = new IdentitiesApi()
-		ret.base = base
-		await ret.init(accountFile, passphrase, canPromptForPassphrase)
-		return ret
-	}
-
-	async init(accountFile, passphrase, canPromptForPassphrase) {
-		debug('Init')
-
-		// Creatre keyring
-		this.keyring = new Keyring()
-
-		this.canPromptForPassphrase = canPromptForPassphrase || false
-
-		// Load account file, if possible.
-		try {
-			this.key = await this.loadUnlock(accountFile, passphrase)
-		} catch (err) {
-			debug('Error loading account file:', err.message)
-		}
-	}
-
-	/*
-	 * Load a key file and unlock it if necessary.
-	 */
-	async loadUnlock(accountFile, passphrase) {
-		const fullname = path.resolve(accountFile)
-		debug('Initializing key from', fullname)
-		const key = this.keyring.addFromJson(require(fullname))
-		await this.tryUnlock(key, passphrase)
-		debug('Successfully initialized with address', key.address)
-		return key
-	}
-
-	/*
-	 * Try to unlock a key if it isn't already unlocked.
-	 * passphrase should be supplied as argument.
-	 */
-	async tryUnlock(key, passphrase) {
-		if (!key.isLocked) {
-			debug('Key is not locked, not attempting to unlock')
-			return
-		}
-
-		// First try with an empty passphrase - for convenience
-		try {
-			key.decodePkcs8('')
-
-			if (passphrase) {
-				debug('Key was not encrypted, supplied passphrase was ignored')
-			}
-
-			return
-		} catch (err) {
-			// pass
-		}
-
-		// Then with supplied passphrase
-		try {
-			debug('Decrypting with supplied passphrase')
-			key.decodePkcs8(passphrase)
-			return
-		} catch (err) {
-			// pass
-		}
-
-		// If that didn't work, ask for a passphrase if appropriate
-		if (this.canPromptForPassphrase) {
-			passphrase = await this.askForPassphrase(key.address)
-			key.decodePkcs8(passphrase)
-			return
-		}
-
-		throw new Error('invalid passphrase supplied')
-	}
-
-	/*
-	 * Ask for a passphrase
-	 */
-	askForPassphrase(address) {
-		// Query for passphrase
-		const prompt = require('password-prompt')
-		return prompt(`Enter passphrase for ${address}: `, { required: false })
-	}
-
-	/*
-	 * Return true if the account is a root account of a member
-	 */
-	async isMember(accountId) {
-		const memberIds = await this.memberIdsOf(accountId) // return array of member ids
-		return memberIds.length > 0 // true if at least one member id exists for the acccount
-	}
-
-	/*
-	 * Return all the member IDs of an account by the root account id
-	 */
-	async memberIdsOf(accountId) {
-		const decoded = this.keyring.decodeAddress(accountId)
-		return this.base.api.query.members.memberIdsByRootAccountId(decoded)
-	}
-
-	/*
-	 * Return the first member ID of an account, or undefined if not a member root account.
-	 */
-	async firstMemberIdOf(accountId) {
-		const decoded = this.keyring.decodeAddress(accountId)
-		const ids = await this.base.api.query.members.memberIdsByRootAccountId(decoded)
-		return ids[0]
-	}
-
-	/*
-	 * Export a key pair to JSON. Will ask for a passphrase.
-	 */
-	async exportKeyPair(accountId) {
-		const passphrase = await this.askForPassphrase(accountId)
-
-		// Produce JSON output
-		return this.keyring.toJson(accountId, passphrase)
-	}
-
-	/*
-	 * Export a key pair and write it to a JSON file with the account ID as the
-	 * name.
-	 */
-	async writeKeyPairExport(accountId, prefix) {
-		// Generate JSON
-		const data = await this.exportKeyPair(accountId)
-
-		// Write JSON
-		let filename = `${data.address}.json`
-
-		if (prefix) {
-			const path = require('path')
-			filename = path.resolve(prefix, filename)
-		}
-
-		fs.writeFileSync(filename, JSON.stringify(data), {
-			encoding: 'utf8',
-			mode: 0o600,
-		})
-
-		return filename
-	}
-
-	/*
-	 * Register account id with userInfo as a new member
-	 * using default policy 0, returns new member id
-	 */
-	async registerMember(accountId, userInfo) {
-		const tx = this.base.api.tx.members.buyMembership(0, userInfo)
-
-		return this.base.signAndSendThenGetEventResult(accountId, tx, {
-			eventModule: 'members',
-			eventName: 'MemberRegistered',
-			eventProperty: 'MemberId',
-		})
-	}
-
-	/*
-	 * Injects a keypair and sets it as the default identity
-	 */
-	useKeyPair(keyPair) {
-		this.key = this.keyring.addPair(keyPair)
-	}
-
-	/*
-	 * Create a new role key. If no name is given,
-	 * default to 'storage'.
-	 */
-	async createNewRoleKey(name) {
-		name = name || 'storage-provider'
-
-		// Generate new key pair
-		const keyPair = utilCrypto.naclKeypairFromRandom()
-
-		// Encode to an address.
-		const addr = this.keyring.encodeAddress(keyPair.publicKey)
-		debug('Generated new key pair with address', addr)
-
-		// Add to key wring. We set the meta to identify the account as
-		// a role key.
-		const meta = {
-			name: `${name} role account`,
-		}
-
-		const createPair = require('@polkadot/keyring/pair').default
-		const pair = createPair('ed25519', keyPair, meta)
-
-		this.keyring.addPair(pair)
-
-		return pair
-	}
-
-	getSudoAccount() {
-		return this.base.api.query.sudo.key()
-	}
+  static async create(base, { accountFile, passphrase, canPromptForPassphrase }) {
+    const ret = new IdentitiesApi()
+    ret.base = base
+    await ret.init(accountFile, passphrase, canPromptForPassphrase)
+    return ret
+  }
+
+  async init(accountFile, passphrase, canPromptForPassphrase) {
+    debug('Init')
+
+    // Create keyring
+    this.keyring = new Keyring()
+
+    this.canPromptForPassphrase = canPromptForPassphrase || false
+
+    // Load account file, if possible.
+    try {
+      this.key = await this.loadUnlock(accountFile, passphrase)
+    } catch (err) {
+      debug('Error loading account file:', err.message)
+    }
+  }
+
+  /*
+   * Load a key file and unlock it if necessary.
+   */
+  async loadUnlock(accountFile, passphrase) {
+    const fullname = path.resolve(accountFile)
+    debug('Initializing key from', fullname)
+    const key = this.keyring.addFromJson(require(fullname))
+    await this.tryUnlock(key, passphrase)
+    debug('Successfully initialized with address', key.address)
+    return key
+  }
+
+  /*
+   * Try to unlock a key if it isn't already unlocked.
+   * passphrase should be supplied as an argument.
+   */
+  async tryUnlock(key, passphrase) {
+    if (!key.isLocked) {
+      debug('Key is not locked, not attempting to unlock')
+      return
+    }
+
+    // First try with an empty passphrase - for convenience
+    try {
+      key.decodePkcs8('')
+
+      if (passphrase) {
+        debug('Key was not encrypted, supplied passphrase was ignored')
+      }
+
+      return
+    } catch (err) {
+      // pass
+    }
+
+    // Then with supplied passphrase
+    try {
+      debug('Decrypting with supplied passphrase')
+      key.decodePkcs8(passphrase)
+      return
+    } catch (err) {
+      // pass
+    }
+
+    // If that didn't work, ask for a passphrase if appropriate
+    if (this.canPromptForPassphrase) {
+      passphrase = await this.askForPassphrase(key.address)
+      key.decodePkcs8(passphrase)
+      return
+    }
+
+    throw new Error('invalid passphrase supplied')
+  }
+
+  /*
+   * Ask for a passphrase
+   */
+  askForPassphrase(address) {
+    // Query for passphrase
+    const prompt = require('password-prompt')
+    return prompt(`Enter passphrase for ${address}: `, { required: false })
+  }
+
+  /*
+   * Return true if the account is a root account of a member
+   */
+  async isMember(accountId) {
+    const memberIds = await this.memberIdsOf(accountId) // returns an array of member ids
+    return memberIds.length > 0 // true if at least one member id exists for the account
+  }
+
+  /*
+   * Return all the member IDs of an account by the root account id
+   */
+  async memberIdsOf(accountId) {
+    const decoded = this.keyring.decodeAddress(accountId)
+    return this.base.api.query.members.memberIdsByRootAccountId(decoded)
+  }
+
+  /*
+   * Return the first member ID of an account, or undefined if not a member root account.
+   */
+  async firstMemberIdOf(accountId) {
+    const decoded = this.keyring.decodeAddress(accountId)
+    const ids = await this.base.api.query.members.memberIdsByRootAccountId(decoded)
+    return ids[0]
+  }
+
+  /*
+   * Export a key pair to JSON. Will ask for a passphrase.
+   */
+  async exportKeyPair(accountId) {
+    const passphrase = await this.askForPassphrase(accountId)
+
+    // Produce JSON output
+    return this.keyring.toJson(accountId, passphrase)
+  }
+
+  /*
+   * Export a key pair and write it to a JSON file with the account ID as the
+   * name.
+   */
+  async writeKeyPairExport(accountId, prefix) {
+    // Generate JSON
+    const data = await this.exportKeyPair(accountId)
+
+    // Write JSON
+    let filename = `${data.address}.json`
+
+    if (prefix) {
+      const path = require('path')
+      filename = path.resolve(prefix, filename)
+    }
+
+    fs.writeFileSync(filename, JSON.stringify(data), {
+      encoding: 'utf8',
+      mode: 0o600,
+    })
+
+    return filename
+  }
+
+  /*
+   * Register account id with userInfo as a new member
+   * using default policy 0, returns new member id
+   */
+  async registerMember(accountId, userInfo) {
+    const tx = this.base.api.tx.members.buyMembership(0, userInfo)
+
+    return this.base.signAndSendThenGetEventResult(accountId, tx, {
+      eventModule: 'members',
+      eventName: 'MemberRegistered',
+      eventProperty: 'MemberId',
+    })
+  }
+
+  /*
+   * Injects a keypair and sets it as the default identity
+   */
+  useKeyPair(keyPair) {
+    this.key = this.keyring.addPair(keyPair)
+  }
+
+  /*
+   * Create a new role key. If no name is given,
+   * default to 'storage-provider'.
+   */
+  async createNewRoleKey(name) {
+    name = name || 'storage-provider'
+
+    // Generate new key pair
+    const keyPair = utilCrypto.naclKeypairFromRandom()
+
+    // Encode to an address.
+    const addr = this.keyring.encodeAddress(keyPair.publicKey)
+    debug('Generated new key pair with address', addr)
+
+    // Add to keyring. We set the meta to identify the account as
+    // a role key.
+    const meta = {
+      name: `${name} role account`,
+    }
+
+    const createPair = require('@polkadot/keyring/pair').default
+    const pair = createPair('ed25519', keyPair, meta)
+
+    this.keyring.addPair(pair)
+
+    return pair
+  }
+
+  getSudoAccount() {
+    return this.base.api.query.sudo.key()
+  }
 }
 
 module.exports = {
-	IdentitiesApi,
+  IdentitiesApi,
 }
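
A short sketch of the key-handling flow above; the file paths and passphrase are placeholders, and the prompt fallback only triggers when canPromptForPassphrase is set:

const { RuntimeApi } = require('@joystream/storage-runtime-api')

async function main() {
  const api = await RuntimeApi.create({ canPromptForPassphrase: true })

  // tryUnlock() first attempts an empty passphrase, then the supplied one,
  // and only then falls back to an interactive prompt
  const key = await api.identities.loadUnlock('key.json', 'some-passphrase')

  // Membership is checked by root account id
  console.log('is member:', await api.identities.isMember(key.address))

  // Writes <address>.json under the given prefix; asks for an export passphrase
  const filename = await api.identities.writeKeyPairExport(key.address, '/tmp')
  console.log('exported key pair to', filename)

  api.disconnect()
}

main().catch(console.error)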

+ 259 - 259
storage-node/packages/runtime-api/index.js

@@ -35,271 +35,271 @@ const { newExternallyControlledPromise } = require('@joystream/storage-utils/ext
  * Initialize runtime (substrate) API and keyring.
  */
 class RuntimeApi {
-	static async create(options) {
-		const runtimeApi = new RuntimeApi()
-		await runtimeApi.init(options || {})
-		return runtimeApi
-	}
-
-	async init(options) {
-		debug('Init')
-
-		options = options || {}
-
-		// Register joystream types
-		registerJoystreamTypes()
-
-		const provider = new WsProvider(options.provider_url || 'ws://localhost:9944')
-
-		// Create the API instrance
-		this.api = await ApiPromise.create({ provider })
-
-		this.asyncLock = new AsyncLock()
-
-		// Keep track locally of account nonces.
-		this.nonces = {}
-
-		// The storage provider id to use
-		this.storageProviderId = parseInt(options.storageProviderId) // u64 instead ?
-
-		// Ok, create individual APIs
-		this.identities = await IdentitiesApi.create(this, {
-			account_file: options.account_file,
-			passphrase: options.passphrase,
-			canPromptForPassphrase: options.canPromptForPassphrase,
-		})
-		this.balances = await BalancesApi.create(this)
-		this.workers = await WorkersApi.create(this)
-		this.assets = await AssetsApi.create(this)
-		this.discovery = await DiscoveryApi.create(this)
-	}
-
-	disconnect() {
-		this.api.disconnect()
-	}
-
-	executeWithAccountLock(accountId, func) {
-		return this.asyncLock.acquire(`${accountId}`, func)
-	}
-
-	/*
-	 * Wait for an event. Filters out any events that don't match the module and
-	 * event name.
-	 *
-	 * The result of the Promise is an array containing first the full event
-	 * name, and then the event fields as an object.
-	 */
-	async waitForEvent(module, name) {
-		return this.waitForEvents([[module, name]])
-	}
-
-	static matchingEvents(subscribed, events) {
-		debug(`Number of events: ${events.length} subscribed to ${subscribed}`)
-
-		const filtered = events.filter((record) => {
-			const { event, phase } = record
-
-			// Show what we are busy with
-			debug(`\t${event.section}:${event.method}:: (phase=${phase.toString()})`)
-			debug(`\t\t${event.meta.documentation.toString()}`)
-
-			// Skip events we're not interested in.
-			const matching = subscribed.filter((value) => {
-				return event.section === value[0] && event.method === value[1]
-			})
-			return matching.length > 0
-		})
-		debug(`Filtered: ${filtered.length}`)
-
-		const mapped = filtered.map((record) => {
-			const { event } = record
-			const types = event.typeDef
-
-			// Loop through each of the parameters, displaying the type and data
-			const payload = {}
-			event.data.forEach((data, index) => {
-				debug(`\t\t\t${types[index].type}: ${data.toString()}`)
-				payload[types[index].type] = data
-			})
-
-			const fullName = `${event.section}.${event.method}`
-			return [fullName, payload]
-		})
-		debug('Mapped', mapped)
-
-		return mapped
-	}
-
-	/*
-	 * Same as waitForEvent, but filter on multiple events. The parameter is an
-	 * array of arrays containing module and name. Calling waitForEvent is
-	 * identical to calling this with [[module, name]].
-	 *
-	 * Returns the first matched event *only*.
-	 */
-	async waitForEvents(subscribed) {
-		return new Promise((resolve) => {
-			this.api.query.system.events((events) => {
-				const matches = RuntimeApi.matchingEvents(subscribed, events)
-				if (matches && matches.length) {
-					resolve(matches)
-				}
-			})
-		})
-	}
-
-	/*
-	 * Nonce-aware signAndSend(). Also allows you to use the accountId instead
-	 * of the key, making calls a little simpler. Will lock to prevent concurrent
-	 * calls so correct nonce is used.
-	 *
-	 * If the subscribed events are given, and a callback as well, then the
-	 * callback is invoked with matching events.
-	 */
-	async signAndSend(accountId, tx, attempts, subscribed, callback) {
-		accountId = this.identities.keyring.encodeAddress(accountId)
-
-		// Key must be unlocked
-		const fromKey = this.identities.keyring.getPair(accountId)
-		if (fromKey.isLocked) {
-			throw new Error('Must unlock key before using it to sign!')
-		}
-
-		const finalizedPromise = newExternallyControlledPromise()
-
-		await this.executeWithAccountLock(accountId, async () => {
-			// Try to get the next nonce to use
-			let nonce = this.nonces[accountId]
-
-			let incrementNonce = () => {
-				// only increment once
-				incrementNonce = () => {
-					/* turn it into a no-op */
-				}
-				nonce = nonce.addn(1)
-				this.nonces[accountId] = nonce
-			}
-
-			// If the nonce isn't available, get it from chain.
-			if (!nonce) {
-				// current nonce
-				nonce = await this.api.query.system.accountNonce(accountId)
-				debug(`Got nonce for ${accountId} from chain: ${nonce}`)
-			}
-
-			return new Promise((resolve, reject) => {
-				debug('Signing and sending tx')
-				// send(statusUpdates) returns a function for unsubscribing from status updates
-				const unsubscribe = tx
-					.sign(fromKey, { nonce })
-					.send(({ events = [], status }) => {
-						debug(`TX status: ${status.type}`)
-
-						// Whatever events we get, process them if there's someone interested.
-						// It is critical that this event handling doesn't prevent
-						try {
-							if (subscribed && callback) {
-								const matched = RuntimeApi.matchingEvents(subscribed, events)
-								debug('Matching events:', matched)
-								if (matched.length) {
-									callback(matched)
-								}
-							}
-						} catch (err) {
-							debug(`Error handling events ${err.stack}`)
-						}
-
-						// We want to release lock as early as possible, sometimes Ready status
-						// doesn't occur, so we do it on Broadcast instead
-						if (status.isReady) {
-							debug('TX Ready.')
-							incrementNonce()
-							resolve(unsubscribe) // releases lock
-						} else if (status.isBroadcast) {
-							debug('TX Broadcast.')
-							incrementNonce()
-							resolve(unsubscribe) // releases lock
-						} else if (status.isFinalized) {
-							debug('TX Finalized.')
-							finalizedPromise.resolve(status)
-						} else if (status.isFuture) {
-							// comes before ready.
-							// does that mean it will remain in mempool or in api internal queue?
-							// nonce was set in the future. Treating it as an error for now.
-							debug('TX Future!')
-							// nonce is likely out of sync, delete it so we reload it from chain on next attempt
-							delete this.nonces[accountId]
-							const err = new Error('transaction nonce set in future')
-							finalizedPromise.reject(err)
-							reject(err)
-						}
-
-						/* why don't we see these status updates on local devchain (single node)
+  static async create(options) {
+    const runtimeApi = new RuntimeApi()
+    await runtimeApi.init(options || {})
+    return runtimeApi
+  }
+
+  async init(options) {
+    debug('Init')
+
+    options = options || {}
+
+    // Register joystream types
+    registerJoystreamTypes()
+
+    const provider = new WsProvider(options.provider_url || 'ws://localhost:9944')
+
+    // Create the API instance
+    this.api = await ApiPromise.create({ provider })
+
+    this.asyncLock = new AsyncLock()
+
+    // Keep track locally of account nonces.
+    this.nonces = {}
+
+    // The storage provider id to use
+    this.storageProviderId = parseInt(options.storageProviderId) // u64 instead ?
+
+    // Ok, create individual APIs
+    this.identities = await IdentitiesApi.create(this, {
+      account_file: options.account_file,
+      passphrase: options.passphrase,
+      canPromptForPassphrase: options.canPromptForPassphrase,
+    })
+    this.balances = await BalancesApi.create(this)
+    this.workers = await WorkersApi.create(this)
+    this.assets = await AssetsApi.create(this)
+    this.discovery = await DiscoveryApi.create(this)
+  }
+
+  disconnect() {
+    this.api.disconnect()
+  }
+
+  executeWithAccountLock(accountId, func) {
+    return this.asyncLock.acquire(`${accountId}`, func)
+  }
+
+  /*
+   * Wait for an event. Filters out any events that don't match the module and
+   * event name.
+   *
+   * The result of the Promise is an array containing first the full event
+   * name, and then the event fields as an object.
+   */
+  async waitForEvent(module, name) {
+    return this.waitForEvents([[module, name]])
+  }
+
+  static matchingEvents(subscribed, events) {
+    debug(`Number of events: ${events.length} subscribed to ${subscribed}`)
+
+    const filtered = events.filter(record => {
+      const { event, phase } = record
+
+      // Show what we are busy with
+      debug(`\t${event.section}:${event.method}:: (phase=${phase.toString()})`)
+      debug(`\t\t${event.meta.documentation.toString()}`)
+
+      // Skip events we're not interested in.
+      const matching = subscribed.filter(value => {
+        return event.section === value[0] && event.method === value[1]
+      })
+      return matching.length > 0
+    })
+    debug(`Filtered: ${filtered.length}`)
+
+    const mapped = filtered.map(record => {
+      const { event } = record
+      const types = event.typeDef
+
+      // Loop through each of the parameters, displaying the type and data
+      const payload = {}
+      event.data.forEach((data, index) => {
+        debug(`\t\t\t${types[index].type}: ${data.toString()}`)
+        payload[types[index].type] = data
+      })
+
+      const fullName = `${event.section}.${event.method}`
+      return [fullName, payload]
+    })
+    debug('Mapped', mapped)
+
+    return mapped
+  }
+
+  /*
+   * Same as waitForEvent, but filter on multiple events. The parameter is an
+   * array of arrays containing module and name. Calling waitForEvent is
+   * identical to calling this with [[module, name]].
+   *
+   * Returns the first matched event *only*.
+   */
+  async waitForEvents(subscribed) {
+    return new Promise(resolve => {
+      this.api.query.system.events(events => {
+        const matches = RuntimeApi.matchingEvents(subscribed, events)
+        if (matches && matches.length) {
+          resolve(matches)
+        }
+      })
+    })
+  }
+
+  /*
+   * Nonce-aware signAndSend(). Also allows you to use the accountId instead
+   * of the key, making calls a little simpler. Will lock to prevent concurrent
+   * calls so correct nonce is used.
+   *
+   * If the subscribed events are given, and a callback as well, then the
+   * callback is invoked with matching events.
+   */
+  async signAndSend(accountId, tx, attempts, subscribed, callback) {
+    accountId = this.identities.keyring.encodeAddress(accountId)
+
+    // Key must be unlocked
+    const fromKey = this.identities.keyring.getPair(accountId)
+    if (fromKey.isLocked) {
+      throw new Error('Must unlock key before using it to sign!')
+    }
+
+    const finalizedPromise = newExternallyControlledPromise()
+
+    await this.executeWithAccountLock(accountId, async () => {
+      // Try to get the next nonce to use
+      let nonce = this.nonces[accountId]
+
+      let incrementNonce = () => {
+        // only increment once
+        incrementNonce = () => {
+          /* turn it into a no-op */
+        }
+        nonce = nonce.addn(1)
+        this.nonces[accountId] = nonce
+      }
+
+      // If the nonce isn't available, get it from chain.
+      if (!nonce) {
+        // current nonce
+        nonce = await this.api.query.system.accountNonce(accountId)
+        debug(`Got nonce for ${accountId} from chain: ${nonce}`)
+      }
+
+      return new Promise((resolve, reject) => {
+        debug('Signing and sending tx')
+        // send(statusUpdates) returns a function for unsubscribing from status updates
+        const unsubscribe = tx
+          .sign(fromKey, { nonce })
+          .send(({ events = [], status }) => {
+            debug(`TX status: ${status.type}`)
+
+            // Whatever events we get, process them if there's someone interested.
+            // It is critical that this event handling doesn't prevent the status handling below
+            try {
+              if (subscribed && callback) {
+                const matched = RuntimeApi.matchingEvents(subscribed, events)
+                debug('Matching events:', matched)
+                if (matched.length) {
+                  callback(matched)
+                }
+              }
+            } catch (err) {
+              debug(`Error handling events ${err.stack}`)
+            }
+
+            // We want to release lock as early as possible, sometimes Ready status
+            // doesn't occur, so we do it on Broadcast instead
+            if (status.isReady) {
+              debug('TX Ready.')
+              incrementNonce()
+              resolve(unsubscribe) // releases lock
+            } else if (status.isBroadcast) {
+              debug('TX Broadcast.')
+              incrementNonce()
+              resolve(unsubscribe) // releases lock
+            } else if (status.isFinalized) {
+              debug('TX Finalized.')
+              finalizedPromise.resolve(status)
+            } else if (status.isFuture) {
+              // comes before ready.
+              // does that mean it will remain in mempool or in api internal queue?
+              // nonce was set in the future. Treating it as an error for now.
+              debug('TX Future!')
+              // nonce is likely out of sync, delete it so we reload it from chain on next attempt
+              delete this.nonces[accountId]
+              const err = new Error('transaction nonce set in future')
+              finalizedPromise.reject(err)
+              reject(err)
+            }
+
+            /* why don't we see these status updates on local devchain (single node)
             isUsurped
             isBroadcast
             isDropped
             isInvalid
             */
-					})
-					.catch((err) => {
-						// 1014 error: Most likely you are sending transaction with the same nonce,
-						// so it assumes you want to replace existing one, but the priority is too low to replace it (priority = fee = len(encoded_transaction) currently)
-						// Remember this can also happen if in the past we sent a tx with a future nonce, and the current nonce
-						// now matches it.
-						if (err) {
-							const errstr = err.toString()
-							// not the best way to check error code.
-							// https://github.com/polkadot-js/api/blob/master/packages/rpc-provider/src/coder/index.ts#L52
-							if (
-								errstr.indexOf('Error: 1014:') < 0 && // low priority
-								errstr.indexOf('Error: 1010:') < 0
-							) {
-								// bad transaction
-								// Error but not nonce related. (bad arguments maybe)
-								debug('TX error', err)
-							} else {
-								// nonce is likely out of sync, delete it so we reload it from chain on next attempt
-								delete this.nonces[accountId]
-							}
-						}
-
-						finalizedPromise.reject(err)
-						// releases lock
-						reject(err)
-					})
-			})
-		})
-
-		// when does it make sense to manyally unsubscribe?
-		// at this point unsubscribe.then and unsubscribe.catch have been deleted
-		// unsubscribe() // don't unsubscribe if we want to wait for additional status
-		// updates to know when the tx has been finalized
-		return finalizedPromise.promise
-	}
-
-	/*
-	 * Sign and send a transaction expect event from
-	 * module and return eventProperty from the event.
-	 */
-	async signAndSendThenGetEventResult(senderAccountId, tx, { eventModule, eventName, eventProperty }) {
-		// event from a module,
-		const subscribed = [[eventModule, eventName]]
-		return new Promise(async (resolve, reject) => {
-			try {
-				await this.signAndSend(senderAccountId, tx, 1, subscribed, (events) => {
-					events.forEach((event) => {
-						// fix - we may not necessarily want the first event
-						// if there are multiple events emitted,
-						resolve(event[1][eventProperty])
-					})
-				})
-			} catch (err) {
-				reject(err)
-			}
-		})
-	}
+          })
+          .catch(err => {
+            // 1014 error: Most likely you are sending transaction with the same nonce,
+            // so it assumes you want to replace existing one, but the priority is too low to replace it (priority = fee = len(encoded_transaction) currently)
+            // Remember this can also happen if in the past we sent a tx with a future nonce, and the current nonce
+            // now matches it.
+            if (err) {
+              const errstr = err.toString()
+              // not the best way to check error code.
+              // https://github.com/polkadot-js/api/blob/master/packages/rpc-provider/src/coder/index.ts#L52
+              if (
+                errstr.indexOf('Error: 1014:') < 0 && // low priority
+                errstr.indexOf('Error: 1010:') < 0
+              ) {
+                // bad transaction
+                // Error but not nonce related. (bad arguments maybe)
+                debug('TX error', err)
+              } else {
+                // nonce is likely out of sync, delete it so we reload it from chain on next attempt
+                delete this.nonces[accountId]
+              }
+            }
+
+            finalizedPromise.reject(err)
+            // releases lock
+            reject(err)
+          })
+      })
+    })
+
+    // when does it make sense to manually unsubscribe?
+    // at this point unsubscribe.then and unsubscribe.catch have been deleted
+    // unsubscribe() // don't unsubscribe if we want to wait for additional status
+    // updates to know when the tx has been finalized
+    return finalizedPromise.promise
+  }
+
+  /*
+   * Sign and send a transaction, expecting an event from
+   * the given module, and return eventProperty from the event.
+   */
+  async signAndSendThenGetEventResult(senderAccountId, tx, { eventModule, eventName, eventProperty }) {
+    // event from a module,
+    const subscribed = [[eventModule, eventName]]
+    return new Promise(async (resolve, reject) => {
+      try {
+        await this.signAndSend(senderAccountId, tx, 1, subscribed, events => {
+          events.forEach(event => {
+            // fix - we may not necessarily want the first event
+            // if there are multiple events emitted,
+            resolve(event[1][eventProperty])
+          })
+        })
+      } catch (err) {
+        reject(err)
+      }
+    })
+  }
 }
 
 module.exports = {
-	RuntimeApi,
+  RuntimeApi,
 }
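
To illustrate the nonce-aware sending path, a sketch that submits an arbitrary extrinsic; the key file and recipient are placeholders, and balances.transfer merely stands in for any tx:

const { RuntimeApi } = require('@joystream/storage-runtime-api')

async function main() {
  const api = await RuntimeApi.create()
  const key = await api.identities.loadUnlock('sender.json') // hypothetical key file

  // Build any extrinsic via the underlying polkadot-js api
  const tx = api.api.tx.balances.transfer('<recipient address>', 1)

  // Resolves once the tx is finalized; the per-account lock plus the cached
  // nonce make concurrent sends from the same account safe
  await api.signAndSend(key.address, tx)

  // To extract a field from a resulting event instead, use
  // signAndSendThenGetEventResult() with the module/event/property names,
  // as registerMember() does with members/MemberRegistered/MemberId above

  api.disconnect()
}

main().catch(console.error)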

+ 19 - 19
storage-node/packages/runtime-api/test/assets.js

@@ -23,26 +23,26 @@ const expect = require('chai').expect
 const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('Assets', () => {
-	let api
-	before(async () => {
-		api = await RuntimeApi.create()
-		await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-	})
+  let api
+  before(async () => {
+    api = await RuntimeApi.create()
+    await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+  })
 
-	it('returns DataObjects for a content ID', async () => {
-		const obj = await api.assets.getDataObject('foo')
-		expect(obj.isNone).to.be.true
-	})
+  it('returns DataObjects for a content ID', async () => {
+    const obj = await api.assets.getDataObject('foo')
+    expect(obj.isNone).to.be.true
+  })
 
-	it('can check the liaison for a DataObject', async () => {
-		expect(async () => {
-			await api.assets.checkLiaisonForDataObject('foo', 'bar')
-		}).to.throw
-	})
+  it('can check the liaison for a DataObject', async () => {
+    expect(async () => {
+      await api.assets.checkLiaisonForDataObject('foo', 'bar')
+    }).to.throw
+  })
 
-	// Needs properly staked accounts
-	it('can accept content')
-	it('can reject content')
-	it('can create a storage relationship for content')
-	it('can toggle a storage relationship to ready state')
+  // Needs properly staked accounts
+  it('can accept content')
+  it('can reject content')
+  it('can create a storage relationship for content')
+  it('can toggle a storage relationship to ready state')
 })

+ 21 - 21
storage-node/packages/runtime-api/test/balances.js

@@ -23,28 +23,28 @@ const expect = require('chai').expect
 const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('Balances', () => {
-	let api
-	let key
-	before(async () => {
-		api = await RuntimeApi.create()
-		key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-	})
+  let api
+  let key
+  before(async () => {
+    api = await RuntimeApi.create()
+    key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+  })
 
-	it('returns free balance for an account', async () => {
-		const balance = await api.balances.freeBalance(key.address)
-		// Should be exactly zero
-		expect(balance.cmpn(0)).to.equal(0)
-	})
+  it('returns free balance for an account', async () => {
+    const balance = await api.balances.freeBalance(key.address)
+    // Should be exactly zero
+    expect(balance.cmpn(0)).to.equal(0)
+  })
 
-	it('checks whether a minimum balance exists', async () => {
-		// A minimum of 0 should exist, but no more.
-		expect(await api.balances.hasMinimumBalanceOf(key.address, 0)).to.be.true
-		expect(await api.balances.hasMinimumBalanceOf(key.address, 1)).to.be.false
-	})
+  it('checks whether a minimum balance exists', async () => {
+    // A minimum of 0 should exist, but no more.
+    expect(await api.balances.hasMinimumBalanceOf(key.address, 0)).to.be.true
+    expect(await api.balances.hasMinimumBalanceOf(key.address, 1)).to.be.false
+  })
 
-	it('returns the base transaction fee of the chain', async () => {
-		const fee = await api.balances.baseTransactionFee()
-		// >= 0 comparison works
-		expect(fee.cmpn(0)).to.be.at.least(0)
-	})
+  it('returns the base transaction fee of the chain', async () => {
+    const fee = await api.balances.baseTransactionFee()
+    // >= 0 comparison works
+    expect(fee.cmpn(0)).to.be.at.least(0)
+  })
 })

+ 53 - 53
storage-node/packages/runtime-api/test/identities.js

@@ -25,74 +25,74 @@ const temp = require('temp').track()
 const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('Identities', () => {
-	let api
-	before(async () => {
-		api = await RuntimeApi.create({ canPromptForPassphrase: true })
-	})
+  let api
+  before(async () => {
+    api = await RuntimeApi.create({ canPromptForPassphrase: true })
+  })
 
-	it('imports keys', async () => {
-		// Unlocked keys can be imported without asking for a passphrase
-		await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+  it('imports keys', async () => {
+    // Unlocked keys can be imported without asking for a passphrase
+    await api.identities.loadUnlock('test/data/edwards_unlocked.json')
 
-		// Edwards and schnorr keys should unlock
-		const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
-		await api.identities.loadUnlock('test/data/edwards.json')
-		await api.identities.loadUnlock('test/data/schnorr.json')
-		passphraseStub.restore()
+    // Edwards and schnorr keys should unlock
+    const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
+    await api.identities.loadUnlock('test/data/edwards.json')
+    await api.identities.loadUnlock('test/data/schnorr.json')
+    passphraseStub.restore()
 
-		// Except if the wrong passphrase is given
-		const passphraseStubBad = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'bad')
-		expect(async () => {
-			await api.identities.loadUnlock('test/data/edwards.json')
-		}).to.throw
-		passphraseStubBad.restore()
-	})
+    // Except if the wrong passphrase is given
+    const passphraseStubBad = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'bad')
+    expect(async () => {
+      await api.identities.loadUnlock('test/data/edwards.json')
+    }).to.throw
+    passphraseStubBad.restore()
+  })
 
-	it('knows about membership', async () => {
-		const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-		const addr = key.address
+  it('knows about membership', async () => {
+    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+    const addr = key.address
 
-		// Without seeding the runtime with data, we can only verify that the API
-		// reacts well in the absence of membership
-		expect(await api.identities.isMember(addr)).to.be.false
-		const memberId = await api.identities.firstMemberIdOf(addr)
+    // Without seeding the runtime with data, we can only verify that the API
+    // reacts well in the absence of membership
+    expect(await api.identities.isMember(addr)).to.be.false
+    const memberId = await api.identities.firstMemberIdOf(addr)
 
-		expect(memberId).to.be.undefined
-	})
+    expect(memberId).to.be.undefined
+  })
 
-	it('exports keys', async () => {
-		const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+  it('exports keys', async () => {
+    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
 
-		const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
-		const exported = await api.identities.exportKeyPair(key.address)
-		passphraseStub.restore()
+    const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
+    const exported = await api.identities.exportKeyPair(key.address)
+    passphraseStub.restore()
 
-		expect(exported).to.have.property('address')
-		expect(exported.address).to.equal(key.address)
+    expect(exported).to.have.property('address')
+    expect(exported.address).to.equal(key.address)
 
-		expect(exported).to.have.property('encoding')
+    expect(exported).to.have.property('encoding')
 
-		expect(exported.encoding).to.have.property('version', '2')
+    expect(exported.encoding).to.have.property('version', '2')
 
-		expect(exported.encoding).to.have.property('content')
-		expect(exported.encoding.content).to.include('pkcs8')
-		expect(exported.encoding.content).to.include('ed25519')
+    expect(exported.encoding).to.have.property('content')
+    expect(exported.encoding.content).to.include('pkcs8')
+    expect(exported.encoding.content).to.include('ed25519')
 
-		expect(exported.encoding).to.have.property('type')
-		expect(exported.encoding.type).to.include('salsa20')
-	})
+    expect(exported.encoding).to.have.property('type')
+    expect(exported.encoding.type).to.include('salsa20')
+  })
 
-	it('writes key export files', async () => {
-		const prefix = temp.mkdirSync('joystream-runtime-api-test')
+  it('writes key export files', async () => {
+    const prefix = temp.mkdirSync('joystream-runtime-api-test')
 
-		const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
 
-		const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
-		const filename = await api.identities.writeKeyPairExport(key.address, prefix)
-		passphraseStub.restore()
+    const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
+    const filename = await api.identities.writeKeyPairExport(key.address, prefix)
+    passphraseStub.restore()
 
-		const fs = require('fs')
-		const stat = fs.statSync(filename)
-		expect(stat.isFile()).to.be.true
-	})
+    const fs = require('fs')
+    const stat = fs.statSync(filename)
+    expect(stat.isFile()).to.be.true
+  })
 })

+ 4 - 4
storage-node/packages/runtime-api/test/index.js

@@ -21,8 +21,8 @@
 const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('RuntimeApi', () => {
-	it('can be created', async () => {
-		const api = await RuntimeApi.create()
-		api.disconnect()
-	})
+  it('can be created', async () => {
+    const api = await RuntimeApi.create()
+    api.disconnect()
+  })
 })

+ 254 - 256
storage-node/packages/runtime-api/workers.js

@@ -26,246 +26,244 @@ const { Worker } = require('@joystream/types/working-group')
  * Add worker related functionality to the substrate API.
  */
 class WorkersApi {
-	static async create(base) {
-		const ret = new WorkersApi()
-		ret.base = base
-		await ret.init()
-		return ret
-	}
-
-	// eslint-disable-next-line class-methods-use-this, require-await
-	async init() {
-		debug('Init')
-	}
-
-	/*
-	 * Check whether the given account and id represent an enrolled storage provider
-	 */
-	async isRoleAccountOfStorageProvider(storageProviderId, roleAccountId) {
-		const id = new BN(storageProviderId)
-		const roleAccount = this.base.identities.keyring.decodeAddress(roleAccountId)
-		const providerAccount = await this.storageProviderRoleAccount(id)
-		return providerAccount && providerAccount.eq(roleAccount)
-	}
-
-	/*
-	 * Returns true if the provider id is enrolled
-	 */
-	async isStorageProvider(storageProviderId) {
-		const worker = await this.storageWorkerByProviderId(storageProviderId)
-		return worker !== null
-	}
-
-	/*
-	 * Returns a provider's role account or null if provider doesn't exist
-	 */
-	async storageProviderRoleAccount(storageProviderId) {
-		const worker = await this.storageWorkerByProviderId(storageProviderId)
-		return worker ? worker.role_account_id : null
-	}
-
-	/*
-	 * Returns a Worker instance or null if provider does not exist
-	 */
-	async storageWorkerByProviderId(storageProviderId) {
-		const id = new BN(storageProviderId)
-		const { providers } = await this.getAllProviders()
-		return providers[id.toNumber()] || null
-	}
-
-	/*
-	 * Returns the the first found provider id with a role account or null if not found
-	 */
-	async findProviderIdByRoleAccount(roleAccount) {
-		const { ids, providers } = await this.getAllProviders()
-
-		for (let i = 0; i < ids.length; i++) {
-			const id = ids[i]
-			if (providers[id].role_account_id.eq(roleAccount)) {
-				return id
-			}
-		}
-
-		return null
-	}
-
-	/*
-	 * Returns the set of ids and Worker instances of providers enrolled on the network
-	 */
-	async getAllProviders() {
-		// const workerEntries = await this.base.api.query.storageWorkingGroup.workerById()
-		// can't rely on .isEmpty or isNone property to detect empty map
-		// return workerEntries.isNone ? [] : workerEntries[0]
-		// return workerEntries.isEmpty ? [] : workerEntries[0]
-		// So we iterate over possible ids which may or may not exist, by reading directly
-		// from storage value
-		const nextWorkerId = (await this.base.api.query.storageWorkingGroup.nextWorkerId()).toNumber()
-		const ids = []
-		const providers = {}
-		for (let id = 0; id < nextWorkerId; id++) {
-			// We get back an Option. Will be None if value doesn't exist
-			// eslint-disable-next-line no-await-in-loop
-			let value = await this.base.api.rpc.state.getStorage(
-				this.base.api.query.storageWorkingGroup.workerById.key(id)
-			)
-
-			if (!value.isNone) {
-				// no need to read from storage again!
-				// const worker = (await this.base.api.query.storageWorkingGroup.workerById(id))[0]
-				value = value.unwrap()
-				// construct the Worker type from raw data
-				// const worker = createType('WorkerOf', value)
-				// const worker = new Worker(value)
-				ids.push(id)
-				providers[id] = new Worker(value)
-			}
-		}
-
-		return { ids, providers }
-	}
-
-	async getLeadRoleAccount() {
-		const currentLead = await this.base.api.query.storageWorkingGroup.currentLead()
-		if (currentLead.isSome) {
-			const leadWorkerId = currentLead.unwrap()
-			const worker = await this.base.api.query.storageWorkingGroup.workerById(leadWorkerId)
-			return worker[0].role_account_id
-		}
-		return null
-	}
-
-	// Helper methods below don't really belong in the colossus runtime api library.
-	// They are only used by the dev-init command in the cli to setup a development environment
-
-	/*
-	 * Add a new storage group opening using the lead account. Returns the
-	 * new opening id.
-	 */
-	async devAddStorageOpening() {
-		const openTx = this.devMakeAddOpeningTx('Worker')
-		return this.devSubmitAddOpeningTx(openTx, await this.getLeadRoleAccount())
-	}
-
-	/*
-	 * Add a new storage working group lead opening using sudo account. Returns the
-	 * new opening id.
-	 */
-	async devAddStorageLeadOpening() {
-		const openTx = this.devMakeAddOpeningTx('Leader')
-		const sudoTx = this.base.api.tx.sudo.sudo(openTx)
-		return this.devSubmitAddOpeningTx(sudoTx, await this.base.identities.getSudoAccount())
-	}
-
-	/*
-	 * Constructs an addOpening tx of openingType
-	 */
-	devMakeAddOpeningTx(openingType) {
-		return this.base.api.tx.storageWorkingGroup.addOpening(
-			'CurrentBlock',
-			{
-				application_rationing_policy: {
-					max_active_applicants: 1,
-				},
-				max_review_period_length: 1000,
-				// default values for everything else..
-			},
-			'dev-opening',
-			openingType
-		)
-	}
-
-	/*
-	 * Submits a tx (expecting it to dispatch storageWorkingGroup.addOpening) and returns
-	 * the OpeningId from the resulting event.
-	 */
-	async devSubmitAddOpeningTx(tx, senderAccount) {
-		return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
-			eventModule: 'storageWorkingGroup',
-			eventName: 'OpeningAdded',
-			eventProperty: 'OpeningId',
-		})
-	}
-
-	/*
-	 * Apply on an opening, returns the application id.
-	 */
-	async devApplyOnOpening(openingId, memberId, memberAccount, roleAccount) {
-		const applyTx = this.base.api.tx.storageWorkingGroup.applyOnOpening(
-			memberId,
-			openingId,
-			roleAccount,
-			null,
-			null,
-			`colossus-${memberId}`
-		)
-
-		return this.base.signAndSendThenGetEventResult(memberAccount, applyTx, {
-			eventModule: 'storageWorkingGroup',
-			eventName: 'AppliedOnOpening',
-			eventProperty: 'ApplicationId',
-		})
-	}
-
-	/*
-	 * Move lead opening to review state using sudo account
-	 */
-	async devBeginLeadOpeningReview(openingId) {
-		const beginReviewTx = this.devMakeBeginOpeningReviewTx(openingId)
-		const sudoTx = this.base.api.tx.sudo.sudo(beginReviewTx)
-		return this.base.signAndSend(await this.base.identities.getSudoAccount(), sudoTx)
-	}
-
-	/*
-	 * Move a storage opening to review state using lead account
-	 */
-	async devBeginStorageOpeningReview(openingId) {
-		const beginReviewTx = this.devMakeBeginOpeningReviewTx(openingId)
-		return this.base.signAndSend(await this.getLeadRoleAccount(), beginReviewTx)
-	}
-
-	/*
-	 * Constructs a beingApplicantReview tx for openingId, which puts an opening into the review state
-	 */
-	devMakeBeginOpeningReviewTx(openingId) {
-		return this.base.api.tx.storageWorkingGroup.beginApplicantReview(openingId)
-	}
-
-	/*
-	 * Fill a lead opening, return the assigned worker id, using the sudo account
-	 */
-	async devFillLeadOpening(openingId, applicationId) {
-		const fillTx = this.devMakeFillOpeningTx(openingId, applicationId)
-		const sudoTx = this.base.api.tx.sudo.sudo(fillTx)
-		const filled = await this.devSubmitFillOpeningTx(await this.base.identities.getSudoAccount(), sudoTx)
-		return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
-	}
-
-	/*
-	 * Fill a storage opening, return the assigned worker id, using the lead account
-	 */
-	async devFillStorageOpening(openingId, applicationId) {
-		const fillTx = this.devMakeFillOpeningTx(openingId, applicationId)
-		const filled = await this.devSubmitFillOpeningTx(await this.getLeadRoleAccount(), fillTx)
-		return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
-	}
-
-	/*
-	 * Constructs a FillOpening transaction
-	 */
-	devMakeFillOpeningTx(openingId, applicationId) {
-		return this.base.api.tx.storageWorkingGroup.fillOpening(openingId, [applicationId], null)
-	}
-
-	/*
-	 * Dispatches a fill opening tx and returns a map of the application id to their new assigned worker ids.
-	 */
-	async devSubmitFillOpeningTx(senderAccount, tx) {
-		return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
-			eventModule: 'storageWorkingGroup',
-			eventName: 'OpeningFilled',
-			eventProperty: 'ApplicationIdToWorkerIdMap',
-		})
-	}
+  static async create(base) {
+    const ret = new WorkersApi()
+    ret.base = base
+    await ret.init()
+    return ret
+  }
+
+  // eslint-disable-next-line class-methods-use-this, require-await
+  async init() {
+    debug('Init')
+  }
+
+  /*
+   * Check whether the given account and id represent an enrolled storage provider
+   */
+  async isRoleAccountOfStorageProvider(storageProviderId, roleAccountId) {
+    const id = new BN(storageProviderId)
+    const roleAccount = this.base.identities.keyring.decodeAddress(roleAccountId)
+    const providerAccount = await this.storageProviderRoleAccount(id)
+    return providerAccount && providerAccount.eq(roleAccount)
+  }
+
+  /*
+   * Returns true if the provider id is enrolled
+   */
+  async isStorageProvider(storageProviderId) {
+    const worker = await this.storageWorkerByProviderId(storageProviderId)
+    return worker !== null
+  }
+
+  /*
+   * Returns a provider's role account or null if provider doesn't exist
+   */
+  async storageProviderRoleAccount(storageProviderId) {
+    const worker = await this.storageWorkerByProviderId(storageProviderId)
+    return worker ? worker.role_account_id : null
+  }
+
+  /*
+   * Returns a Worker instance or null if provider does not exist
+   */
+  async storageWorkerByProviderId(storageProviderId) {
+    const id = new BN(storageProviderId)
+    const { providers } = await this.getAllProviders()
+    return providers[id.toNumber()] || null
+  }
+
+  /*
+   * Returns the first found provider id with a role account, or null if not found
+   */
+  async findProviderIdByRoleAccount(roleAccount) {
+    const { ids, providers } = await this.getAllProviders()
+
+    for (let i = 0; i < ids.length; i++) {
+      const id = ids[i]
+      if (providers[id].role_account_id.eq(roleAccount)) {
+        return id
+      }
+    }
+
+    return null
+  }
+
+  /*
+   * Returns the set of ids and Worker instances of providers enrolled on the network
+   */
+  async getAllProviders() {
+    // const workerEntries = await this.base.api.query.storageWorkingGroup.workerById()
+    // can't rely on .isEmpty or isNone property to detect empty map
+    // return workerEntries.isNone ? [] : workerEntries[0]
+    // return workerEntries.isEmpty ? [] : workerEntries[0]
+    // So we iterate over possible ids which may or may not exist, by reading directly
+    // from storage value
+    const nextWorkerId = (await this.base.api.query.storageWorkingGroup.nextWorkerId()).toNumber()
+    const ids = []
+    const providers = {}
+    for (let id = 0; id < nextWorkerId; id++) {
+      // We get back an Option. Will be None if value doesn't exist
+      // eslint-disable-next-line no-await-in-loop
+      let value = await this.base.api.rpc.state.getStorage(this.base.api.query.storageWorkingGroup.workerById.key(id))
+
+      if (!value.isNone) {
+        // no need to read from storage again!
+        // const worker = (await this.base.api.query.storageWorkingGroup.workerById(id))[0]
+        value = value.unwrap()
+        // construct the Worker type from raw data
+        // const worker = createType('WorkerOf', value)
+        // const worker = new Worker(value)
+        ids.push(id)
+        providers[id] = new Worker(value)
+      }
+    }
+
+    return { ids, providers }
+  }
+
+  async getLeadRoleAccount() {
+    const currentLead = await this.base.api.query.storageWorkingGroup.currentLead()
+    if (currentLead.isSome) {
+      const leadWorkerId = currentLead.unwrap()
+      const worker = await this.base.api.query.storageWorkingGroup.workerById(leadWorkerId)
+      return worker[0].role_account_id
+    }
+    return null
+  }
+
+  // Helper methods below don't really belong in the colossus runtime api library.
+  // They are only used by the dev-init command in the cli to set up a development environment
+
+  /*
+   * Add a new storage group opening using the lead account. Returns the
+   * new opening id.
+   */
+  async devAddStorageOpening() {
+    const openTx = this.devMakeAddOpeningTx('Worker')
+    return this.devSubmitAddOpeningTx(openTx, await this.getLeadRoleAccount())
+  }
+
+  /*
+   * Add a new storage working group lead opening using sudo account. Returns the
+   * new opening id.
+   */
+  async devAddStorageLeadOpening() {
+    const openTx = this.devMakeAddOpeningTx('Leader')
+    const sudoTx = this.base.api.tx.sudo.sudo(openTx)
+    return this.devSubmitAddOpeningTx(sudoTx, await this.base.identities.getSudoAccount())
+  }
+
+  /*
+   * Constructs an addOpening tx of openingType
+   */
+  devMakeAddOpeningTx(openingType) {
+    return this.base.api.tx.storageWorkingGroup.addOpening(
+      'CurrentBlock',
+      {
+        application_rationing_policy: {
+          max_active_applicants: 1,
+        },
+        max_review_period_length: 1000,
+        // default values for everything else..
+      },
+      'dev-opening',
+      openingType
+    )
+  }
+
+  /*
+   * Submits a tx (expecting it to dispatch storageWorkingGroup.addOpening) and returns
+   * the OpeningId from the resulting event.
+   */
+  async devSubmitAddOpeningTx(tx, senderAccount) {
+    return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
+      eventModule: 'storageWorkingGroup',
+      eventName: 'OpeningAdded',
+      eventProperty: 'OpeningId',
+    })
+  }
+
+  /*
+   * Apply on an opening, returns the application id.
+   */
+  async devApplyOnOpening(openingId, memberId, memberAccount, roleAccount) {
+    const applyTx = this.base.api.tx.storageWorkingGroup.applyOnOpening(
+      memberId,
+      openingId,
+      roleAccount,
+      null,
+      null,
+      `colossus-${memberId}`
+    )
+
+    return this.base.signAndSendThenGetEventResult(memberAccount, applyTx, {
+      eventModule: 'storageWorkingGroup',
+      eventName: 'AppliedOnOpening',
+      eventProperty: 'ApplicationId',
+    })
+  }
+
+  /*
+   * Move lead opening to review state using sudo account
+   */
+  async devBeginLeadOpeningReview(openingId) {
+    const beginReviewTx = this.devMakeBeginOpeningReviewTx(openingId)
+    const sudoTx = this.base.api.tx.sudo.sudo(beginReviewTx)
+    return this.base.signAndSend(await this.base.identities.getSudoAccount(), sudoTx)
+  }
+
+  /*
+   * Move a storage opening to review state using lead account
+   */
+  async devBeginStorageOpeningReview(openingId) {
+    const beginReviewTx = this.devMakeBeginOpeningReviewTx(openingId)
+    return this.base.signAndSend(await this.getLeadRoleAccount(), beginReviewTx)
+  }
+
+  /*
+   * Constructs a beginApplicantReview tx for openingId, which puts an opening into the review state
+   */
+  devMakeBeginOpeningReviewTx(openingId) {
+    return this.base.api.tx.storageWorkingGroup.beginApplicantReview(openingId)
+  }
+
+  /*
+   * Fill a lead opening, return the assigned worker id, using the sudo account
+   */
+  async devFillLeadOpening(openingId, applicationId) {
+    const fillTx = this.devMakeFillOpeningTx(openingId, applicationId)
+    const sudoTx = this.base.api.tx.sudo.sudo(fillTx)
+    const filled = await this.devSubmitFillOpeningTx(await this.base.identities.getSudoAccount(), sudoTx)
+    return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
+  }
+
+  /*
+   * Fill a storage opening, return the assigned worker id, using the lead account
+   */
+  async devFillStorageOpening(openingId, applicationId) {
+    const fillTx = this.devMakeFillOpeningTx(openingId, applicationId)
+    const filled = await this.devSubmitFillOpeningTx(await this.getLeadRoleAccount(), fillTx)
+    return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
+  }
+
+  /*
+   * Constructs a FillOpening transaction
+   */
+  devMakeFillOpeningTx(openingId, applicationId) {
+    return this.base.api.tx.storageWorkingGroup.fillOpening(openingId, [applicationId], null)
+  }
+
+  /*
+   * Dispatches a fill opening tx and returns a map of application ids to their newly assigned worker ids.
+   */
+  async devSubmitFillOpeningTx(senderAccount, tx) {
+    return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
+      eventModule: 'storageWorkingGroup',
+      eventName: 'OpeningFilled',
+      eventProperty: 'ApplicationIdToWorkerIdMap',
+    })
+  }
 }
 
 /*
@@ -274,28 +272,28 @@ class WorkersApi {
  * contain at least one entry.
  */
 function getWorkerIdFromApplicationIdToWorkerIdMap(filledMap, applicationId) {
-	if (filledMap.size === 0) {
-		throw new Error('Expected opening to be filled!')
-	}
+  if (filledMap.size === 0) {
+    throw new Error('Expected opening to be filled!')
+  }
 
-	let ourApplicationIdKey
+  let ourApplicationIdKey
 
-	for (const key of filledMap.keys()) {
-		if (key.eq(applicationId)) {
-			ourApplicationIdKey = key
-			break
-		}
-	}
+  for (const key of filledMap.keys()) {
+    if (key.eq(applicationId)) {
+      ourApplicationIdKey = key
+      break
+    }
+  }
 
-	if (!ourApplicationIdKey) {
-		throw new Error('Expected application id to have been filled!')
-	}
+  if (!ourApplicationIdKey) {
+    throw new Error('Expected application id to have been filled!')
+  }
 
-	const workerId = filledMap.get(ourApplicationIdKey)
+  const workerId = filledMap.get(ourApplicationIdKey)
 
-	return workerId
+  return workerId
 }
 
 module.exports = {
-	WorkersApi,
+  WorkersApi,
 }
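
For orientation, the dev-init flow in the cli chains these helpers end to end. A minimal sketch, assuming a `workers` instance of WorkersApi plus member and account ids obtained elsewhere:

  // Lead opens a storage opening, a member applies on it, the opening is
  // moved into review and then filled, which yields the new worker id.
  async function devInitStorageWorker(workers, memberId, memberAccount, roleAccount) {
    const openingId = await workers.devAddStorageOpening()
    const applicationId = await workers.devApplyOnOpening(openingId, memberId, memberAccount, roleAccount)
    await workers.devBeginStorageOpeningReview(openingId)
    return workers.devFillStorageOpening(openingId, applicationId)
  }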

+ 72 - 72
storage-node/packages/storage/filter.js

@@ -26,57 +26,57 @@ const DEFAULT_REJECT_TYPES = []
 
 // Configuration defaults
 function configDefaults(config) {
-	const filter = config.filter || {}
+  const filter = config.filter || {}
 
-	// We accept zero as switching this check off.
-	if (typeof filter.max_size === 'undefined') {
-		filter.max_size = DEFAULT_MAX_FILE_SIZE
-	}
+  // We accept zero as switching this check off.
+  if (typeof filter.max_size === 'undefined') {
+    filter.max_size = DEFAULT_MAX_FILE_SIZE
+  }
 
-	// Figure out mime types
-	filter.mime = filter.mime || []
-	filter.mime.accept = filter.mime.accept || DEFAULT_ACCEPT_TYPES
-	filter.mime.reject = filter.mime.reject || DEFAULT_REJECT_TYPES
+  // Figure out mime types
+  filter.mime = filter.mime || {} // container for the accept/reject lists
+  filter.mime.accept = filter.mime.accept || DEFAULT_ACCEPT_TYPES
+  filter.mime.reject = filter.mime.reject || DEFAULT_REJECT_TYPES
 
-	return filter
+  return filter
 }
 
 // Mime type matching
 function mimeMatches(acceptable, provided) {
-	if (acceptable.endsWith('*')) {
-		// Wildcard match
-		const prefix = acceptable.slice(0, acceptable.length - 1)
-		debug('wildcard matching', provided, 'against', acceptable, '/', prefix)
-		return provided.startsWith(prefix)
-	}
-	// Exact match
-	debug('exact matching', provided, 'against', acceptable)
-	return provided === acceptable
+  if (acceptable.endsWith('*')) {
+    // Wildcard match
+    const prefix = acceptable.slice(0, acceptable.length - 1)
+    debug('wildcard matching', provided, 'against', acceptable, '/', prefix)
+    return provided.startsWith(prefix)
+  }
+  // Exact match
+  debug('exact matching', provided, 'against', acceptable)
+  return provided === acceptable
 }
 
 function mimeMatchesAny(accept, reject, provided) {
-	// Pass accept
-	let accepted = false
-	for (const item of accept) {
-		if (mimeMatches(item, provided)) {
-			debug('Content type matches', item, 'which is acceptable.')
-			accepted = true
-			break
-		}
-	}
-	if (!accepted) {
-		return false
-	}
-
-	// Don't pass reject
-	for (const item of reject) {
-		if (mimeMatches(item, provided)) {
-			debug('Content type matches', item, 'which is unacceptable.')
-			return false
-		}
-	}
-
-	return true
+  // Pass accept
+  let accepted = false
+  for (const item of accept) {
+    if (mimeMatches(item, provided)) {
+      debug('Content type matches', item, 'which is acceptable.')
+      accepted = true
+      break
+    }
+  }
+  if (!accepted) {
+    return false
+  }
+
+  // Don't pass reject
+  for (const item of reject) {
+    if (mimeMatches(item, provided)) {
+      debug('Content type matches', item, 'which is unacceptable.')
+      return false
+    }
+  }
+
+  return true
 }
 
 /**
@@ -92,37 +92,37 @@ function mimeMatchesAny(accept, reject, provided) {
  * @return {object} HTTP status code and error message.
  **/
 function filterFunc(config, headers, mimeType) {
-	const filter = configDefaults(config)
-
-	// Enforce maximum file upload size
-	if (filter.max_size) {
-		const size = parseInt(headers['content-length'], 10)
-		if (!size) {
-			return {
-				code: 411,
-				message: 'A Content-Length header is required.',
-			}
-		}
-
-		if (size > filter.max_size) {
-			return {
-				code: 413,
-				message: 'The provided Content-Length is too large.',
-			}
-		}
-	}
-
-	// Enforce mime type based filtering
-	if (!mimeMatchesAny(filter.mime.accept, filter.mime.reject, mimeType)) {
-		return {
-			code: 415,
-			message: 'Content has an unacceptable MIME type.',
-		}
-	}
-
-	return {
-		code: 200,
-	}
+  const filter = configDefaults(config)
+
+  // Enforce maximum file upload size
+  if (filter.max_size) {
+    const size = parseInt(headers['content-length'], 10)
+    if (!size) {
+      return {
+        code: 411,
+        message: 'A Content-Length header is required.',
+      }
+    }
+
+    if (size > filter.max_size) {
+      return {
+        code: 413,
+        message: 'The provided Content-Length is too large.',
+      }
+    }
+  }
+
+  // Enforce mime type based filtering
+  if (!mimeMatchesAny(filter.mime.accept, filter.mime.reject, mimeType)) {
+    return {
+      code: 415,
+      message: 'Content has an unacceptable MIME type.',
+    }
+  }
+
+  return {
+    code: 200,
+  }
 }
 
 module.exports = filterFunc
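
As a usage sketch (the config values and headers here are illustrative, not defaults):

  const config = { filter: { max_size: 100 * 1024 * 1024, mime: { accept: ['video/*'], reject: [] } } }

  const status = filterFunc(config, { 'content-length': '1024' }, 'video/mp4')
  // status.code === 200 accepts the upload; 411 (missing Content-Length),
  // 413 (too large) and 415 (unacceptable MIME type) reject it.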

+ 1 - 1
storage-node/packages/storage/index.js

@@ -21,5 +21,5 @@
 const { Storage } = require('./storage')
 
 module.exports = {
-	Storage,
+  Storage,
 }

+ 311 - 311
storage-node/packages/storage/storage.js

@@ -26,7 +26,7 @@ const debug = require('debug')('joystream:storage:storage')
 const Promise = require('bluebird')
 
 Promise.config({
-	cancellation: true,
+  cancellation: true,
 })
 
 const fileType = require('file-type')
@@ -39,15 +39,15 @@ const _ = require('lodash')
 const DEFAULT_TIMEOUT = 30 * 1000
 
 // Default/dummy resolution implementation.
-const DEFAULT_RESOLVE_CONTENT_ID = async (original) => {
-	debug('Warning: Default resolution returns original CID', original)
-	return original
+const DEFAULT_RESOLVE_CONTENT_ID = async original => {
+  debug('Warning: Default resolution returns original CID', original)
+  return original
 }
 
 // Default file info if nothing could be detected.
 const DEFAULT_FILE_INFO = {
-	mimeType: 'application/octet-stream',
-	ext: 'bin',
+  mimeType: 'application/octet-stream',
+  ext: 'bin',
 }
 
 /*
@@ -58,20 +58,20 @@ const DEFAULT_FILE_INFO = {
  * go wrong.
  */
 function fixFileInfo(info) {
-	if (!info) {
-		info = DEFAULT_FILE_INFO
-	} else {
-		info.mimeType = info.mime
-		delete info.mime
-	}
-	return info
+  if (!info) {
+    info = DEFAULT_FILE_INFO
+  } else {
+    info.mimeType = info.mime
+    delete info.mime
+  }
+  return info
 }
 
 function fixFileInfoOnStream(stream) {
-	const info = fixFileInfo(stream.fileType)
-	delete stream.fileType
-	stream.fileInfo = info
-	return stream
+  const info = fixFileInfo(stream.fileType)
+  delete stream.fileType
+  stream.fileInfo = info
+  return stream
 }
 
 /*
@@ -79,100 +79,100 @@ function fixFileInfoOnStream(stream) {
  * MIME type detection, and a commit() function.
  */
 class StorageWriteStream extends Transform {
-	constructor(storage, options) {
-		options = _.clone(options || {})
-
-		super(options)
-
-		this.storage = storage
-
-		// Create temp target.
-		this.temp = temp.createWriteStream()
-		this.buf = Buffer.alloc(0)
-	}
-
-	_transform(chunk, encoding, callback) {
-		// Deal with buffers only
-		if (typeof chunk === 'string') {
-			chunk = Buffer.from(chunk)
-		}
-
-		// Logging this all the time is too verbose
-		// debug('Writing temporary chunk', chunk.length, chunk);
-		this.temp.write(chunk)
-
-		// Try to detect file type during streaming.
-		if (!this.fileInfo && this.buf < fileType.minimumBytes) {
-			this.buf = Buffer.concat([this.buf, chunk])
-
-			if (this.buf >= fileType.minimumBytes) {
-				const info = fileType(this.buf)
-				// No info? We can try again at the end of the stream.
-				if (info) {
-					this.fileInfo = fixFileInfo(info)
-					this.emit('fileInfo', this.fileInfo)
-				}
-			}
-		}
-
-		callback(null)
-	}
-
-	_flush(callback) {
-		debug('Flushing temporary stream:', this.temp.path)
-		this.temp.end()
-
-		// Since we're finished, we can try to detect the file type again.
-		if (!this.fileInfo) {
-			const read = fs.createReadStream(this.temp.path)
-			fileType
-				.stream(read)
-				.then((stream) => {
-					this.fileInfo = fixFileInfoOnStream(stream).fileInfo
-					this.emit('fileInfo', this.fileInfo)
-				})
-				.catch((err) => {
-					debug('Error trying to detect file type at end-of-stream:', err)
-				})
-		}
-
-		callback(null)
-	}
-
-	/*
-	 * Commit this stream to the IPFS backend.
-	 */
-	commit() {
-		// Create a read stream from the temp file.
-		if (!this.temp) {
-			throw new Error('Cannot commit a temporary stream that does not exist. Did you call cleanup()?')
-		}
-
-		debug('Committing temporary stream: ', this.temp.path)
-		this.storage.ipfs
-			.addFromFs(this.temp.path)
-			.then(async (result) => {
-				const hash = result[0].hash
-				debug('Stream committed as', hash)
-				this.emit('committed', hash)
-				await this.storage.ipfs.pin.add(hash)
-			})
-			.catch((err) => {
-				debug('Error committing stream', err)
-				this.emit('error', err)
-			})
-	}
-
-	/*
-	 * Clean up temporary data.
-	 */
-	cleanup() {
-		debug('Cleaning up temporary file: ', this.temp.path)
-		fs.unlink(this.temp.path, () => {
-			/* Ignore errors.*/
-		})
-		delete this.temp
-	}
+  constructor(storage, options) {
+    options = _.clone(options || {})
+
+    super(options)
+
+    this.storage = storage
+
+    // Create temp target.
+    this.temp = temp.createWriteStream()
+    this.buf = Buffer.alloc(0)
+  }
+
+  _transform(chunk, encoding, callback) {
+    // Deal with buffers only
+    if (typeof chunk === 'string') {
+      chunk = Buffer.from(chunk)
+    }
+
+    // Logging this all the time is too verbose
+    // debug('Writing temporary chunk', chunk.length, chunk);
+    this.temp.write(chunk)
+
+    // Try to detect file type during streaming.
+    if (!this.fileInfo && this.buf.length < fileType.minimumBytes) {
+      this.buf = Buffer.concat([this.buf, chunk])
+
+      if (this.buf.length >= fileType.minimumBytes) {
+        const info = fileType(this.buf)
+        // No info? We can try again at the end of the stream.
+        if (info) {
+          this.fileInfo = fixFileInfo(info)
+          this.emit('fileInfo', this.fileInfo)
+        }
+      }
+    }
+
+    callback(null)
+  }
+
+  _flush(callback) {
+    debug('Flushing temporary stream:', this.temp.path)
+    this.temp.end()
+
+    // Since we're finished, we can try to detect the file type again.
+    if (!this.fileInfo) {
+      const read = fs.createReadStream(this.temp.path)
+      fileType
+        .stream(read)
+        .then(stream => {
+          this.fileInfo = fixFileInfoOnStream(stream).fileInfo
+          this.emit('fileInfo', this.fileInfo)
+        })
+        .catch(err => {
+          debug('Error trying to detect file type at end-of-stream:', err)
+        })
+    }
+
+    callback(null)
+  }
+
+  /*
+   * Commit this stream to the IPFS backend.
+   */
+  commit() {
+    // Create a read stream from the temp file.
+    if (!this.temp) {
+      throw new Error('Cannot commit a temporary stream that does not exist. Did you call cleanup()?')
+    }
+
+    debug('Committing temporary stream: ', this.temp.path)
+    this.storage.ipfs
+      .addFromFs(this.temp.path)
+      .then(async result => {
+        const hash = result[0].hash
+        debug('Stream committed as', hash)
+        this.emit('committed', hash)
+        await this.storage.ipfs.pin.add(hash)
+      })
+      .catch(err => {
+        debug('Error committing stream', err)
+        this.emit('error', err)
+      })
+  }
+
+  /*
+   * Clean up temporary data.
+   */
+  cleanup() {
+    debug('Cleaning up temporary file: ', this.temp.path)
+    fs.unlink(this.temp.path, () => {
+      /* Ignore errors.*/
+    })
+    delete this.temp
+  }
 }
 
 /*
@@ -184,207 +184,207 @@ class StorageWriteStream extends Transform {
  *   store.open(...);
  */
 class Storage {
-	/*
-	 * Create a Storage instance. Options include:
-	 *
-	 * - an `ipfs` property, which is itself a hash containing
-	 *   - `connect_options` to be passed to the IPFS client library for
-	 *     connecting to an IPFS node.
-	 * - a `resolve_content_id` function, which translates Joystream
-	 *   content IDs to IPFS content IDs or vice versa. The default is to
-	 *   not perform any translation, which is not practical for a production
-	 *   system, but serves its function during development and testing. The
-	 *   function must be asynchronous.
-	 * - a `timeout` parameter, defaulting to DEFAULT_TIMEOUT. After this time,
-	 *   requests to the IPFS backend time out.
-	 *
-	 * Functions in this class accept an optional timeout parameter. If the
-	 * timeout is given, it is used - otherwise, the `option.timeout` value
-	 * above is used.
-	 */
-	static create(options) {
-		const storage = new Storage()
-		storage._init(options)
-		return storage
-	}
-
-	_init(options) {
-		this.options = _.clone(options || {})
-		this.options.ipfs = this.options.ipfs || {}
-
-		this._timeout = this.options.timeout || DEFAULT_TIMEOUT
-		this._resolve_content_id = this.options.resolve_content_id || DEFAULT_RESOLVE_CONTENT_ID
-
-		this.ipfs = ipfsClient(this.options.ipfs.connect_options)
-
-		this.pins = {}
-
-		this.ipfs.id((err, identity) => {
-			if (err) {
-				debug(`Warning IPFS daemon not running: ${err.message}`)
-			} else {
-				debug(`IPFS node is up with identity: ${identity.id}`)
-			}
-		})
-	}
-
-	/*
-	 * Uses bluebird's timeout mechanism to return a Promise that times out after
-	 * the given timeout interval, and tries to execute the given operation within
-	 * that time.
-	 */
-	async withSpecifiedTimeout(timeout, operation) {
-		return new Promise(async (resolve, reject) => {
-			try {
-				resolve(await new Promise(operation))
-			} catch (err) {
-				reject(err)
-			}
-		}).timeout(timeout || this._timeout)
-	}
-
-	/*
-	 * Resolve content ID with timeout.
-	 */
-	async resolveContentIdWithTimeout(timeout, contentId) {
-		return await this.withSpecifiedTimeout(timeout, async (resolve, reject) => {
-			try {
-				resolve(await this._resolve_content_id(contentId))
-			} catch (err) {
-				reject(err)
-			}
-		})
-	}
-
-	/*
-	 * Stat a content ID.
-	 */
-	async stat(contentId, timeout) {
-		const resolved = await this.resolveContentIdWithTimeout(timeout, contentId)
-
-		return await this.withSpecifiedTimeout(timeout, (resolve, reject) => {
-			this.ipfs.files.stat(`/ipfs/${resolved}`, { withLocal: true }, (err, res) => {
-				if (err) {
-					reject(err)
-					return
-				}
-				resolve(res)
-			})
-		})
-	}
-
-	/*
-	 * Return the size of a content ID.
-	 */
-	async size(contentId, timeout) {
-		const stat = await this.stat(contentId, timeout)
-		return stat.size
-	}
-
-	/*
-	 * Opens the specified content in read or write mode, and returns a Promise
-	 * with the stream.
-	 *
-	 * Read streams will contain a fileInfo property, with:
-	 *  - a `mimeType` field providing the file's MIME type, or a default.
-	 *  - an `ext` property, providing a file extension suggestion, or a default.
-	 *
-	 * Write streams have a slightly different flow, in order to allow for MIME
-	 * type detection and potential filtering. First off, they are written to a
-	 * temporary location, and only committed to the backend once their
-	 * `commit()` function is called.
-	 *
-	 * When the commit has finished, a `committed` event is emitted, which
-	 * contains the IPFS backend's content ID.
-	 *
-	 * Write streams also emit a `fileInfo` event during writing. It is passed
-	 * the `fileInfo` field as described above. Event listeners may now opt to
-	 * abort the write or continue and eventually `commit()` the file. There is
-	 * an explicit `cleanup()` function that removes temporary files as well,
-	 * in case comitting is not desired.
-	 */
-	async open(contentId, mode, timeout) {
-		if (mode !== 'r' && mode !== 'w') {
-			throw Error('The only supported modes are "r", "w" and "a".')
-		}
-
-		// Write stream
-		if (mode === 'w') {
-			return await this.createWriteStream(contentId, timeout)
-		}
-
-		// Read stream - with file type detection
-		return await this.createReadStream(contentId, timeout)
-	}
-
-	async createWriteStream() {
-		// IPFS wants us to just dump a stream into its storage, then returns a
-		// content ID (of its own).
-		// We need to instead return a stream immediately, that we eventually
-		// decorate with the content ID when that's available.
-		return new Promise((resolve) => {
-			const stream = new StorageWriteStream(this)
-			resolve(stream)
-		})
-	}
-
-	async createReadStream(contentId, timeout) {
-		const resolved = await this.resolveContentIdWithTimeout(timeout, contentId)
-
-		let found = false
-		return await this.withSpecifiedTimeout(timeout, (resolve, reject) => {
-			const ls = this.ipfs.getReadableStream(resolved)
-			ls.on('data', async (result) => {
-				if (result.path === resolved) {
-					found = true
-
-					const ftStream = await fileType.stream(result.content)
-					resolve(fixFileInfoOnStream(ftStream))
-				}
-			})
-			ls.on('error', (err) => {
-				ls.end()
-				debug(err)
-				reject(err)
-			})
-			ls.on('end', () => {
-				if (!found) {
-					const err = new Error('No matching content found for', contentId)
-					debug(err)
-					reject(err)
-				}
-			})
-			ls.resume()
-		})
-	}
-
-	/*
-	 * Synchronize the given content ID
-	 */
-	async synchronize(contentId) {
-		const resolved = await this.resolveContentIdWithTimeout(this._timeout, contentId)
-
-		// validate resolved id is proper ipfs_cid, not null or empty string
-
-		if (this.pins[resolved]) {
-			return
-		}
-
-		debug(`Pinning ${resolved}`)
-
-		// This call blocks until file is retrieved..
-		this.ipfs.pin.add(resolved, { quiet: true, pin: true }, (err) => {
-			if (err) {
-				debug(`Error Pinning: ${resolved}`)
-				delete this.pins[resolved]
-			} else {
-				debug(`Pinned ${resolved}`)
-				// why aren't we doing this.pins[resolved] = true
-			}
-		})
-	}
+  /*
+   * Create a Storage instance. Options include:
+   *
+   * - an `ipfs` property, which is itself a hash containing
+   *   - `connect_options` to be passed to the IPFS client library for
+   *     connecting to an IPFS node.
+   * - a `resolve_content_id` function, which translates Joystream
+   *   content IDs to IPFS content IDs or vice versa. The default is to
+   *   not perform any translation, which is not practical for a production
+   *   system, but serves its function during development and testing. The
+   *   function must be asynchronous.
+   * - a `timeout` parameter, defaulting to DEFAULT_TIMEOUT. After this time,
+   *   requests to the IPFS backend time out.
+   *
+   * Functions in this class accept an optional timeout parameter. If the
+   * timeout is given, it is used - otherwise, the `options.timeout` value
+   * above is used.
+   */
+  static create(options) {
+    const storage = new Storage()
+    storage._init(options)
+    return storage
+  }
+
+  _init(options) {
+    this.options = _.clone(options || {})
+    this.options.ipfs = this.options.ipfs || {}
+
+    this._timeout = this.options.timeout || DEFAULT_TIMEOUT
+    this._resolve_content_id = this.options.resolve_content_id || DEFAULT_RESOLVE_CONTENT_ID
+
+    this.ipfs = ipfsClient(this.options.ipfs.connect_options)
+
+    this.pins = {}
+
+    this.ipfs.id((err, identity) => {
+      if (err) {
+        debug(`Warning IPFS daemon not running: ${err.message}`)
+      } else {
+        debug(`IPFS node is up with identity: ${identity.id}`)
+      }
+    })
+  }
+
+  /*
+   * Uses bluebird's timeout mechanism to return a Promise that times out after
+   * the given timeout interval, and tries to execute the given operation within
+   * that time.
+   */
+  async withSpecifiedTimeout(timeout, operation) {
+    return new Promise(async (resolve, reject) => {
+      try {
+        resolve(await new Promise(operation))
+      } catch (err) {
+        reject(err)
+      }
+    }).timeout(timeout || this._timeout)
+  }
+
+  /*
+   * Resolve content ID with timeout.
+   */
+  async resolveContentIdWithTimeout(timeout, contentId) {
+    return await this.withSpecifiedTimeout(timeout, async (resolve, reject) => {
+      try {
+        resolve(await this._resolve_content_id(contentId))
+      } catch (err) {
+        reject(err)
+      }
+    })
+  }
+
+  /*
+   * Stat a content ID.
+   */
+  async stat(contentId, timeout) {
+    const resolved = await this.resolveContentIdWithTimeout(timeout, contentId)
+
+    return await this.withSpecifiedTimeout(timeout, (resolve, reject) => {
+      this.ipfs.files.stat(`/ipfs/${resolved}`, { withLocal: true }, (err, res) => {
+        if (err) {
+          reject(err)
+          return
+        }
+        resolve(res)
+      })
+    })
+  }
+
+  /*
+   * Return the size of a content ID.
+   */
+  async size(contentId, timeout) {
+    const stat = await this.stat(contentId, timeout)
+    return stat.size
+  }
+
+  /*
+   * Opens the specified content in read or write mode, and returns a Promise
+   * with the stream.
+   *
+   * Read streams will contain a fileInfo property, with:
+   *  - a `mimeType` field providing the file's MIME type, or a default.
+   *  - an `ext` property, providing a file extension suggestion, or a default.
+   *
+   * Write streams have a slightly different flow, in order to allow for MIME
+   * type detection and potential filtering. First off, they are written to a
+   * temporary location, and only committed to the backend once their
+   * `commit()` function is called.
+   *
+   * When the commit has finished, a `committed` event is emitted, which
+   * contains the IPFS backend's content ID.
+   *
+   * Write streams also emit a `fileInfo` event during writing. It is passed
+   * the `fileInfo` field as described above. Event listeners may now opt to
+   * abort the write or continue and eventually `commit()` the file. There is
+   * an explicit `cleanup()` function that removes temporary files as well,
+   * in case committing is not desired.
+   */
+  async open(contentId, mode, timeout) {
+    if (mode !== 'r' && mode !== 'w') {
+      throw Error('The only supported modes are "r" and "w".')
+    }
+
+    // Write stream
+    if (mode === 'w') {
+      return await this.createWriteStream(contentId, timeout)
+    }
+
+    // Read stream - with file type detection
+    return await this.createReadStream(contentId, timeout)
+  }
+
+  async createWriteStream() {
+    // IPFS wants us to just dump a stream into its storage, then returns a
+    // content ID (of its own).
+    // We need to instead return a stream immediately, that we eventually
+    // decorate with the content ID when that's available.
+    return new Promise(resolve => {
+      const stream = new StorageWriteStream(this)
+      resolve(stream)
+    })
+  }
+
+  async createReadStream(contentId, timeout) {
+    const resolved = await this.resolveContentIdWithTimeout(timeout, contentId)
+
+    let found = false
+    return await this.withSpecifiedTimeout(timeout, (resolve, reject) => {
+      const ls = this.ipfs.getReadableStream(resolved)
+      ls.on('data', async result => {
+        if (result.path === resolved) {
+          found = true
+
+          const ftStream = await fileType.stream(result.content)
+          resolve(fixFileInfoOnStream(ftStream))
+        }
+      })
+      ls.on('error', err => {
+        ls.end()
+        debug(err)
+        reject(err)
+      })
+      ls.on('end', () => {
+        if (!found) {
+          const err = new Error(`No matching content found for ${contentId}`)
+          debug(err)
+          reject(err)
+        }
+      })
+      ls.resume()
+    })
+  }
+
+  /*
+   * Synchronize the given content ID
+   */
+  async synchronize(contentId) {
+    const resolved = await this.resolveContentIdWithTimeout(this._timeout, contentId)
+
+    // validate resolved id is proper ipfs_cid, not null or empty string
+
+    if (this.pins[resolved]) {
+      return
+    }
+
+    debug(`Pinning ${resolved}`)
+
+    // Mark the content as pinned up front, so that concurrent synchronize()
+    // calls do not re-request it while this pin is still in flight.
+    this.pins[resolved] = true
+
+    // This call blocks until the file is retrieved.
+    this.ipfs.pin.add(resolved, { quiet: true, pin: true }, err => {
+      if (err) {
+        debug(`Error pinning: ${resolved}`)
+        delete this.pins[resolved]
+      } else {
+        debug(`Pinned ${resolved}`)
+      }
+    })
+  }
 }
 
 module.exports = {
-	Storage,
+  Storage,
 }
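
A minimal round-trip sketch of the API above, mirroring the resolver trick used by the tests below (the dummy content id is arbitrary):

  const { Storage } = require('@joystream/storage-node-backend')

  async function roundTrip(contents) {
    let hash
    // Resolve our dummy id to whatever hash commit() produced.
    const storage = Storage.create({ resolve_content_id: async () => hash })

    const out = await storage.open('dummy-id', 'w')
    const committed = new Promise(resolve => out.on('committed', resolve))
    out.on('finish', () => out.commit())
    out.end(contents)

    hash = await committed
    // The read stream carries fileInfo with mimeType/ext, as documented in open().
    return storage.open('dummy-id', 'r')
  }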

+ 179 - 179
storage-node/packages/storage/test/storage.js

@@ -31,192 +31,192 @@ const { Storage } = require('@joystream/storage-node-backend')
 const IPFS_CID_REGEX = /^Qm[1-9A-HJ-NP-Za-km-z]{44}$/
 
 function write(store, contentId, contents, callback) {
-	store
-		.open(contentId, 'w')
-		.then((stream) => {
-			stream.on('finish', () => {
-				stream.commit()
-			})
-			stream.on('committed', callback)
-
-			if (!stream.write(contents)) {
-				stream.once('drain', () => stream.end())
-			} else {
-				process.nextTick(() => stream.end())
-			}
-		})
-		.catch((err) => {
-			expect.fail(err)
-		})
+  store
+    .open(contentId, 'w')
+    .then(stream => {
+      stream.on('finish', () => {
+        stream.commit()
+      })
+      stream.on('committed', callback)
+
+      if (!stream.write(contents)) {
+        stream.once('drain', () => stream.end())
+      } else {
+        process.nextTick(() => stream.end())
+      }
+    })
+    .catch(err => {
+      expect.fail(err)
+    })
 }
 
 function readAll(stream) {
-	return new Promise((resolve, reject) => {
-		const chunks = []
-		stream.on('data', (chunk) => chunks.push(chunk))
-		stream.on('end', () => resolve(Buffer.concat(chunks)))
-		stream.on('error', (err) => reject(err))
-		stream.resume()
-	})
+  return new Promise((resolve, reject) => {
+    const chunks = []
+    stream.on('data', chunk => chunks.push(chunk))
+    stream.on('end', () => resolve(Buffer.concat(chunks)))
+    stream.on('error', err => reject(err))
+    stream.resume()
+  })
 }
 
 function createKnownObject(contentId, contents, callback) {
-	let hash
-	const store = Storage.create({
-		resolve_content_id: () => {
-			return hash
-		},
-	})
-
-	write(store, contentId, contents, (theHash) => {
-		hash = theHash
-
-		callback(store, hash)
-	})
+  let hash
+  const store = Storage.create({
+    resolve_content_id: () => {
+      return hash
+    },
+  })
+
+  write(store, contentId, contents, theHash => {
+    hash = theHash
+
+    callback(store, hash)
+  })
 }
 
 describe('storage/storage', () => {
-	let storage
-	before(async () => {
-		storage = await Storage.create({ timeout: 1900 })
-	})
-
-	describe('open()', () => {
-		it('can write a stream', (done) => {
-			write(storage, 'foobar', 'test-content', (hash) => {
-				expect(hash).to.not.be.undefined
-				expect(hash).to.match(IPFS_CID_REGEX)
-				done()
-			})
-		})
-
-		// it('detects the MIME type of a write stream', (done) => {
-		// 	const contents = fs.readFileSync('../../storage-node_new.svg')
-		// 	storage
-		// 		.open('mime-test', 'w')
-		// 		.then((stream) => {
-		// 			let fileInfo
-		// 			stream.on('fileInfo', (info) => {
-		// 				// Could filter & abort here now, but we're just going to set this,
-		// 				// and expect it to be set later...
-		// 				fileInfo = info
-		// 			})
-		//
-		// 			stream.on('finish', () => {
-		// 				stream.commit()
-		// 			})
-		//
-		// 			stream.on('committed', () => {
-		// 				// ... if fileInfo is not set here, there's an issue.
-		// 				expect(fileInfo).to.have.property('mimeType', 'application/xml')
-		// 				expect(fileInfo).to.have.property('ext', 'xml')
-		// 				done()
-		// 			})
-		//
-		// 			if (!stream.write(contents)) {
-		// 				stream.once('drain', () => stream.end())
-		// 			} else {
-		// 				process.nextTick(() => stream.end())
-		// 			}
-		// 		})
-		// 		.catch((err) => {
-		// 			expect.fail(err)
-		// 		})
-		// })
-
-		it('can read a stream', (done) => {
-			const contents = 'test-for-reading'
-			createKnownObject('foobar', contents, (store) => {
-				store
-					.open('foobar', 'r')
-					.then(async (stream) => {
-						const data = await readAll(stream)
-						expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
-						done()
-					})
-					.catch((err) => {
-						expect.fail(err)
-					})
-			})
-		})
-
-		it('detects the MIME type of a read stream', (done) => {
-			const contents = fs.readFileSync('../../storage-node_new.svg')
-			createKnownObject('foobar', contents, (store) => {
-				store
-					.open('foobar', 'r')
-					.then(async (stream) => {
-						const data = await readAll(stream)
-						expect(contents.length).to.equal(data.length)
-						expect(Buffer.compare(data, contents)).to.equal(0)
-						expect(stream).to.have.property('fileInfo')
-
-						// application/xml+svg would be better, but this is good-ish.
-						expect(stream.fileInfo).to.have.property('mimeType', 'application/xml')
-						expect(stream.fileInfo).to.have.property('ext', 'xml')
-						done()
-					})
-					.catch((err) => {
-						expect.fail(err)
-					})
-			})
-		})
-
-		it('provides default MIME type for read streams', (done) => {
-			const contents = 'test-for-reading'
-			createKnownObject('foobar', contents, (store) => {
-				store
-					.open('foobar', 'r')
-					.then(async (stream) => {
-						const data = await readAll(stream)
-						expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
-
-						expect(stream.fileInfo).to.have.property('mimeType', 'application/octet-stream')
-						expect(stream.fileInfo).to.have.property('ext', 'bin')
-						done()
-					})
-					.catch((err) => {
-						expect.fail(err)
-					})
-			})
-		})
-	})
-
-	describe('stat()', () => {
-		it('times out for unknown content', async () => {
-			const content = Buffer.from('this-should-not-exist')
-			const x = await storage.ipfs.add(content, { onlyHash: true })
-			const hash = x[0].hash
-
-			// Try to stat this entry, it should timeout.
-			expect(storage.stat(hash)).to.eventually.be.rejectedWith('timed out')
-		})
-
-		it('returns stats for a known object', (done) => {
-			const content = 'stat-test'
-			const expectedSize = content.length
-			createKnownObject('foobar', content, (store, hash) => {
-				expect(store.stat(hash)).to.eventually.have.property('size', expectedSize)
-				done()
-			})
-		})
-	})
-
-	describe('size()', () => {
-		it('times out for unknown content', async () => {
-			const content = Buffer.from('this-should-not-exist')
-			const x = await storage.ipfs.add(content, { onlyHash: true })
-			const hash = x[0].hash
-
-			// Try to stat this entry, it should timeout.
-			expect(storage.size(hash)).to.eventually.be.rejectedWith('timed out')
-		})
-
-		it('returns the size of a known object', (done) => {
-			createKnownObject('foobar', 'stat-test', (store, hash) => {
-				expect(store.size(hash)).to.eventually.equal(15)
-				done()
-			})
-		})
-	})
+  let storage
+  before(async () => {
+    storage = await Storage.create({ timeout: 1900 })
+  })
+
+  describe('open()', () => {
+    it('can write a stream', done => {
+      write(storage, 'foobar', 'test-content', hash => {
+        expect(hash).to.not.be.undefined
+        expect(hash).to.match(IPFS_CID_REGEX)
+        done()
+      })
+    })
+
+    // it('detects the MIME type of a write stream', (done) => {
+    // 	const contents = fs.readFileSync('../../storage-node_new.svg')
+    // 	storage
+    // 		.open('mime-test', 'w')
+    // 		.then((stream) => {
+    // 			let fileInfo
+    // 			stream.on('fileInfo', (info) => {
+    // 				// Could filter & abort here now, but we're just going to set this,
+    // 				// and expect it to be set later...
+    // 				fileInfo = info
+    // 			})
+    //
+    // 			stream.on('finish', () => {
+    // 				stream.commit()
+    // 			})
+    //
+    // 			stream.on('committed', () => {
+    // 				// ... if fileInfo is not set here, there's an issue.
+    // 				expect(fileInfo).to.have.property('mimeType', 'application/xml')
+    // 				expect(fileInfo).to.have.property('ext', 'xml')
+    // 				done()
+    // 			})
+    //
+    // 			if (!stream.write(contents)) {
+    // 				stream.once('drain', () => stream.end())
+    // 			} else {
+    // 				process.nextTick(() => stream.end())
+    // 			}
+    // 		})
+    // 		.catch((err) => {
+    // 			expect.fail(err)
+    // 		})
+    // })
+
+    it('can read a stream', done => {
+      const contents = 'test-for-reading'
+      createKnownObject('foobar', contents, store => {
+        store
+          .open('foobar', 'r')
+          .then(async stream => {
+            const data = await readAll(stream)
+            expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
+            done()
+          })
+          .catch(err => {
+            expect.fail(err)
+          })
+      })
+    })
+
+    it('detects the MIME type of a read stream', done => {
+      const contents = fs.readFileSync('../../storage-node_new.svg')
+      createKnownObject('foobar', contents, store => {
+        store
+          .open('foobar', 'r')
+          .then(async stream => {
+            const data = await readAll(stream)
+            expect(contents.length).to.equal(data.length)
+            expect(Buffer.compare(data, contents)).to.equal(0)
+            expect(stream).to.have.property('fileInfo')
+
+            // application/xml+svg would be better, but this is good-ish.
+            expect(stream.fileInfo).to.have.property('mimeType', 'application/xml')
+            expect(stream.fileInfo).to.have.property('ext', 'xml')
+            done()
+          })
+          .catch(err => {
+            expect.fail(err)
+          })
+      })
+    })
+
+    it('provides default MIME type for read streams', done => {
+      const contents = 'test-for-reading'
+      createKnownObject('foobar', contents, store => {
+        store
+          .open('foobar', 'r')
+          .then(async stream => {
+            const data = await readAll(stream)
+            expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
+
+            expect(stream.fileInfo).to.have.property('mimeType', 'application/octet-stream')
+            expect(stream.fileInfo).to.have.property('ext', 'bin')
+            done()
+          })
+          .catch(err => {
+            expect.fail(err)
+          })
+      })
+    })
+  })
+
+  describe('stat()', () => {
+    it('times out for unknown content', async () => {
+      const content = Buffer.from('this-should-not-exist')
+      const x = await storage.ipfs.add(content, { onlyHash: true })
+      const hash = x[0].hash
+
+      // Try to stat this entry, it should timeout.
+      return expect(storage.stat(hash)).to.eventually.be.rejectedWith('timed out')
+    })
+
+    it('returns stats for a known object', done => {
+      const content = 'stat-test'
+      const expectedSize = content.length
+      createKnownObject('foobar', content, (store, hash) => {
+        expect(store.stat(hash)).to.eventually.have.property('size', expectedSize)
+        done()
+      })
+    })
+  })
+
+  describe('size()', () => {
+    it('times out for unknown content', async () => {
+      const content = Buffer.from('this-should-not-exist')
+      const x = await storage.ipfs.add(content, { onlyHash: true })
+      const hash = x[0].hash
+
+      // Try to stat this entry, it should timeout.
+      return expect(storage.size(hash)).to.eventually.be.rejectedWith('timed out')
+    })
+
+    it('returns the size of a known object', done => {
+      createKnownObject('foobar', 'stat-test', (store, hash) => {
+        expect(store.size(hash)).to.eventually.equal(15)
+        done()
+      })
+    })
+  })
 })

+ 7 - 7
storage-node/packages/util/externalPromise.js

@@ -5,16 +5,16 @@
  * concurrent async operations are initiated that are all waiting on the same result value.
  */
 function newExternallyControlledPromise() {
-	let resolve, reject
+  let resolve, reject
 
-	const promise = new Promise((res, rej) => {
-		resolve = res
-		reject = rej
-	})
+  const promise = new Promise((res, rej) => {
+    resolve = res
+    reject = rej
+  })
 
-	return { resolve, reject, promise }
+  return { resolve, reject, promise }
 }
 
 module.exports = {
-	newExternallyControlledPromise,
+  newExternallyControlledPromise,
 }
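
Usage sketch: create the promise once, hand `promise` to any number of waiters, and settle it exactly once when the result arrives.

  const { newExternallyControlledPromise } = require('./externalPromise')

  const ready = newExternallyControlledPromise()

  // Several async operations can wait on the same result value.
  ready.promise.then(value => console.log('got', value))
  ready.promise.then(value => console.log('also got', value))

  // The producer settles it from outside the executor.
  ready.resolve(42)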

+ 23 - 23
storage-node/packages/util/fs/resolve.js

@@ -32,34 +32,34 @@ const debug = require('debug')('joystream:util:fs:resolve')
  * a base directory.
  */
 function resolve(base, name) {
-	debug('Resolving', name)
+  debug('Resolving', name)
 
-	// In a firs step, we strip leading slashes from the name, because they're
-	// just saying "relative to the base" in our use case.
-	let res = name.replace(/^\/+/, '')
-	debug('Stripped', res)
+  // In a first step, we strip leading slashes from the name, because they're
+  // just saying "relative to the base" in our use case.
+  let res = name.replace(/^\/+/, '')
+  debug('Stripped', res)
 
-	// At this point resolving the path should stay within the base we specify.
-	// We do specify a base other than the file system root, because the file
-	// everything is always relative to the file system root.
-	const testBase = path.join(path.sep, 'test-base')
-	debug('Test base is', testBase)
-	res = path.resolve(testBase, res)
-	debug('Resolved', res)
+  // At this point resolving the path should stay within the base we specify.
+  // We specify a base other than the file system root, because otherwise
+  // everything would resolve relative to the file system root.
+  const testBase = path.join(path.sep, 'test-base')
+  debug('Test base is', testBase)
+  res = path.resolve(testBase, res)
+  debug('Resolved', res)
 
-	// Ok, we can check for violations now.
-	if (res.slice(0, testBase.length) !== testBase) {
-		throw Error(`Name "${name}" cannot be resolved to a repo relative path, aborting!`)
-	}
+  // Ok, we can check for violations now.
+  if (res.slice(0, testBase.length) !== testBase) {
+    throw Error(`Name "${name}" cannot be resolved to a repo relative path, aborting!`)
+  }
 
-	// If we strip the base now, we have the relative name resolved.
-	res = res.slice(testBase.length + 1)
-	debug('Relative', res)
+  // If we strip the base now, we have the relative name resolved.
+  res = res.slice(testBase.length + 1)
+  debug('Relative', res)
 
-	// Finally we can join this relative name to the requested base.
-	res = path.join(base, res)
-	debug('Result', res)
-	return res
+  // Finally we can join this relative name to the requested base.
+  res = path.join(base, res)
+  debug('Result', res)
+  return res
 }
 
 module.exports = resolve
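
The effect is path-traversal protection: names resolve inside the base, and anything escaping it throws. A few illustrative calls:

  const resolve = require('./resolve')

  resolve('/srv/repo', '/foo/bar.txt')     // '/srv/repo/foo/bar.txt'
  resolve('/srv/repo', 'foo/../bar.txt')   // '/srv/repo/bar.txt'
  resolve('/srv/repo', '../../etc/passwd') // throws - escapes the base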

+ 98 - 98
storage-node/packages/util/fs/walk.js

@@ -24,95 +24,95 @@ const path = require('path')
 const debug = require('debug')('joystream:util:fs:walk')
 
 class Walker {
-	constructor(archive, base, cb) {
-		this.archive = archive
-		this.base = base
-		this.slice_offset = this.base.length
-		if (this.base[this.slice_offset - 1] !== '/') {
-			this.slice_offset += 1
-		}
-		this.cb = cb
-		this.pending = 0
-	}
-
-	/*
-	 * Check pending
-	 */
-	checkPending(name) {
-		// Decrease pending count again.
-		this.pending -= 1
-		debug('Finishing', name, 'decreases pending to', this.pending)
-		if (!this.pending) {
-			debug('No more pending.')
-			this.cb(null)
-		}
-	}
-
-	/*
-	 * Helper function for walk; split out because it's used in two places.
-	 */
-	reportAndRecurse(relname, fname, lstat, linktarget) {
-		// First report the value
-		this.cb(null, relname, lstat, linktarget)
-
-		// Recurse
-		if (lstat.isDirectory()) {
-			this.walk(fname)
-		}
-
-		this.checkPending(fname)
-	}
-
-	walk(dir) {
-		// This is a little hacky - since readdir() may take a while, and we don't
-		// want the pending count to drop to zero before it's finished, we bump
-		// it up and down while readdir() does it's job.
-		// What this achieves is that when processing a parent directory finishes
-		// before walk() on a subdirectory could finish its readdir() call, the
-		// pending count still has a value.
-		// Note that in order not to hang on empty directories, we need to
-		// explicitly check the pending count in cases when there are no files.
-		this.pending += 1
-		this.archive.readdir(dir, (err, files) => {
-			if (err) {
-				this.cb(err)
-				return
-			}
-
-			// More pending data.
-			this.pending += files.length
-			debug('Reading', dir, 'bumps pending to', this.pending)
-
-			files.forEach((name) => {
-				const fname = path.resolve(dir, name)
-				this.archive.lstat(fname, (err2, lstat) => {
-					if (err2) {
-						this.cb(err2)
-						return
-					}
-
-					// The base is always prefixed, so a simple string slice should do.
-					const relname = fname.slice(this.slice_offset)
-
-					// We have a symbolic link? Resolve it.
-					if (lstat.isSymbolicLink()) {
-						this.archive.readlink(fname, (err3, linktarget) => {
-							if (err3) {
-								this.cb(err3)
-								return
-							}
-
-							this.reportAndRecurse(relname, fname, lstat, linktarget)
-						})
-					} else {
-						this.reportAndRecurse(relname, fname, lstat)
-					}
-				})
-			})
-
-			this.checkPending(dir)
-		})
-	}
+  constructor(archive, base, cb) {
+    this.archive = archive
+    this.base = base
+    this.slice_offset = this.base.length
+    if (this.base[this.slice_offset - 1] !== '/') {
+      this.slice_offset += 1
+    }
+    this.cb = cb
+    this.pending = 0
+  }
+
+  /*
+   * Check pending
+   */
+  checkPending(name) {
+    // Decrease pending count again.
+    this.pending -= 1
+    debug('Finishing', name, 'decreases pending to', this.pending)
+    if (!this.pending) {
+      debug('No more pending.')
+      this.cb(null)
+    }
+  }
+
+  /*
+   * Helper function for walk; split out because it's used in two places.
+   */
+  reportAndRecurse(relname, fname, lstat, linktarget) {
+    // First report the value
+    this.cb(null, relname, lstat, linktarget)
+
+    // Recurse
+    if (lstat.isDirectory()) {
+      this.walk(fname)
+    }
+
+    this.checkPending(fname)
+  }
+
+  walk(dir) {
+    // This is a little hacky - since readdir() may take a while, and we don't
+    // want the pending count to drop to zero before it's finished, we bump
+    // it up and down while readdir() does its job.
+    // What this achieves is that when processing a parent directory finishes
+    // before walk() on a subdirectory could finish its readdir() call, the
+    // pending count still has a value.
+    // Note that in order not to hang on empty directories, we need to
+    // explicitly check the pending count in cases when there are no files.
+    this.pending += 1
+    this.archive.readdir(dir, (err, files) => {
+      if (err) {
+        this.cb(err)
+        return
+      }
+
+      // More pending data.
+      this.pending += files.length
+      debug('Reading', dir, 'bumps pending to', this.pending)
+
+      files.forEach(name => {
+        const fname = path.resolve(dir, name)
+        this.archive.lstat(fname, (err2, lstat) => {
+          if (err2) {
+            this.cb(err2)
+            return
+          }
+
+          // The base is always prefixed, so a simple string slice should do.
+          const relname = fname.slice(this.slice_offset)
+
+          // We have a symbolic link? Resolve it.
+          if (lstat.isSymbolicLink()) {
+            this.archive.readlink(fname, (err3, linktarget) => {
+              if (err3) {
+                this.cb(err3)
+                return
+              }
+
+              this.reportAndRecurse(relname, fname, lstat, linktarget)
+            })
+          } else {
+            this.reportAndRecurse(relname, fname, lstat)
+          }
+        })
+      })
+
+      this.checkPending(dir)
+    })
+  }
 }
 
 /*
@@ -127,13 +127,13 @@ class Walker {
  * The callback is invoked one last time without data to signal the end of data.
  */
 module.exports = function (base, archive, cb) {
-	// Archive is optional and defaults to fs, but cb is not.
-	if (!cb) {
-		cb = archive
-		archive = fs
-	}
-
-	const resolved = path.resolve(base)
-	const w = new Walker(archive, resolved, cb)
-	w.walk(resolved)
+  // Archive is optional and defaults to fs, but cb is not.
+  if (!cb) {
+    cb = archive
+    archive = fs
+  }
+
+  const resolved = path.resolve(base)
+  const w = new Walker(archive, resolved, cb)
+  w.walk(resolved)
 }
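
Usage sketch: the callback fires once per entry, and one final time with no data to signal the end.

  const walk = require('./walk')

  walk('/srv/repo', (err, relname, lstat, linktarget) => {
    if (err) throw err
    if (!relname) {
      console.log('walk finished')
      return
    }
    console.log(lstat.isDirectory() ? 'dir' : 'file', relname, linktarget || '')
  })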

+ 75 - 75
storage-node/packages/util/lru.js

@@ -26,92 +26,92 @@ const debug = require('debug')('joystream:util:lru')
  * Simple least recently used cache.
  */
 class LRUCache {
-	constructor(capacity = DEFAULT_CAPACITY) {
-		this.capacity = capacity
-		this.clear()
-	}
+  constructor(capacity = DEFAULT_CAPACITY) {
+    this.capacity = capacity
+    this.clear()
+  }
 
-	/*
-	 * Return the entry with the given key, and update it's usage.
-	 */
-	get(key) {
-		const val = this.store.get(key)
-		if (val) {
-			this.access.set(key, Date.now())
-		}
-		return val
-	}
+  /*
+   * Return the entry with the given key, and update its usage.
+   */
+  get(key) {
+    const val = this.store.get(key)
+    if (val) {
+      this.access.set(key, Date.now())
+    }
+    return val
+  }
 
-	/*
-	 * Return true if the key is the cache, false otherwise.
-	 */
-	has(key) {
-		return this.store.has(key)
-	}
+  /*
+   * Return true if the key is in the cache, false otherwise.
+   */
+  has(key) {
+    return this.store.has(key)
+  }
 
-	/*
-	 * Put a value into the cache.
-	 */
-	put(key, value) {
-		this.store.set(key, value)
-		this.access.set(key, Date.now())
-		this._prune()
-	}
+  /*
+   * Put a value into the cache.
+   */
+  put(key, value) {
+    this.store.set(key, value)
+    this.access.set(key, Date.now())
+    this._prune()
+  }
 
-	/*
-	 * Delete a value from the cache.
-	 */
-	del(key) {
-		this.store.delete(key)
-		this.access.delete(key)
-	}
+  /*
+   * Delete a value from the cache.
+   */
+  del(key) {
+    this.store.delete(key)
+    this.access.delete(key)
+  }
 
-	/*
-	 * Current size of the cache
-	 */
-	size() {
-		return this.store.size
-	}
+  /*
+   * Current size of the cache
+   */
+  size() {
+    return this.store.size
+  }
 
-	/*
-	 * Clear the LRU cache entirely.
-	 */
-	clear() {
-		this.store = new Map()
-		this.access = new Map()
-	}
+  /*
+   * Clear the LRU cache entirely.
+   */
+  clear() {
+    this.store = new Map()
+    this.access = new Map()
+  }
 
-	/*
-	 * Internal pruning function.
-	 */
-	_prune() {
-		debug('About to prune; have', this.store.size, 'and capacity is', this.capacity)
+  /*
+   * Internal pruning function.
+   */
+  _prune() {
+    debug('About to prune; have', this.store.size, 'and capacity is', this.capacity)
 
-		const sorted = Array.from(this.access.entries())
-		sorted.sort((first, second) => {
-			if (first[1] === second[1]) {
-				return 0
-			}
-			return first[1] < second[1] ? -1 : 1
-		})
-		debug('Sorted keys are:', sorted)
+    const sorted = Array.from(this.access.entries())
+    sorted.sort((first, second) => {
+      if (first[1] === second[1]) {
+        return 0
+      }
+      return first[1] < second[1] ? -1 : 1
+    })
+    debug('Sorted keys are:', sorted)
 
-		debug('Have to prune', this.store.size - this.capacity, 'items.')
-		let idx = 0
-		const toPrune = []
-		while (idx < sorted.length && toPrune.length < this.store.size - this.capacity) {
-			toPrune.push(sorted[idx][0])
-			++idx
-		}
+    debug('Have to prune', this.store.size - this.capacity, 'items.')
+    let idx = 0
+    const toPrune = []
+    while (idx < sorted.length && toPrune.length < this.store.size - this.capacity) {
+      toPrune.push(sorted[idx][0])
+      ++idx
+    }
 
-		toPrune.forEach((key) => {
-			this.store.delete(key)
-			this.access.delete(key)
-		})
-		debug('Size after pruning', this.store.size)
-	}
+    toPrune.forEach(key => {
+      this.store.delete(key)
+      this.access.delete(key)
+    })
+    debug('Size after pruning', this.store.size)
+  }
 }
 
 module.exports = {
-	LRUCache,
+  LRUCache,
 }
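
Usage sketch (recency is tracked with Date.now(), so this assumes the calls don't all land in the same millisecond):

  const { LRUCache } = require('./lru')

  const cache = new LRUCache(2) // capacity of two entries

  cache.put('a', 1)
  cache.put('b', 2)
  cache.get('a')    // refreshes 'a', making 'b' the least recently used
  cache.put('c', 3) // exceeds capacity and evicts 'b'

  cache.has('b')    // false
  cache.size()      // 2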

+ 118 - 118
storage-node/packages/util/pagination.js

@@ -22,52 +22,52 @@ const debug = require('debug')('joystream:middleware:pagination')
 
 // Pagination definitions
 const apiDefs = {
-	parameters: {
-		paginationLimit: {
-			name: 'limit',
-			in: 'query',
-			description: 'Number of items per page.',
-			required: false,
-			schema: {
-				type: 'integer',
-				minimum: 1,
-				maximum: 50,
-				default: 20,
-			},
-		},
-		paginationOffset: {
-			name: 'offset',
-			in: 'query',
-			description: 'Page number (offset)',
-			schema: {
-				type: 'integer',
-				minimum: 0,
-			},
-		},
-	},
-	schemas: {
-		PaginationInfo: {
-			type: 'object',
-			required: ['self'],
-			properties: {
-				self: {
-					type: 'string',
-				},
-				next: {
-					type: 'string',
-				},
-				prev: {
-					type: 'string',
-				},
-				first: {
-					type: 'string',
-				},
-				last: {
-					type: 'string',
-				},
-			},
-		},
-	},
+  parameters: {
+    paginationLimit: {
+      name: 'limit',
+      in: 'query',
+      description: 'Number of items per page.',
+      required: false,
+      schema: {
+        type: 'integer',
+        minimum: 1,
+        maximum: 50,
+        default: 20,
+      },
+    },
+    paginationOffset: {
+      name: 'offset',
+      in: 'query',
+      description: 'Number of items to skip (offset).',
+      schema: {
+        type: 'integer',
+        minimum: 0,
+      },
+    },
+  },
+  schemas: {
+    PaginationInfo: {
+      type: 'object',
+      required: ['self'],
+      properties: {
+        self: {
+          type: 'string',
+        },
+        next: {
+          type: 'string',
+        },
+        prev: {
+          type: 'string',
+        },
+        first: {
+          type: 'string',
+        },
+        last: {
+          type: 'string',
+        },
+      },
+    },
+  },
 }
 
 /**
@@ -83,76 +83,76 @@ const apiDefs = {
  *      If lastOffset is given, create a last link with that offset
  **/
 module.exports = {
-	// Add pagination parameters and pagination info responses.
-	parameters: [
-		{ $ref: '#/components/parameters/paginationLimit' },
-		{ $ref: '#/components/parameters/paginationOffset' },
-	],
-
-	response: {
-		$ref: '#/components/schema/PaginationInfo',
-	},
-
-	// Update swagger/openapi specs with our own parameters and definitions
-	openapi(api) {
-		api.components = api.components || {}
-		api.components.parameters = { ...(api.components.parameters || {}), ...apiDefs.parameters }
-		api.components.schemas = { ...(api.components.schemas || {}), ...apiDefs.schemas }
-		return api
-	},
-
-	// Pagination function
-	paginate(req, res, lastOffset) {
-		// Skip if the response is not an object.
-		if (Object.prototype.toString.call(res) !== '[object Object]') {
-			debug('Cannot paginate non-objects.')
-			return res
-		}
-
-		// Defaults for parameters
-		const offset = req.query.offset || 0
-		const limit = req.query.limit || 20
-		debug('Create pagination links from offset=' + offset, 'limit=' + limit)
-
-		// Parse current url
-		const url = require('url')
-		const reqUrl = url.parse(req.protocol + '://' + req.get('host') + req.originalUrl)
-		const params = new url.URLSearchParams(reqUrl.query)
-
-		// Pagination object
-		const pagination = {
-			self: reqUrl.href,
-		}
-
-		const prev = offset - limit
-		if (prev >= 0) {
-			params.set('offset', prev)
-			reqUrl.search = params.toString()
-			pagination.prev = url.format(reqUrl)
-		}
-
-		const next = offset + limit
-		if (next >= 0) {
-			params.set('offset', next)
-			reqUrl.search = params.toString()
-			pagination.next = url.format(reqUrl)
-		}
-
-		if (lastOffset) {
-			params.set('offset', lastOffset)
-			reqUrl.search = params.toString()
-			pagination.last = url.format(reqUrl)
-		}
-
-		// First
-		params.set('offset', 0)
-		reqUrl.search = params.toString()
-		pagination.first = url.format(reqUrl)
-
-		debug('pagination', pagination)
-
-		// Now set pagination values in response.
-		res.pagination = pagination
-		return res
-	},
+  // Add pagination parameters and pagination info responses.
+  parameters: [
+    { $ref: '#/components/parameters/paginationLimit' },
+    { $ref: '#/components/parameters/paginationOffset' },
+  ],
+
+  response: {
+    $ref: '#/components/schemas/PaginationInfo',
+  },
+
+  // Update swagger/openapi specs with our own parameters and definitions
+  openapi(api) {
+    api.components = api.components || {}
+    api.components.parameters = { ...(api.components.parameters || {}), ...apiDefs.parameters }
+    api.components.schemas = { ...(api.components.schemas || {}), ...apiDefs.schemas }
+    return api
+  },
+
+  // Pagination function
+  paginate(req, res, lastOffset) {
+    // Skip if the response is not an object.
+    if (Object.prototype.toString.call(res) !== '[object Object]') {
+      debug('Cannot paginate non-objects.')
+      return res
+    }
+
+    // Defaults for parameters; coerce query string values to numbers so the
+    // offset + limit arithmetic below is addition, not string concatenation.
+    const offset = parseInt(req.query.offset, 10) || 0
+    const limit = parseInt(req.query.limit, 10) || 20
+    debug('Create pagination links from offset=' + offset, 'limit=' + limit)
+
+    // Parse current url
+    const url = require('url')
+    const reqUrl = url.parse(req.protocol + '://' + req.get('host') + req.originalUrl)
+    const params = new url.URLSearchParams(reqUrl.query)
+
+    // Pagination object
+    const pagination = {
+      self: reqUrl.href,
+    }
+
+    const prev = offset - limit
+    if (prev >= 0) {
+      params.set('offset', prev)
+      reqUrl.search = params.toString()
+      pagination.prev = url.format(reqUrl)
+    }
+
+    const next = offset + limit
+    if (next >= 0) {
+      params.set('offset', next)
+      reqUrl.search = params.toString()
+      pagination.next = url.format(reqUrl)
+    }
+
+    if (lastOffset) {
+      params.set('offset', lastOffset)
+      reqUrl.search = params.toString()
+      pagination.last = url.format(reqUrl)
+    }
+
+    // First
+    params.set('offset', 0)
+    reqUrl.search = params.toString()
+    pagination.first = url.format(reqUrl)
+
+    debug('pagination', pagination)
+
+    // Now set pagination values in response.
+    res.pagination = pagination
+    return res
+  },
 }
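
Wired into an express handler, the flow looks roughly like this (the `app` route and loadItems() data source are hypothetical):

  const pagination = require('./pagination')

  app.get('/items', (req, res) => {
    const body = { items: loadItems(req.query.offset, req.query.limit) } // hypothetical data source
    // paginate() decorates the body with self/first/prev/next links.
    res.json(pagination.paginate(req, body))
  })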

+ 374 - 374
storage-node/packages/util/ranges.js

@@ -32,20 +32,20 @@ const debug = require('debug')('joystream:util:ranges')
  * in an array of int or undefined (if not provided).
  */
 function parseRange(range) {
-	const matches = range.match(/^(\d+-\d+|\d+-|-\d+|\*)$/u)
-	if (!matches) {
-		throw new Error(`Not a valid range: ${range}`)
-	}
+  const matches = range.match(/^(\d+-\d+|\d+-|-\d+|\*)$/u)
+  if (!matches) {
+    throw new Error(`Not a valid range: ${range}`)
+  }
 
-	const vals = matches[1].split('-').map((v) => {
-		return v === '*' || v === '' ? undefined : parseInt(v, 10)
-	})
+  const vals = matches[1].split('-').map(v => {
+    return v === '*' || v === '' ? undefined : parseInt(v, 10)
+  })
 
-	if (vals[1] <= vals[0]) {
-		throw new Error(`Invalid range: start "${vals[0]}" must be before end "${vals[1]}".`)
-	}
+  if (vals[1] <= vals[0]) {
+    throw new Error(`Invalid range: start "${vals[0]}" must be before end "${vals[1]}".`)
+  }
 
-	return [vals[0], vals[1]]
+  return [vals[0], vals[1]]
 }
 
 /*
@@ -55,74 +55,74 @@ function parseRange(range) {
  * 'bytes'.
  */
 function parse(rangeStr) {
-	const res = {}
-	debug('Parse range header value:', rangeStr)
-	const matches = rangeStr.match(/^(([^\s]+)=)?((?:(?:\d+-\d+|-\d+|\d+-),?)+)$/u)
-	if (!matches) {
-		throw new Error(`Not a valid range header: ${rangeStr}`)
-	}
-
-	res.unit = matches[2] || 'bytes'
-	res.rangeStr = matches[3]
-	res.ranges = []
-
-	// Parse individual ranges
-	const ranges = []
-	res.rangeStr.split(',').forEach((range) => {
-		ranges.push(parseRange(range))
-	})
-
-	// Merge ranges into result.
-	ranges.forEach((newRange) => {
-		debug('Found range:', newRange)
-
-		let isMerged = false
-		for (const i in res.ranges) {
-			const oldRange = res.ranges[i]
-
-			// Skip if the new range is fully separate from the old range.
-			if (oldRange[1] + 1 < newRange[0] || newRange[1] + 1 < oldRange[0]) {
-				debug('Range does not overlap with', oldRange)
-				continue
-			}
-
-			// If we know they're adjacent or overlapping, we construct the
-			// merged range from the lower start and the higher end of both
-			// ranges.
-			const merged = [Math.min(oldRange[0], newRange[0]), Math.max(oldRange[1], newRange[1])]
-			res.ranges[i] = merged
-			isMerged = true
-			debug('Merged', newRange, 'into', oldRange, 'as', merged)
-		}
-
-		if (!isMerged) {
-			debug('Non-overlapping range!')
-			res.ranges.push(newRange)
-		}
-	})
-
-	// Finally, sort ranges
-	res.ranges.sort((first, second) => {
-		if (first[0] === second[0]) {
-			// Should not happen due to merging.
-			return 0
-		}
-		return first[0] < second[0] ? -1 : 1
-	})
-
-	debug('Result of parse is', res)
-	return res
+  const res = {}
+  debug('Parse range header value:', rangeStr)
+  const matches = rangeStr.match(/^(([^\s]+)=)?((?:(?:\d+-\d+|-\d+|\d+-),?)+)$/u)
+  if (!matches) {
+    throw new Error(`Not a valid range header: ${rangeStr}`)
+  }
+
+  res.unit = matches[2] || 'bytes'
+  res.rangeStr = matches[3]
+  res.ranges = []
+
+  // Parse individual ranges
+  const ranges = []
+  res.rangeStr.split(',').forEach(range => {
+    ranges.push(parseRange(range))
+  })
+
+  // Merge ranges into result.
+  ranges.forEach(newRange => {
+    debug('Found range:', newRange)
+
+    let isMerged = false
+    for (const i in res.ranges) {
+      const oldRange = res.ranges[i]
+
+      // Skip if the new range is fully separate from the old range.
+      if (oldRange[1] + 1 < newRange[0] || newRange[1] + 1 < oldRange[0]) {
+        debug('Range does not overlap with', oldRange)
+        continue
+      }
+
+      // If we know they're adjacent or overlapping, we construct the
+      // merged range from the lower start and the higher end of both
+      // ranges.
+      const merged = [Math.min(oldRange[0], newRange[0]), Math.max(oldRange[1], newRange[1])]
+      res.ranges[i] = merged
+      isMerged = true
+      debug('Merged', newRange, 'into', oldRange, 'as', merged)
+    }
+
+    if (!isMerged) {
+      debug('Non-overlapping range!')
+      res.ranges.push(newRange)
+    }
+  })
+
+  // Finally, sort ranges
+  res.ranges.sort((first, second) => {
+    if (first[0] === second[0]) {
+      // Should not happen due to merging.
+      return 0
+    }
+    return first[0] < second[0] ? -1 : 1
+  })
+
+  debug('Result of parse is', res)
+  return res
 }
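
To make the merge-and-sort behaviour concrete, a few sample calls (the same cases exercised in test/ranges.js below):

const ranges = require('@joystream/storage-utils/ranges')

ranges.parse('bytes=0-100') // { unit: 'bytes', rangeStr: '0-100', ranges: [[0, 100]] }
ranges.parse('0-20,10-30')  // overlapping ranges merge: ranges === [[0, 30]]
ranges.parse('0-10,11-20')  // adjacent ranges merge too: ranges === [[0, 20]]
ranges.parse('10-30,0-5')   // disjoint ranges are sorted: ranges === [[0, 5], [10, 30]]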
 
 /*
  * Async version of parse().
  */
 function parseAsync(rangeStr, cb) {
-	try {
-		return cb(parse(rangeStr))
-	} catch (err) {
-		return cb(null, err)
-	}
+  try {
+    return cb(parse(rangeStr))
+  } catch (err) {
+    return cb(null, err)
+  }
 }
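
Note that parseAsync() inverts the customary Node callback order: the parsed result is the first argument and the error, if any, the second. A caller sketch:

const ranges = require('@joystream/storage-utils/ranges')

ranges.parseAsync('bytes=0-100', (result, err) => {
  if (err) {
    console.error('Invalid Range header:', err.message)
    return
  }
  console.log(result.ranges) // [[0, 100]]
})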
 
 /*
@@ -142,306 +142,306 @@ function parseAsync(rangeStr, cb) {
  * future.
  */
 class RangeSender {
-	constructor(response, stream, opts, endCallback) {
-		// Options
-		this.name = opts.name || 'content.bin'
-		this.type = opts.type || 'application/octet-stream'
-		this.size = opts.size
-		this.ranges = opts.ranges
-		this.download = opts.download || false
-
-		// Range handling related state.
-		this.readOffset = 0 // Nothing read so far
-		this.rangeIndex = -1 // No range index yet.
-		this.rangeBoundary = undefined // Generate boundary when needed.
-
-		// Event handlers & state
-		this.handlers = {}
-		this.opened = false
-
-		debug('RangeSender:', this)
-		if (opts.ranges) {
-			debug('Parsed ranges:', opts.ranges.ranges)
-		}
-
-		// Parameters
-		this.response = response
-		this.stream = stream
-		this.opts = opts
-		this.endCallback = endCallback
-	}
-
-	onError(err) {
-		// Assume hiding the actual error is best, and default to 404.
-		debug('Error:', err)
-		if (!this.response.headersSent) {
-			this.response.status(err.code || 404).send({
-				message: err.message || `File not found: ${this.name}`,
-			})
-		}
-		if (this.endCallback) {
-			this.endCallback(err)
-		}
-	}
-
-	onEnd() {
-		debug('End of stream.')
-		this.response.end()
-		if (this.endCallback) {
-			this.endCallback()
-		}
-	}
-
-	// **** No ranges
-	onOpenNoRange() {
-		// File got opened, so we can set headers/status
-		debug('Open succeeded:', this.name, this.type)
-		this.opened = true
-
-		this.response.status(200)
-		this.response.contentType(this.type)
-		this.response.header('Accept-Ranges', 'bytes')
-		this.response.header('Content-Transfer-Encoding', 'binary')
-
-		if (this.download) {
-			this.response.header('Content-Disposition', `attachment; filename="${this.name}"`)
-		} else {
-			this.response.header('Content-Disposition', 'inline')
-		}
-
-		if (this.size) {
-			this.response.header('Content-Length', this.size)
-		}
-	}
-
-	onDataNoRange(chunk) {
-		if (!this.opened) {
-			this.handlers.open()
-		}
-
-		// As simple as it can be.
-		this.response.write(Buffer.from(chunk, 'binary'))
-	}
-
-	// *** With ranges
-	nextRangeHeaders() {
-		// Next range
-		this.rangeIndex += 1
-		if (this.rangeIndex >= this.ranges.ranges.length) {
-			debug('Cannot advance range index; we are done.')
-			return undefined
-		}
-
-		// Calculate this range's size.
-		const range = this.ranges.ranges[this.rangeIndex]
-		let totalSize
-		if (this.size) {
-			totalSize = this.size
-		}
-		if (typeof range[0] === 'undefined') {
-			range[0] = 0
-		}
-		if (typeof range[1] === 'undefined') {
-			if (this.size) {
-				range[1] = totalSize - 1
-			}
-		}
-
-		let sendSize
-		if (typeof range[0] !== 'undefined' && typeof range[1] !== 'undefined') {
-			sendSize = range[1] - range[0] + 1
-		}
-
-		// Write headers, but since we may be in a multipart situation, write them
-		// explicitly to the stream.
-		const start = typeof range[0] === 'undefined' ? '' : `${range[0]}`
-		const end = typeof range[1] === 'undefined' ? '' : `${range[1]}`
-
-		let sizeStr
-		if (totalSize) {
-			sizeStr = `${totalSize}`
-		} else {
-			sizeStr = '*'
-		}
-
-		const ret = {
-			'Content-Range': `bytes ${start}-${end}/${sizeStr}`,
-			'Content-Type': `${this.type}`,
-		}
-		if (sendSize) {
-			ret['Content-Length'] = `${sendSize}`
-		}
-		return ret
-	}
-
-	nextRange() {
-		if (this.ranges.ranges.length === 1) {
-			debug('Cannot start new range; only one requested.')
-			this.stream.off('data', this.handlers.data)
-			return false
-		}
-
-		const headers = this.nextRangeHeaders()
-
-		if (headers) {
-			const onDataRanges = new streamBuf.WritableStreamBuffer()
-			// We start a range with a boundary.
-			onDataRanges.write(`\r\n--${this.rangeBoundary}\r\n`)
-
-			// Then we write the range headers.
-			for (const header in headers) {
-				onDataRanges.write(`${header}: ${headers[header]}\r\n`)
-			}
-			onDataRanges.write('\r\n')
-			this.response.write(onDataRanges.getContents())
-			debug('New range started.')
-			return true
-		}
-
-		// No headers means we're finishing the last range.
-		this.response.write(`\r\n--${this.rangeBoundary}--\r\n`)
-		debug('End of ranges sent.')
-		this.stream.off('data', this.handlers.data)
-		return false
-	}
-
-	onOpenRanges() {
-		// File got opened, so we can set headers/status
-		debug('Open succeeded:', this.name, this.type)
-		this.opened = true
-
-		this.response.header('Accept-Ranges', 'bytes')
-		this.response.header('Content-Transfer-Encoding', 'binary')
-		this.response.header('Content-Disposition', 'inline')
-
-		// For single ranges, the content length should be the size of the
-		// range. For multiple ranges, we don't send a content length
-		// header.
-		//
-		// Similarly, the type is different whether or not there is more than
-		// one range.
-		if (this.ranges.ranges.length === 1) {
-			this.response.writeHead(206, 'Partial Content', this.nextRangeHeaders())
-		} else {
-			this.rangeBoundary = uuid.v4()
-			const headers = {
-				'Content-Type': `multipart/byteranges; boundary=${this.rangeBoundary}`,
-			}
-			this.response.writeHead(206, 'Partial Content', headers)
-			this.nextRange()
-		}
-	}
-
-	onDataRanges(chunk) {
-		if (!this.opened) {
-			this.handlers.open()
-		}
-		// Crap, node.js streams are stupid. No guarantee for seek support. Sure,
-		// that makes node.js easier to implement, but offloads everything onto the
-		// application developer.
-		//
-		// So, we skip chunks until our read position is within the range we want to
-		// send at the moment. We're relying on ranges being in-order, which this
-		// file's parser luckily (?) provides.
-		//
-		// The simplest optimization would be to seek() to the start of each
-		// range.
-		const chunkRange = [this.readOffset, this.readOffset + chunk.length - 1]
-		debug('= Got chunk with byte range', chunkRange)
-		while (true) {
-			let reqRange = this.ranges.ranges[this.rangeIndex]
-			if (!reqRange) {
-				break
-			}
-			debug('Current requested range is', reqRange)
-			if (!reqRange[1]) {
-				reqRange = [reqRange[0], Number.MAX_SAFE_INTEGER]
-				debug('Treating as', reqRange)
-			}
-
-			// No overlap in the chunk and requested range; don't write.
-			if (chunkRange[1] < reqRange[0] || chunkRange[0] > reqRange[1]) {
-				debug('Ignoring chunk; it is out of range.')
-				break
-			}
-
-			// Since there is overlap, find the segment that's entirely within the
-			// chunk.
-			const segment = [Math.max(chunkRange[0], reqRange[0]), Math.min(chunkRange[1], reqRange[1])]
-			debug('Segment to send within chunk is', segment)
-
-			// Normalize the segment to a chunk offset
-			const start = segment[0] - this.readOffset
-			const end = segment[1] - this.readOffset
-			const len = end - start + 1
-			debug('Offsets into buffer are', [start, end], 'with length', len)
-
-			// Write the slice that we want to write. We first create a buffer from the
-			// chunk. Then we slice a new buffer from the same underlying ArrayBuffer,
-			// starting at the original buffer's offset, further offset by the segment
-			// start. The segment length bounds the end of our slice.
-			const buf = Buffer.from(chunk, 'binary')
-			this.response.write(Buffer.from(buf.buffer, buf.byteOffset + start, len))
-
-			// If the requested range is finished, we should start the next one.
-			if (reqRange[1] > chunkRange[1]) {
-				debug('Chunk is finished, but the requested range is missing bytes.')
-				break
-			}
-
-			if (reqRange[1] <= chunkRange[1]) {
-				debug('Range is finished.')
-				if (!this.nextRange(segment)) {
-					break
-				}
-			}
-		}
-
-		// Update read offset when chunk is finished.
-		this.readOffset += chunk.length
-	}
-
-	start() {
-		// Before we start streaming, let's ensure our ranges don't contain any
-		// without start - if they do, we nuke them all and treat this as a full
-		// request.
-		let nuke = false
-		if (this.ranges) {
-			for (const i in this.ranges.ranges) {
-				if (typeof this.ranges.ranges[i][0] === 'undefined') {
-					nuke = true
-					break
-				}
-			}
-		}
-		if (nuke) {
-			this.ranges = undefined
-		}
-
-		// Register callbacks. Store them in a handlers object so we can
-		// keep the bound version around for stopping to listen to events.
-		this.handlers.error = this.onError.bind(this)
-		this.handlers.end = this.onEnd.bind(this)
-
-		if (this.ranges) {
-			debug('Preparing to handle ranges.')
-			this.handlers.open = this.onOpenRanges.bind(this)
-			this.handlers.data = this.onDataRanges.bind(this)
-		} else {
-			debug('No ranges, just send the whole file.')
-			this.handlers.open = this.onOpenNoRange.bind(this)
-			this.handlers.data = this.onDataNoRange.bind(this)
-		}
-
-		for (const handler in this.handlers) {
-			this.stream.on(handler, this.handlers[handler])
-		}
-	}
+  constructor(response, stream, opts, endCallback) {
+    // Options
+    this.name = opts.name || 'content.bin'
+    this.type = opts.type || 'application/octet-stream'
+    this.size = opts.size
+    this.ranges = opts.ranges
+    this.download = opts.download || false
+
+    // Range handling related state.
+    this.readOffset = 0 // Nothing read so far
+    this.rangeIndex = -1 // No range index yet.
+    this.rangeBoundary = undefined // Generate boundary when needed.
+
+    // Event handlers & state
+    this.handlers = {}
+    this.opened = false
+
+    debug('RangeSender:', this)
+    if (opts.ranges) {
+      debug('Parsed ranges:', opts.ranges.ranges)
+    }
+
+    // Parameters
+    this.response = response
+    this.stream = stream
+    this.opts = opts
+    this.endCallback = endCallback
+  }
+
+  onError(err) {
+    // Assume hiding the actual error is best, and default to 404.
+    debug('Error:', err)
+    if (!this.response.headersSent) {
+      this.response.status(err.code || 404).send({
+        message: err.message || `File not found: ${this.name}`,
+      })
+    }
+    if (this.endCallback) {
+      this.endCallback(err)
+    }
+  }
+
+  onEnd() {
+    debug('End of stream.')
+    this.response.end()
+    if (this.endCallback) {
+      this.endCallback()
+    }
+  }
+
+  // **** No ranges
+  onOpenNoRange() {
+    // File got opened, so we can set headers/status
+    debug('Open succeeded:', this.name, this.type)
+    this.opened = true
+
+    this.response.status(200)
+    this.response.contentType(this.type)
+    this.response.header('Accept-Ranges', 'bytes')
+    this.response.header('Content-Transfer-Encoding', 'binary')
+
+    if (this.download) {
+      this.response.header('Content-Disposition', `attachment; filename="${this.name}"`)
+    } else {
+      this.response.header('Content-Disposition', 'inline')
+    }
+
+    if (this.size) {
+      this.response.header('Content-Length', this.size)
+    }
+  }
+
+  onDataNoRange(chunk) {
+    if (!this.opened) {
+      this.handlers.open()
+    }
+
+    // As simple as it can be.
+    this.response.write(Buffer.from(chunk, 'binary'))
+  }
+
+  // *** With ranges
+  nextRangeHeaders() {
+    // Next range
+    this.rangeIndex += 1
+    if (this.rangeIndex >= this.ranges.ranges.length) {
+      debug('Cannot advance range index; we are done.')
+      return undefined
+    }
+
+    // Calculate this range's size.
+    const range = this.ranges.ranges[this.rangeIndex]
+    let totalSize
+    if (this.size) {
+      totalSize = this.size
+    }
+    if (typeof range[0] === 'undefined') {
+      range[0] = 0
+    }
+    if (typeof range[1] === 'undefined') {
+      if (this.size) {
+        range[1] = totalSize - 1
+      }
+    }
+
+    let sendSize
+    if (typeof range[0] !== 'undefined' && typeof range[1] !== 'undefined') {
+      sendSize = range[1] - range[0] + 1
+    }
+
+    // Write headers, but since we may be in a multipart situation, write them
+    // explicitly to the stream.
+    const start = typeof range[0] === 'undefined' ? '' : `${range[0]}`
+    const end = typeof range[1] === 'undefined' ? '' : `${range[1]}`
+
+    let sizeStr
+    if (totalSize) {
+      sizeStr = `${totalSize}`
+    } else {
+      sizeStr = '*'
+    }
+
+    const ret = {
+      'Content-Range': `bytes ${start}-${end}/${sizeStr}`,
+      'Content-Type': `${this.type}`,
+    }
+    if (sendSize) {
+      ret['Content-Length'] = `${sendSize}`
+    }
+    return ret
+  }
+
+  nextRange() {
+    if (this.ranges.ranges.length === 1) {
+      debug('Cannot start new range; only one requested.')
+      this.stream.off('data', this.handlers.data)
+      return false
+    }
+
+    const headers = this.nextRangeHeaders()
+
+    if (headers) {
+      const onDataRanges = new streamBuf.WritableStreamBuffer()
+      // We start a range with a boundary.
+      onDataRanges.write(`\r\n--${this.rangeBoundary}\r\n`)
+
+      // Then we write the range headers.
+      for (const header in headers) {
+        onDataRanges.write(`${header}: ${headers[header]}\r\n`)
+      }
+      onDataRanges.write('\r\n')
+      this.response.write(onDataRanges.getContents())
+      debug('New range started.')
+      return true
+    }
+
+    // No headers means we're finishing the last range.
+    this.response.write(`\r\n--${this.rangeBoundary}--\r\n`)
+    debug('End of ranges sent.')
+    this.stream.off('data', this.handlers.data)
+    return false
+  }
+
+  onOpenRanges() {
+    // File got opened, so we can set headers/status
+    debug('Open succeeded:', this.name, this.type)
+    this.opened = true
+
+    this.response.header('Accept-Ranges', 'bytes')
+    this.response.header('Content-Transfer-Encoding', 'binary')
+    this.response.header('Content-Disposition', 'inline')
+
+    // For single ranges, the content length should be the size of the
+    // range. For multiple ranges, we don't send a content length
+    // header.
+    //
+    // Similarly, the type is different whether or not there is more than
+    // one range.
+    if (this.ranges.ranges.length === 1) {
+      this.response.writeHead(206, 'Partial Content', this.nextRangeHeaders())
+    } else {
+      this.rangeBoundary = uuid.v4()
+      const headers = {
+        'Content-Type': `multipart/byteranges; boundary=${this.rangeBoundary}`,
+      }
+      this.response.writeHead(206, 'Partial Content', headers)
+      this.nextRange()
+    }
+  }
+
+  onDataRanges(chunk) {
+    if (!this.opened) {
+      this.handlers.open()
+    }
+    // Crap, node.js streams are stupid. No guarantee for seek support. Sure,
+    // that makes node.js easier to implement, but offloads everything onto the
+    // application developer.
+    //
+    // So, we skip chunks until our read position is within the range we want to
+    // send at the moment. We're relying on ranges being in-order, which this
+    // file's parser luckily (?) provides.
+    //
+    // The simplest optimization would be to seek() to the start of each
+    // range.
+    const chunkRange = [this.readOffset, this.readOffset + chunk.length - 1]
+    debug('= Got chunk with byte range', chunkRange)
+    while (true) {
+      let reqRange = this.ranges.ranges[this.rangeIndex]
+      if (!reqRange) {
+        break
+      }
+      debug('Current requested range is', reqRange)
+      if (!reqRange[1]) {
+        reqRange = [reqRange[0], Number.MAX_SAFE_INTEGER]
+        debug('Treating as', reqRange)
+      }
+
+      // No overlap in the chunk and requested range; don't write.
+      if (chunkRange[1] < reqRange[0] || chunkRange[0] > reqRange[1]) {
+        debug('Ignoring chunk; it is out of range.')
+        break
+      }
+
+      // Since there is overlap, find the segment that's entirely within the
+      // chunk.
+      const segment = [Math.max(chunkRange[0], reqRange[0]), Math.min(chunkRange[1], reqRange[1])]
+      debug('Segment to send within chunk is', segment)
+
+      // Normalize the segment to a chunk offset
+      const start = segment[0] - this.readOffset
+      const end = segment[1] - this.readOffset
+      const len = end - start + 1
+      debug('Offsets into buffer are', [start, end], 'with length', len)
+
+      // Write the slice that we want to write. We first create a buffer from the
+      // chunk. Then we slice a new buffer from the same underlying ArrayBuffer,
+      // starting at the original buffer's offset, further offset by the segment
+      // start. The segment length bounds the end of our slice.
+      const buf = Buffer.from(chunk, 'binary')
+      this.response.write(Buffer.from(buf.buffer, buf.byteOffset + start, len))
+
+      // If the requested range is finished, we should start the next one.
+      if (reqRange[1] > chunkRange[1]) {
+        debug('Chunk is finished, but the requested range is missing bytes.')
+        break
+      }
+
+      if (reqRange[1] <= chunkRange[1]) {
+        debug('Range is finished.')
+        if (!this.nextRange(segment)) {
+          break
+        }
+      }
+    }
+
+    // Update read offset when chunk is finished.
+    this.readOffset += chunk.length
+  }
+
+  start() {
+    // Before we start streaming, let's ensure our ranges don't contain any
+    // without start - if they do, we nuke them all and treat this as a full
+    // request.
+    let nuke = false
+    if (this.ranges) {
+      for (const i in this.ranges.ranges) {
+        if (typeof this.ranges.ranges[i][0] === 'undefined') {
+          nuke = true
+          break
+        }
+      }
+    }
+    if (nuke) {
+      this.ranges = undefined
+    }
+
+    // Register callbacks. Store them in a handlers object so we can
+    // keep the bound version around for stopping to listen to events.
+    this.handlers.error = this.onError.bind(this)
+    this.handlers.end = this.onEnd.bind(this)
+
+    if (this.ranges) {
+      debug('Preparing to handle ranges.')
+      this.handlers.open = this.onOpenRanges.bind(this)
+      this.handlers.data = this.onDataRanges.bind(this)
+    } else {
+      debug('No ranges, just send the whole file.')
+      this.handlers.open = this.onOpenNoRange.bind(this)
+      this.handlers.data = this.onDataNoRange.bind(this)
+    }
+
+    for (const handler in this.handlers) {
+      this.stream.on(handler, this.handlers[handler])
+    }
+  }
 }
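
For multiple ranges, nextRange() and onDataRanges() together emit a standard multipart/byteranges body. Using the two ranges from the test below ([1, 3] and [5, 7] of 'Hello, world!'), with <boundary> standing in for the generated uuid.v4() value and every line terminated by CRLF, the payload looks roughly like:

--<boundary>
Content-Range: bytes 1-3/*
Content-Type: application/test
Content-Length: 3

ell
--<boundary>
Content-Range: bytes 5-7/*
Content-Type: application/test
Content-Length: 3

, w
--<boundary>--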
 
 function send(response, stream, opts, endCallback) {
-	const sender = new RangeSender(response, stream, opts, endCallback)
-	sender.start()
+  const sender = new RangeSender(response, stream, opts, endCallback)
+  sender.start()
 }
 
 /*
@@ -449,8 +449,8 @@ function send(response, stream, opts, endCallback) {
  */
 
 module.exports = {
-	parse,
-	parseAsync,
-	RangeSender,
-	send,
+  parse,
+  parseAsync,
+  RangeSender,
+  send,
 }
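
A minimal sketch of serving a file through send(), assuming an Express-style handler and a file path of our choosing; fs.createReadStream() works here because it emits the 'open', 'data', 'end' and 'error' events RangeSender subscribes to.

const express = require('express')
const fs = require('fs')
const ranges = require('@joystream/storage-utils/ranges')

const app = express()

app.get('/content', (req, res) => {
  let parsed // undefined means "send the whole file"
  if (req.headers.range) {
    try {
      parsed = ranges.parse(req.headers.range)
    } catch (err) {
      // A malformed Range header degrades to a full-file response.
    }
  }
  const stream = fs.createReadStream('/tmp/content.bin') // assumed location
  ranges.send(res, stream, {
    name: 'content.bin',
    type: 'application/octet-stream',
    ranges: parsed,
  })
})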

+ 4 - 4
storage-node/packages/util/stripEndingSlash.js

@@ -1,9 +1,9 @@
 // return url with last `/` removed
 function removeEndingForwardSlash(url) {
-	if (url.endsWith('/')) {
-		return url.substring(0, url.length - 1)
-	}
-	return url.toString()
+  if (url.endsWith('/')) {
+    return url.substring(0, url.length - 1)
+  }
+  return url.toString()
 }
 
 module.exports = removeEndingForwardSlash
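
Usage is a one-liner; URLs without a trailing slash pass through unchanged (the package path below assumes the same alias the test files use):

const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')

stripEndingSlash('http://localhost:3000/') // 'http://localhost:3000'
stripEndingSlash('http://localhost:3000')  // 'http://localhost:3000'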

+ 31 - 31
storage-node/packages/util/test/fs/resolve.js

@@ -24,45 +24,45 @@ const path = require('path')
 const resolve = require('@joystream/storage-utils/fs/resolve')
 
 function tests(base) {
-	it('resolves absolute paths relative to the base', function () {
-		const resolved = resolve(base, '/foo')
-		const relative = path.relative(base, resolved)
-		expect(relative).to.equal('foo')
-	})
+  it('resolves absolute paths relative to the base', function () {
+    const resolved = resolve(base, '/foo')
+    const relative = path.relative(base, resolved)
+    expect(relative).to.equal('foo')
+  })
 
-	it('allows for relative paths that stay in the base', function () {
-		const resolved = resolve(base, 'foo/../bar')
-		const relative = path.relative(base, resolved)
-		expect(relative).to.equal('bar')
-	})
+  it('allows for relative paths that stay in the base', function () {
+    const resolved = resolve(base, 'foo/../bar')
+    const relative = path.relative(base, resolved)
+    expect(relative).to.equal('bar')
+  })
 
-	it('prevents relative paths from breaking out of the base', function () {
-		expect(() => resolve(base, '../foo')).to.throw()
-	})
+  it('prevents relative paths from breaking out of the base', function () {
+    expect(() => resolve(base, '../foo')).to.throw()
+  })
 
-	it('prevents long relative paths from breaking out of the base', function () {
-		expect(() => resolve(base, '../../../foo')).to.throw()
-	})
+  it('prevents long relative paths from breaking out of the base', function () {
+    expect(() => resolve(base, '../../../foo')).to.throw()
+  })
 
-	it('prevents sneaky relative paths from breaking out of the base', function () {
-		expect(() => resolve(base, 'foo/../../../bar')).to.throw()
-	})
+  it('prevents sneaky relative paths from breaking out of the base', function () {
+    expect(() => resolve(base, 'foo/../../../bar')).to.throw()
+  })
 }
 
 describe('util/fs/resolve', function () {
-	describe('slash base', function () {
-		tests('/')
-	})
+  describe('slash base', function () {
+    tests('/')
+  })
 
-	describe('empty base', function () {
-		tests('')
-	})
+  describe('empty base', function () {
+    tests('')
+  })
 
-	describe('short base', function () {
-		tests('/base')
-	})
+  describe('short base', function () {
+    tests('/base')
+  })
 
-	describe('long base', function () {
-		tests('/this/base/is/very/long/indeed')
-	})
+  describe('long base', function () {
+    tests('/this/base/is/very/long/indeed')
+  })
 })
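
Put differently, resolve() jails every user-supplied path to the base directory. A sketch of the contract these tests pin down:

const resolve = require('@joystream/storage-utils/fs/resolve')

resolve('/base', '/foo')       // '/base/foo': absolute paths are re-rooted
resolve('/base', 'foo/../bar') // '/base/bar': traversal staying inside the base is fine
resolve('/base', '../foo')     // throws: escaping the base is rejected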

+ 25 - 25
storage-node/packages/util/test/fs/walk.js

@@ -29,39 +29,39 @@ const path = require('path')
 const fswalk = require('@joystream/storage-utils/fs/walk')
 
 function walktest(archive, base, done) {
-	const results = new Map()
+  const results = new Map()
 
-	fswalk(base, archive, (err, relname, stat, linktarget) => {
-		expect(err).to.be.null
+  fswalk(base, archive, (err, relname, stat, linktarget) => {
+    expect(err).to.be.null
 
-		if (relname) {
-			results.set(relname, [stat, linktarget])
-			return
-		}
+    if (relname) {
+      results.set(relname, [stat, linktarget])
+      return
+    }
 
-		// End of data, do testing
-		const entries = Array.from(results.keys())
-		expect(entries).to.include('foo')
-		expect(results.get('foo')[0].isDirectory()).to.be.true
+    // End of data, do testing
+    const entries = Array.from(results.keys())
+    expect(entries).to.include('foo')
+    expect(results.get('foo')[0].isDirectory()).to.be.true
 
-		expect(entries).to.include('bar')
-		expect(results.get('bar')[0].isFile()).to.be.true
+    expect(entries).to.include('bar')
+    expect(results.get('bar')[0].isFile()).to.be.true
 
-		if (archive === fs) {
-			expect(entries).to.include('quux')
-			expect(results.get('quux')[0].isSymbolicLink()).to.be.true
-			expect(results.get('quux')[1]).to.equal('foo/baz')
-		}
+    if (archive === fs) {
+      expect(entries).to.include('quux')
+      expect(results.get('quux')[0].isSymbolicLink()).to.be.true
+      expect(results.get('quux')[1]).to.equal('foo/baz')
+    }
 
-		expect(entries).to.include('foo/baz')
-		expect(results.get('foo/baz')[0].isFile()).to.be.true
+    expect(entries).to.include('foo/baz')
+    expect(results.get('foo/baz')[0].isFile()).to.be.true
 
-		done()
-	})
+    done()
+  })
 }
 
 describe('util/fs/walk', function () {
-	it('reports all files in a file system hierarchy', function (done) {
-		walktest(fs, path.resolve(__dirname, '../data'), done)
-	})
+  it('reports all files in a file system hierarchy', function (done) {
+    walktest(fs, path.resolve(__dirname, '../data'), done)
+  })
 })
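
The callback contract fswalk implements, as exercised above: it fires once per entry with (err, relname, stat, linktarget), then once more with no relname to signal the end of the walk. A minimal sketch:

const fs = require('fs')
const fswalk = require('@joystream/storage-utils/fs/walk')

fswalk('/some/dir', fs, (err, relname, stat, linktarget) => {
  if (err) throw err
  if (!relname) {
    console.log('Walk finished.')
    return
  }
  if (stat.isSymbolicLink()) {
    console.log(relname, '->', linktarget)
  } else {
    console.log(relname, stat.isDirectory() ? 'dir' : 'file')
  }
})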

+ 93 - 93
storage-node/packages/util/test/lru.js

@@ -24,129 +24,129 @@ const lru = require('@joystream/storage-utils/lru')
 
 const DEFAULT_SLEEP = 1
 function sleep(ms = DEFAULT_SLEEP) {
-	return new Promise((resolve) => {
-		setTimeout(resolve, ms)
-	})
+  return new Promise(resolve => {
+    setTimeout(resolve, ms)
+  })
 }
 
 describe('util/lru', function () {
-	describe('simple usage', function () {
-		it('does not contain keys that were not added', function () {
-			const cache = new lru.LRUCache()
-			expect(cache.size()).to.equal(0)
+  describe('simple usage', function () {
+    it('does not contain keys that were not added', function () {
+      const cache = new lru.LRUCache()
+      expect(cache.size()).to.equal(0)
 
-			const val = cache.get('something')
-			expect(val).to.be.undefined
+      const val = cache.get('something')
+      expect(val).to.be.undefined
 
-			expect(cache.has('something')).to.be.false
-		})
+      expect(cache.has('something')).to.be.false
+    })
 
-		it('contains keys that were added', function () {
-			const cache = new lru.LRUCache()
-			cache.put('something', 'yay!')
-			expect(cache.size()).to.equal(1)
+    it('contains keys that were added', function () {
+      const cache = new lru.LRUCache()
+      cache.put('something', 'yay!')
+      expect(cache.size()).to.equal(1)
 
-			const val = cache.get('something')
-			expect(val).to.be.equal('yay!')
+      const val = cache.get('something')
+      expect(val).to.be.equal('yay!')
 
-			expect(cache.has('something')).to.be.true
-		})
+      expect(cache.has('something')).to.be.true
+    })
 
-		it('does not contain keys that were deleted', function () {
-			const cache = new lru.LRUCache()
-			cache.put('something', 'yay!')
-			expect(cache.size()).to.equal(1)
-			let val = cache.get('something')
-			expect(val).to.be.equal('yay!')
-			expect(cache.has('something')).to.be.true
+    it('does not contain keys that were deleted', function () {
+      const cache = new lru.LRUCache()
+      cache.put('something', 'yay!')
+      expect(cache.size()).to.equal(1)
+      let val = cache.get('something')
+      expect(val).to.be.equal('yay!')
+      expect(cache.has('something')).to.be.true
 
-			cache.del('something')
-			expect(cache.size()).to.equal(0)
-			val = cache.get('something')
-			expect(val).to.be.undefined
-			expect(cache.has('something')).to.be.false
-		})
+      cache.del('something')
+      expect(cache.size()).to.equal(0)
+      val = cache.get('something')
+      expect(val).to.be.undefined
+      expect(cache.has('something')).to.be.false
+    })
 
-		it('can be cleared', function () {
-			const cache = new lru.LRUCache()
-			cache.put('something', 'yay!')
-			expect(cache.size()).to.equal(1)
+    it('can be cleared', function () {
+      const cache = new lru.LRUCache()
+      cache.put('something', 'yay!')
+      expect(cache.size()).to.equal(1)
 
-			cache.clear()
-			expect(cache.size()).to.equal(0)
-		})
-	})
+      cache.clear()
+      expect(cache.size()).to.equal(0)
+    })
+  })
 
-	describe('capacity management', function () {
-		it('does not grow beyond capacity', async function () {
-			const cache = new lru.LRUCache(2) // Small capacity
-			expect(cache.size()).to.equal(0)
+  describe('capacity management', function () {
+    it('does not grow beyond capacity', async function () {
+      const cache = new lru.LRUCache(2) // Small capacity
+      expect(cache.size()).to.equal(0)
 
-			cache.put('foo', '42')
-			expect(cache.size()).to.equal(1)
+      cache.put('foo', '42')
+      expect(cache.size()).to.equal(1)
 
-			await sleep()
+      await sleep()
 
-			cache.put('bar', '42')
-			expect(cache.size()).to.equal(2)
+      cache.put('bar', '42')
+      expect(cache.size()).to.equal(2)
 
-			await sleep()
+      await sleep()
 
-			cache.put('baz', '42')
-			expect(cache.size()).to.equal(2) // Capacity exceeded
-		})
+      cache.put('baz', '42')
+      expect(cache.size()).to.equal(2) // Capacity exceeded
+    })
 
-		it('removes the oldest key when pruning', async function () {
-			const cache = new lru.LRUCache(2) // Small capacity
-			expect(cache.size()).to.equal(0)
+    it('removes the oldest key when pruning', async function () {
+      const cache = new lru.LRUCache(2) // Small capacity
+      expect(cache.size()).to.equal(0)
 
-			cache.put('foo', '42')
-			expect(cache.size()).to.equal(1)
-			expect(cache.has('foo')).to.be.true
+      cache.put('foo', '42')
+      expect(cache.size()).to.equal(1)
+      expect(cache.has('foo')).to.be.true
 
-			await sleep()
+      await sleep()
 
-			cache.put('bar', '42')
-			expect(cache.size()).to.equal(2)
-			expect(cache.has('foo')).to.be.true
-			expect(cache.has('bar')).to.be.true
+      cache.put('bar', '42')
+      expect(cache.size()).to.equal(2)
+      expect(cache.has('foo')).to.be.true
+      expect(cache.has('bar')).to.be.true
 
-			await sleep()
+      await sleep()
 
-			cache.put('baz', '42')
-			expect(cache.size()).to.equal(2) // Capacity exceeded
-			expect(cache.has('bar')).to.be.true
-			expect(cache.has('baz')).to.be.true
-		})
+      cache.put('baz', '42')
+      expect(cache.size()).to.equal(2) // Capacity exceeded
+      expect(cache.has('bar')).to.be.true
+      expect(cache.has('baz')).to.be.true
+    })
 
-		it('updates LRU timestamp when reading', async function () {
-			const cache = new lru.LRUCache(2) // Small capacity
-			expect(cache.size()).to.equal(0)
+    it('updates LRU timestamp when reading', async function () {
+      const cache = new lru.LRUCache(2) // Small capacity
+      expect(cache.size()).to.equal(0)
 
-			cache.put('foo', '42')
-			expect(cache.size()).to.equal(1)
-			expect(cache.has('foo')).to.be.true
+      cache.put('foo', '42')
+      expect(cache.size()).to.equal(1)
+      expect(cache.has('foo')).to.be.true
 
-			await sleep()
+      await sleep()
 
-			cache.put('bar', '42')
-			expect(cache.size()).to.equal(2)
-			expect(cache.has('foo')).to.be.true
-			expect(cache.has('bar')).to.be.true
+      cache.put('bar', '42')
+      expect(cache.size()).to.equal(2)
+      expect(cache.has('foo')).to.be.true
+      expect(cache.has('bar')).to.be.true
 
-			await sleep()
+      await sleep()
 
-			// 'foo' is older than 'bar' right now, so should be pruned first. But
-			// if we get 'foo', it would be 'bar' that has to go.
-			cache.get('foo')
+      // 'foo' is older than 'bar' right now, so should be pruned first. But
+      // if we get 'foo', it would be 'bar' that has to go.
+      cache.get('foo')
 
-			// Makes debugging a bit more obvious
-			await sleep()
+      // Makes debugging a bit more obvious
+      await sleep()
 
-			cache.put('baz', '42')
-			expect(cache.size()).to.equal(2) // Capacity exceeded
-			expect(cache.has('foo')).to.be.true
-			expect(cache.has('baz')).to.be.true
-		})
-	})
+      cache.put('baz', '42')
+      expect(cache.size()).to.equal(2) // Capacity exceeded
+      expect(cache.has('foo')).to.be.true
+      expect(cache.has('baz')).to.be.true
+    })
+  })
 })
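
Summing up the LRUCache API these tests cover: put/get/has/del/clear/size with a fixed capacity, where reads refresh an entry's last-used timestamp.

const lru = require('@joystream/storage-utils/lru')

const cache = new lru.LRUCache(2) // capacity of two entries
cache.put('foo', 1)
cache.put('bar', 2)
cache.get('foo')    // refreshes 'foo', making 'bar' the least recently used
cache.put('baz', 3) // exceeds capacity, so 'bar' is pruned
cache.has('bar')    // false
cache.size()        // 2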

+ 86 - 86
storage-node/packages/util/test/pagination.js

@@ -24,90 +24,90 @@ const mockHttp = require('node-mocks-http')
 const pagination = require('@joystream/storage-utils/pagination')
 
 describe('util/pagination', function () {
-	describe('openapi()', function () {
-		it('should add parameters and definitions to an API spec', function () {
-			const api = pagination.openapi({})
-
-			// Parameters
-			expect(api).to.have.property('components')
-
-			expect(api.components).to.have.property('parameters')
-			expect(api.components.parameters).to.have.property('paginationLimit')
-
-			expect(api.components.parameters.paginationLimit).to.have.property('name')
-			expect(api.components.parameters.paginationLimit.name).to.equal('limit')
-
-			expect(api.components.parameters.paginationLimit).to.have.property('schema')
-			expect(api.components.parameters.paginationLimit.schema).to.have.property('type')
-			expect(api.components.parameters.paginationLimit.schema.type).to.equal('integer')
-
-			expect(api.components.parameters.paginationOffset).to.have.property('name')
-			expect(api.components.parameters.paginationOffset.name).to.equal('offset')
-
-			expect(api.components.parameters.paginationOffset).to.have.property('schema')
-			expect(api.components.parameters.paginationOffset.schema).to.have.property('type')
-			expect(api.components.parameters.paginationOffset.schema.type).to.equal('integer')
-
-			// Definitions
-			expect(api.components).to.have.property('schemas')
-			expect(api.components.schemas).to.have.property('PaginationInfo')
-
-			expect(api.components.schemas.PaginationInfo).to.have.property('type')
-			expect(api.components.schemas.PaginationInfo.type).to.equal('object')
-
-			expect(api.components.schemas.PaginationInfo).to.have.property('properties')
-			expect(api.components.schemas.PaginationInfo.properties)
-				.to.be.an('object')
-				.that.has.all.keys('self', 'next', 'prev', 'first', 'last')
-		})
-	})
-
-	describe('paginate()', function () {
-		it('should add pagination links to a response object', function () {
-			const req = mockHttp.createRequest({
-				method: 'GET',
-				url: '/foo?limit=10',
-				query: {
-					limit: 10, // Mock is a little stupid, we have to explicitly set query
-				},
-				headers: {
-					host: 'localhost',
-				},
-				protocol: 'http',
-			})
-
-			const res = pagination.paginate(req, {})
-
-			expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next')
-
-			expect(res.pagination.self).to.equal('http://localhost/foo?limit=10')
-			expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
-			expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=10')
-		})
-
-		it('should add a last pagination link when requested', function () {
-			const req = mockHttp.createRequest({
-				method: 'GET',
-				url: '/foo?limit=10&offset=15',
-				query: {
-					limit: 10, // Mock is a little stupid, we have to explicitly set query
-					offset: 15,
-				},
-				headers: {
-					host: 'localhost',
-				},
-				protocol: 'http',
-			})
-
-			const res = pagination.paginate(req, {}, 35)
-
-			expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next', 'prev', 'last')
-
-			expect(res.pagination.self).to.equal('http://localhost/foo?limit=10&offset=15')
-			expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
-			expect(res.pagination.last).to.equal('http://localhost/foo?limit=10&offset=35')
-			expect(res.pagination.prev).to.equal('http://localhost/foo?limit=10&offset=5')
-			expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=25')
-		})
-	})
+  describe('openapi()', function () {
+    it('should add parameters and definitions to an API spec', function () {
+      const api = pagination.openapi({})
+
+      // Parameters
+      expect(api).to.have.property('components')
+
+      expect(api.components).to.have.property('parameters')
+      expect(api.components.parameters).to.have.property('paginationLimit')
+
+      expect(api.components.parameters.paginationLimit).to.have.property('name')
+      expect(api.components.parameters.paginationLimit.name).to.equal('limit')
+
+      expect(api.components.parameters.paginationLimit).to.have.property('schema')
+      expect(api.components.parameters.paginationLimit.schema).to.have.property('type')
+      expect(api.components.parameters.paginationLimit.schema.type).to.equal('integer')
+
+      expect(api.components.parameters.paginationOffset).to.have.property('name')
+      expect(api.components.parameters.paginationOffset.name).to.equal('offset')
+
+      expect(api.components.parameters.paginationOffset).to.have.property('schema')
+      expect(api.components.parameters.paginationOffset.schema).to.have.property('type')
+      expect(api.components.parameters.paginationOffset.schema.type).to.equal('integer')
+
+      // Definitions
+      expect(api.components).to.have.property('schemas')
+      expect(api.components.schemas).to.have.property('PaginationInfo')
+
+      expect(api.components.schemas.PaginationInfo).to.have.property('type')
+      expect(api.components.schemas.PaginationInfo.type).to.equal('object')
+
+      expect(api.components.schemas.PaginationInfo).to.have.property('properties')
+      expect(api.components.schemas.PaginationInfo.properties)
+        .to.be.an('object')
+        .that.has.all.keys('self', 'next', 'prev', 'first', 'last')
+    })
+  })
+
+  describe('paginate()', function () {
+    it('should add pagination links to a response object', function () {
+      const req = mockHttp.createRequest({
+        method: 'GET',
+        url: '/foo?limit=10',
+        query: {
+          limit: 10, // Mock is a little stupid, we have to explicitly set query
+        },
+        headers: {
+          host: 'localhost',
+        },
+        protocol: 'http',
+      })
+
+      const res = pagination.paginate(req, {})
+
+      expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next')
+
+      expect(res.pagination.self).to.equal('http://localhost/foo?limit=10')
+      expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
+      expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=10')
+    })
+
+    it('should add a last pagination link when requested', function () {
+      const req = mockHttp.createRequest({
+        method: 'GET',
+        url: '/foo?limit=10&offset=15',
+        query: {
+          limit: 10, // Mock is a little stupid, we have to explicitly set query
+          offset: 15,
+        },
+        headers: {
+          host: 'localhost',
+        },
+        protocol: 'http',
+      })
+
+      const res = pagination.paginate(req, {}, 35)
+
+      expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next', 'prev', 'last')
+
+      expect(res.pagination.self).to.equal('http://localhost/foo?limit=10&offset=15')
+      expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
+      expect(res.pagination.last).to.equal('http://localhost/foo?limit=10&offset=35')
+      expect(res.pagination.prev).to.equal('http://localhost/foo?limit=10&offset=5')
+      expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=25')
+    })
+  })
 })
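
The lastOffset argument is the caller's responsibility; the util passes it through verbatim (the test above supplies 35 directly). One plausible way to derive it from a total item count, purely as an assumption, is:

// Hypothetical helper: offset of the final page for a given total and limit.
function lastPageOffset(total, limit) {
  if (total <= 0) {
    return 0
  }
  return Math.floor((total - 1) / limit) * limit
}

lastPageOffset(35, 10) // 30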

+ 364 - 364
storage-node/packages/util/test/ranges.js

@@ -25,368 +25,368 @@ const streamBuffers = require('stream-buffers')
 const ranges = require('@joystream/storage-utils/ranges')
 
 describe('util/ranges', function () {
-	describe('parse()', function () {
-		it('should parse a full range', function () {
-			// Range with unit
-			let range = ranges.parse('bytes=0-100')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-100')
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(100)
-
-			// Range without unit
-			range = ranges.parse('0-100')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-100')
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(100)
-
-			// Range with custom unit
-			//
-			range = ranges.parse('foo=0-100')
-			expect(range.unit).to.equal('foo')
-			expect(range.rangeStr).to.equal('0-100')
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(100)
-		})
-
-		it('should error out on malformed strings', function () {
-			expect(() => ranges.parse('foo')).to.throw()
-			expect(() => ranges.parse('foo=bar')).to.throw()
-			expect(() => ranges.parse('foo=100')).to.throw()
-			expect(() => ranges.parse('foo=100-0')).to.throw()
-		})
-
-		it('should parse a range without end', function () {
-			const range = ranges.parse('0-')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-')
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.be.undefined
-		})
-
-		it('should parse a range without start', function () {
-			const range = ranges.parse('-100')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('-100')
-			expect(range.ranges[0][0]).to.be.undefined
-			expect(range.ranges[0][1]).to.equal(100)
-		})
-
-		it('should parse multiple ranges', function () {
-			const range = ranges.parse('0-10,30-40,60-80')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-10,30-40,60-80')
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(10)
-			expect(range.ranges[1][0]).to.equal(30)
-			expect(range.ranges[1][1]).to.equal(40)
-			expect(range.ranges[2][0]).to.equal(60)
-			expect(range.ranges[2][1]).to.equal(80)
-		})
-
-		it('should merge overlapping ranges', function () {
-			// Two overlapping ranges
-			let range = ranges.parse('0-20,10-30')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-20,10-30')
-			expect(range.ranges).to.have.lengthOf(1)
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(30)
-
-			// Three overlapping ranges
-			range = ranges.parse('0-15,10-25,20-30')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-15,10-25,20-30')
-			expect(range.ranges).to.have.lengthOf(1)
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(30)
-
-			// Three overlapping ranges, reverse order
-			range = ranges.parse('20-30,10-25,0-15')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('20-30,10-25,0-15')
-			expect(range.ranges).to.have.lengthOf(1)
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(30)
-
-			// Adjacent ranges
-			range = ranges.parse('0-10,11-20')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('0-10,11-20')
-			expect(range.ranges).to.have.lengthOf(1)
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(20)
-		})
-
-		it('should sort ranges', function () {
-			const range = ranges.parse('10-30,0-5')
-			expect(range.unit).to.equal('bytes')
-			expect(range.rangeStr).to.equal('10-30,0-5')
-			expect(range.ranges).to.have.lengthOf(2)
-			expect(range.ranges[0][0]).to.equal(0)
-			expect(range.ranges[0][1]).to.equal(5)
-			expect(range.ranges[1][0]).to.equal(10)
-			expect(range.ranges[1][1]).to.equal(30)
-		})
-	})
-
-	describe('send()', function () {
-		it('should send full files on request', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(200)
-				expect(res.getHeader('content-type')).to.equal('application/test')
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-				expect(res._getBuffer().toString()).to.equal('Hello, world!')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-
-		it('should send a range spanning the entire file on request', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-				ranges: {
-					ranges: [[0, 12]],
-				},
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(206)
-				expect(res.getHeader('content-type')).to.equal('application/test')
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-				expect(res.getHeader('content-range')).to.equal('bytes 0-12/*')
-				expect(res.getHeader('content-length')).to.equal('13')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-				expect(res._getBuffer().toString()).to.equal('Hello, world!')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-
-		it('should send a small range on request', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-				ranges: {
-					ranges: [[1, 11]], // Cut off first and last letter
-				},
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(206)
-				expect(res.getHeader('content-type')).to.equal('application/test')
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-				expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
-				expect(res.getHeader('content-length')).to.equal('11')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-				expect(res._getBuffer().toString()).to.equal('ello, world')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-
-		it('should send ranges crossing buffer boundaries', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({
-				chunkSize: 3, // Setting a chunk size smaller than the range should
-				// not impact the test.
-			})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-				ranges: {
-					ranges: [[1, 11]], // Cut off first and last letter
-				},
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(206)
-				expect(res.getHeader('content-type')).to.equal('application/test')
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-				expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
-				expect(res.getHeader('content-length')).to.equal('11')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-				expect(res._getBuffer().toString()).to.equal('ello, world')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-
-		it('should send multiple ranges', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-				ranges: {
-					ranges: [
-						[1, 3],
-						[5, 7],
-					], // Slice two ranges out
-				},
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(206)
-				expect(res.getHeader('content-type')).to.satisfy((str) => str.startsWith('multipart/byteranges'))
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-
-				// The buffer should contain both ranges, plus the multipart framing.
-				// That would be "ell" and ", w".
-				// It's pretty elaborate having to parse the entire multipart response
-				// body, so we'll restrict ourselves to finding lines within it.
-				const body = res._getBuffer().toString()
-				expect(body).to.contain('\r\nContent-Range: bytes 1-3/*\r\n')
-				expect(body).to.contain('\r\nell\r\n')
-				expect(body).to.contain('\r\nContent-Range: bytes 5-7/*\r\n')
-				expect(body).to.contain('\r\n, w')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-
-		it('should deal with ranges without end', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-				ranges: {
-					ranges: [[5, undefined]], // Skip the first part, but read until end
-				},
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(206)
-				expect(res.getHeader('content-type')).to.equal('application/test')
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-				expect(res.getHeader('content-range')).to.equal('bytes 5-/*')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-				expect(res._getBuffer().toString()).to.equal(', world!')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-
-		it('should ignore ranges without start', function (done) {
-			const res = mockHttp.createResponse({})
-			const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-			// End-of-stream callback
-			const opts = {
-				name: 'test.file',
-				type: 'application/test',
-				ranges: {
-					ranges: [[undefined, 5]], // Only last five
-				},
-			}
-			ranges.send(res, inStream, opts, function (err) {
-				expect(err).to.not.exist
-
-				// HTTP handling
-				expect(res.statusCode).to.equal(200)
-				expect(res.getHeader('content-type')).to.equal('application/test')
-				expect(res.getHeader('content-disposition')).to.equal('inline')
-
-				// Data/stream handling
-				expect(res._isEndCalled()).to.be.true
-				expect(res._getBuffer().toString()).to.equal('Hello, world!')
-
-				// Notify mocha that we're done.
-				done()
-			})
-
-			// Simulate file stream
-			inStream.emit('open')
-			inStream.put('Hello, world!')
-			inStream.stop()
-		})
-	})
+  describe('parse()', function () {
+    it('should parse a full range', function () {
+      // Range with unit
+      let range = ranges.parse('bytes=0-100')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-100')
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(100)
+
+      // Range without unit
+      range = ranges.parse('0-100')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-100')
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(100)
+
+      // Range with custom unit
+      //
+      range = ranges.parse('foo=0-100')
+      expect(range.unit).to.equal('foo')
+      expect(range.rangeStr).to.equal('0-100')
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(100)
+    })
+
+    it('should error out on malformed strings', function () {
+      expect(() => ranges.parse('foo')).to.throw()
+      expect(() => ranges.parse('foo=bar')).to.throw()
+      expect(() => ranges.parse('foo=100')).to.throw()
+      expect(() => ranges.parse('foo=100-0')).to.throw()
+    })
+
+    it('should parse a range without end', function () {
+      const range = ranges.parse('0-')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-')
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.be.undefined
+    })
+
+    it('should parse a range without start', function () {
+      const range = ranges.parse('-100')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('-100')
+      expect(range.ranges[0][0]).to.be.undefined
+      expect(range.ranges[0][1]).to.equal(100)
+    })
+
+    it('should parse multiple ranges', function () {
+      const range = ranges.parse('0-10,30-40,60-80')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-10,30-40,60-80')
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(10)
+      expect(range.ranges[1][0]).to.equal(30)
+      expect(range.ranges[1][1]).to.equal(40)
+      expect(range.ranges[2][0]).to.equal(60)
+      expect(range.ranges[2][1]).to.equal(80)
+    })
+
+    it('should merge overlapping ranges', function () {
+      // Two overlapping ranges
+      let range = ranges.parse('0-20,10-30')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-20,10-30')
+      expect(range.ranges).to.have.lengthOf(1)
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(30)
+
+      // Three overlapping ranges
+      range = ranges.parse('0-15,10-25,20-30')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-15,10-25,20-30')
+      expect(range.ranges).to.have.lengthOf(1)
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(30)
+
+      // Three overlapping ranges, reverse order
+      range = ranges.parse('20-30,10-25,0-15')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('20-30,10-25,0-15')
+      expect(range.ranges).to.have.lengthOf(1)
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(30)
+
+      // Adjacent ranges
+      range = ranges.parse('0-10,11-20')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('0-10,11-20')
+      expect(range.ranges).to.have.lengthOf(1)
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(20)
+    })
+
+    it('should sort ranges', function () {
+      const range = ranges.parse('10-30,0-5')
+      expect(range.unit).to.equal('bytes')
+      expect(range.rangeStr).to.equal('10-30,0-5')
+      expect(range.ranges).to.have.lengthOf(2)
+      expect(range.ranges[0][0]).to.equal(0)
+      expect(range.ranges[0][1]).to.equal(5)
+      expect(range.ranges[1][0]).to.equal(10)
+      expect(range.ranges[1][1]).to.equal(30)
+    })
+  })
+
+  describe('send()', function () {
+    it('should send full files on request', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({})
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(200)
+        expect(res.getHeader('content-type')).to.equal('application/test')
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+        expect(res._getBuffer().toString()).to.equal('Hello, world!')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+
+    it('should send a range spanning the entire file on request', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({})
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+        ranges: {
+          ranges: [[0, 12]],
+        },
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(206)
+        expect(res.getHeader('content-type')).to.equal('application/test')
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+        expect(res.getHeader('content-range')).to.equal('bytes 0-12/*')
+        expect(res.getHeader('content-length')).to.equal('13')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+        expect(res._getBuffer().toString()).to.equal('Hello, world!')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+
+    it('should send a small range on request', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({})
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+        ranges: {
+          ranges: [[1, 11]], // Cut off first and last letter
+        },
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(206)
+        expect(res.getHeader('content-type')).to.equal('application/test')
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+        expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
+        expect(res.getHeader('content-length')).to.equal('11')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+        expect(res._getBuffer().toString()).to.equal('ello, world')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+
+    it('should send ranges crossing buffer boundaries', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({
+        // A chunk size smaller than the requested range should not impact the test.
+        chunkSize: 3,
+      })
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+        ranges: {
+          ranges: [[1, 11]], // Cut off first and last letter
+        },
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(206)
+        expect(res.getHeader('content-type')).to.equal('application/test')
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+        expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
+        expect(res.getHeader('content-length')).to.equal('11')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+        expect(res._getBuffer().toString()).to.equal('ello, world')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+
+    it('should send multiple ranges', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({})
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+        ranges: {
+          ranges: [
+            [1, 3],
+            [5, 7],
+          ], // Slice two ranges out
+        },
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(206)
+        expect(res.getHeader('content-type')).to.satisfy(str => str.startsWith('multipart/byteranges'))
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+
+        // The buffer should contain both ranges, but with all the multipart
+        // headers and boundaries in between. The sliced ranges would be
+        // "ell" and ", w".
+        // It's pretty elaborate having to parse the entire multipart response
+        // body, so we'll restrict ourselves to finding lines within it.
+        const body = res._getBuffer().toString()
+        expect(body).to.contain('\r\nContent-Range: bytes 1-3/*\r\n')
+        expect(body).to.contain('\r\nell\r\n')
+        expect(body).to.contain('\r\nContent-Range: bytes 5-7/*\r\n')
+        expect(body).to.contain('\r\n, w')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+
+    it('should deal with ranges without end', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({})
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+        ranges: {
+          ranges: [[5, undefined]], // Skip the first part, but read until end
+        },
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(206)
+        expect(res.getHeader('content-type')).to.equal('application/test')
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+        expect(res.getHeader('content-range')).to.equal('bytes 5-/*')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+        expect(res._getBuffer().toString()).to.equal(', world!')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+
+    it('should ignore ranges without start', function (done) {
+      const res = mockHttp.createResponse({})
+      const inStream = new streamBuffers.ReadableStreamBuffer({})
+
+      // End-of-stream callback
+      const opts = {
+        name: 'test.file',
+        type: 'application/test',
+        ranges: {
+          ranges: [[undefined, 5]], // Suffix range ("last five bytes"); such ranges are ignored
+        },
+      }
+      ranges.send(res, inStream, opts, function (err) {
+        expect(err).to.not.exist
+
+        // HTTP handling
+        expect(res.statusCode).to.equal(200)
+        expect(res.getHeader('content-type')).to.equal('application/test')
+        expect(res.getHeader('content-disposition')).to.equal('inline')
+
+        // Data/stream handling
+        expect(res._isEndCalled()).to.be.true
+        expect(res._getBuffer().toString()).to.equal('Hello, world!')
+
+        // Notify mocha that we're done.
+        done()
+      })
+
+      // Simulate file stream
+      inStream.emit('open')
+      inStream.put('Hello, world!')
+      inStream.stop()
+    })
+  })
 })
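
For context, a minimal sketch of the API these tests exercise. The require path is an assumption by analogy with the stripEndingSlash test below; this diff only shows the test side of the module:

// Sketch only: assumes the util package exposes the module at this path,
// mirroring '@joystream/storage-utils/stripEndingSlash' below.
const ranges = require('@joystream/storage-utils/ranges')

// parse() normalizes a Range header value into { unit, rangeStr, ranges },
// sorting the ranges and merging overlapping or adjacent ones.
const parsed = ranges.parse('bytes=20-30,0-10,11-15')
// parsed.unit   === 'bytes'
// parsed.ranges deep-equals [[0, 15], [20, 30]]

// send() streams the requested range(s) to an HTTP response: 200 for a full
// body, 206 for a single range, multipart/byteranges for several ranges,
// then invokes the callback.
// ranges.send(res, inStream, { name: 'test.file', type: 'application/test', ranges: parsed }, cb)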

+ 6 - 6
storage-node/packages/util/test/stripEndingSlash.js

@@ -4,10 +4,10 @@ const expect = require('chai').expect
 const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')
 
 describe('stripEndingSlash', function () {
-	it('stripEndingSlash should keep URL without the slash', function () {
-		expect(stripEndingSlash('http://keep.one')).to.equal('http://keep.one')
-	})
-	it('stripEndingSlash should remove ending slash', function () {
-		expect(stripEndingSlash('http://strip.one/')).to.equal('http://strip.one')
-	})
+  it('stripEndingSlash should keep URL without the slash', function () {
+    expect(stripEndingSlash('http://keep.one')).to.equal('http://keep.one')
+  })
+  it('stripEndingSlash should remove ending slash', function () {
+    expect(stripEndingSlash('http://strip.one/')).to.equal('http://strip.one')
+  })
 })
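
For reference, a helper consistent with the two assertions above could be as small as the following sketch (an assumption; the shipped implementation in stripEndingSlash.js is not part of this excerpt):

// Sketch only, matching the observed test behavior; not the actual file.
function stripEndingSlash(url) {
  // Drop a single trailing slash if present; otherwise return the URL as-is.
  return url.endsWith('/') ? url.slice(0, -1) : url
}

stripEndingSlash('http://keep.one') // => 'http://keep.one'
stripEndingSlash('http://strip.one/') // => 'http://strip.one'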