Shamil Gadelshin, 4 years ago
parent
commit
77c8a67d86
45 changed files with 4601 additions and 4753 deletions
  1. +188 -181  storage-node/packages/cli/bin/cli.js
  2. +73 -73  storage-node/packages/cli/bin/dev.js
  3. +1 -1  storage-node/packages/cli/test/index.js
  4. +202 -200  storage-node/packages/colossus/bin/cli.js
  5. +41 -43  storage-node/packages/colossus/lib/app.js
  6. +38 -40  storage-node/packages/colossus/lib/discovery.js
  7. +21 -22  storage-node/packages/colossus/lib/middleware/file_uploads.js
  8. +38 -38  storage-node/packages/colossus/lib/middleware/validate_responses.js
  9. +80 -85  storage-node/packages/colossus/lib/sync.js
  10. +325 -335  storage-node/packages/colossus/paths/asset/v0/{id}.js
  11. +79 -81  storage-node/packages/colossus/paths/discover/v0/{id}.js
  12. +1 -1  storage-node/packages/colossus/test/index.js
  13. +148 -152  storage-node/packages/discovery/discover.js
  14. +29 -32  storage-node/packages/discovery/example.js
  15. +3 -4  storage-node/packages/discovery/index.js
  16. +43 -42  storage-node/packages/discovery/publish.js
  17. +1 -1  storage-node/packages/discovery/test/index.js
  18. +180 -163  storage-node/packages/helios/bin/cli.js
  19. +1 -1  storage-node/packages/helios/test/index.js
  20. +151 -151  storage-node/packages/runtime-api/assets.js
  21. +47 -56  storage-node/packages/runtime-api/balances.js
  22. +54 -58  storage-node/packages/runtime-api/discovery.js
  23. +198 -198  storage-node/packages/runtime-api/identities.js
  24. +257 -255  storage-node/packages/runtime-api/index.js
  25. +26 -26  storage-node/packages/runtime-api/test/assets.js
  26. +27 -27  storage-node/packages/runtime-api/test/balances.js
  27. +60 -60  storage-node/packages/runtime-api/test/identities.js
  28. +9 -9  storage-node/packages/runtime-api/test/index.js
  29. +257 -254  storage-node/packages/runtime-api/workers.js
  30. +82 -90  storage-node/packages/storage/filter.js
  31. +4 -4  storage-node/packages/storage/index.js
  32. +326 -347  storage-node/packages/storage/storage.js
  33. +196 -196  storage-node/packages/storage/test/storage.js
  34. +8 -8  storage-node/packages/util/externalPromise.js
  35. +28 -30  storage-node/packages/util/fs/resolve.js
  36. +108 -117  storage-node/packages/util/fs/walk.js
  37. +80 -89  storage-node/packages/util/lru.js
  38. +122 -127  storage-node/packages/util/pagination.js
  39. +388 -424  storage-node/packages/util/ranges.js
  40. +6 -6  storage-node/packages/util/stripEndingSlash.js
  41. +39 -50  storage-node/packages/util/test/fs/resolve.js
  42. +35 -38  storage-node/packages/util/test/fs/walk.js
  43. +130 -141  storage-node/packages/util/test/lru.js
  44. +96 -106  storage-node/packages/util/test/pagination.js
  45. +375 -391  storage-node/packages/util/test/ranges.js

+ 188 - 181
storage-node/packages/cli/bin/cli.js

@@ -30,10 +30,11 @@ const dev = require('./dev')
 
 // Parse CLI
 const FLAG_DEFINITIONS = {
-  // TODO
+	// TODO
 }
 
-const cli = meow(`
+const cli = meow(
+	`
   Usage:
     $ storage-cli command [arguments..] [key_file] [passphrase]
 
@@ -54,193 +55,199 @@ const cli = meow(`
     dev-init          Setup chain with Alice as lead and storage provider.
     dev-check         Check the chain is setup with Alice as lead and storage provider.
   `,
-  { flags: FLAG_DEFINITIONS })
+	{ flags: FLAG_DEFINITIONS }
+)
 
-function assert_file (name, filename) {
-  assert(filename, `Need a ${name} parameter to proceed!`)
-  assert(fs.statSync(filename).isFile(), `Path "${filename}" is not a file, aborting!`)
+function assert_file(name, filename) {
+	assert(filename, `Need a ${name} parameter to proceed!`)
+	assert(fs.statSync(filename).isFile(), `Path "${filename}" is not a file, aborting!`)
 }
 
-function load_identity (api, filename, passphrase) {
-  if (filename) {
-    assert_file('keyfile', filename)
-    api.identities.loadUnlock(filename, passphrase)
-  } else {
-    debug('Loading Alice as identity')
-    api.identities.useKeyPair(dev.aliceKeyPair(api))
-  }
+function load_identity(api, filename, passphrase) {
+	if (filename) {
+		assert_file('keyfile', filename)
+		api.identities.loadUnlock(filename, passphrase)
+	} else {
+		debug('Loading Alice as identity')
+		api.identities.useKeyPair(dev.aliceKeyPair(api))
+	}
 }
 
 const commands = {
-  // add Alice well known account as storage provider
-  'dev-init': async (api) => {
-    // dev accounts are automatically loaded, no need to add explicitly to keyring
-    // load_identity(api)
-    let dev = require('./dev')
-    return dev.init(api)
-  },
-  // Checks that the setup done by dev-init command was successful.
-  'dev-check': async (api) => {
-    // dev accounts are automatically loaded, no need to add explicitly to keyring
-    // load_identity(api)
-    let dev = require('./dev')
-    return dev.check(api)
-  },
-  // The upload method is not correctly implemented
-  // needs to get the liaison after creating a data object,
-  // resolve the ipns id to the asset put api url of the storage-node
-  // before uploading..
-  'upload': async (api, url, filename, do_type_id, keyfile, passphrase) => {
-    load_identity(keyfile, passphrase)
-    // Check parameters
-    assert_file('file', filename)
-
-    const size = fs.statSync(filename).size
-    debug(`File "${filename}" is ${chalk.green(size)} Bytes.`)
-
-    if (!do_type_id) {
-      do_type_id = 1
-    }
-
-    debug('Data Object Type ID is: ' + chalk.green(do_type_id))
-
-    // Generate content ID
-    // FIXME this require path is like this because of
-    // https://github.com/Joystream/apps/issues/207
-    const { ContentId } = require('@joystream/types/media')
-    var cid = ContentId.generate()
-    cid = cid.encode().toString()
-    debug('Generated content ID: ' + chalk.green(cid))
-
-    // Create Data Object
-    const data_object = await api.assets.createDataObject(
-      api.identities.key.address, cid, do_type_id, size)
-    debug('Data object created.')
-
-    // TODO in future, optionally contact liaison here?
-    const request = require('request')
-    url = `${url}asset/v0/${cid}`
-    debug('Uploading to URL', chalk.green(url))
-
-    const f = fs.createReadStream(filename)
-    const opts = {
-      url: url,
-      headers: {
-        'content-type': '',
-        'content-length': `${size}`
-      },
-      json: true
-    }
-    return new Promise((resolve, reject) => {
-      const r = request.put(opts, (error, response, body) => {
-        if (error) {
-          reject(error)
-          return
-        }
-
-        if (response.statusCode / 100 !== 2) {
-          reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
-          return
-        }
-        debug('Upload successful:', body.message)
-        resolve()
-      })
-      f.pipe(r)
-    })
-  },
-  // needs to be updated to take a content id and resolve it to a potential set
-  // of providers that have it, and select one (possibly try more than one provider)
-  // to fetch it from the get api url of a provider..
-  'download': async (api, url, content_id, filename) => {
-    const request = require('request')
-    url = `${url}asset/v0/${content_id}`
-    debug('Downloading URL', chalk.green(url), 'to', chalk.green(filename))
-
-    const f = fs.createWriteStream(filename)
-    const opts = {
-      url: url,
-      json: true
-    }
-    return new Promise((resolve, reject) => {
-      const r = request.get(opts, (error, response, body) => {
-        if (error) {
-          reject(error)
-          return
-        }
-
-        debug('Downloading', chalk.green(response.headers['content-type']), 'of size', chalk.green(response.headers['content-length']), '...')
-
-        f.on('error', (err) => {
-          reject(err)
-        })
-
-        f.on('finish', () => {
-          if (response.statusCode / 100 !== 2) {
-            reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
-            return
-          }
-          debug('Download completed.')
-          resolve()
-        })
-      })
-      r.pipe(f)
-    })
-  },
-  // similar to 'download' function
-  'head': async (api, url, content_id) => {
-    const request = require('request')
-    url = `${url}asset/v0/${content_id}`
-    debug('Checking URL', chalk.green(url), '...')
-
-    const opts = {
-      url: url,
-      json: true
-    }
-    return new Promise((resolve, reject) => {
-      const r = request.head(opts, (error, response, body) => {
-        if (error) {
-          reject(error)
-          return
-        }
-
-        if (response.statusCode / 100 !== 2) {
-          reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
-          return
-        }
-
-        for (var propname in response.headers) {
-          debug(`  ${chalk.yellow(propname)}: ${response.headers[propname]}`)
-        }
-
-        resolve()
-      })
-    })
-  }
+	// add Alice well known account as storage provider
+	'dev-init': async (api) => {
+		// dev accounts are automatically loaded, no need to add explicitly to keyring
+		// load_identity(api)
+		const dev = require('./dev')
+		return dev.init(api)
+	},
+	// Checks that the setup done by dev-init command was successful.
+	'dev-check': async (api) => {
+		// dev accounts are automatically loaded, no need to add explicitly to keyring
+		// load_identity(api)
+		const dev = require('./dev')
+		return dev.check(api)
+	},
+	// The upload method is not correctly implemented
+	// needs to get the liaison after creating a data object,
+	// resolve the ipns id to the asset put api url of the storage-node
+	// before uploading..
+	upload: async (api, url, filename, do_type_id, keyfile, passphrase) => {
+		load_identity(keyfile, passphrase)
+		// Check parameters
+		assert_file('file', filename)
+
+		const size = fs.statSync(filename).size
+		debug(`File "${filename}" is ${chalk.green(size)} Bytes.`)
+
+		if (!do_type_id) {
+			do_type_id = 1
+		}
+
+		debug('Data Object Type ID is: ' + chalk.green(do_type_id))
+
+		// Generate content ID
+		// FIXME this require path is like this because of
+		// https://github.com/Joystream/apps/issues/207
+		const { ContentId } = require('@joystream/types/media')
+		let cid = ContentId.generate()
+		cid = cid.encode().toString()
+		debug('Generated content ID: ' + chalk.green(cid))
+
+		// Create Data Object
+		const data_object = await api.assets.createDataObject(api.identities.key.address, cid, do_type_id, size)
+		debug('Data object created.')
+
+		// TODO in future, optionally contact liaison here?
+		const request = require('request')
+		url = `${url}asset/v0/${cid}`
+		debug('Uploading to URL', chalk.green(url))
+
+		const f = fs.createReadStream(filename)
+		const opts = {
+			url,
+			headers: {
+				'content-type': '',
+				'content-length': `${size}`,
+			},
+			json: true,
+		}
+		return new Promise((resolve, reject) => {
+			const r = request.put(opts, (error, response, body) => {
+				if (error) {
+					reject(error)
+					return
+				}
+
+				if (response.statusCode / 100 !== 2) {
+					reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
+					return
+				}
+				debug('Upload successful:', body.message)
+				resolve()
+			})
+			f.pipe(r)
+		})
+	},
+	// needs to be updated to take a content id and resolve it to a potential set
+	// of providers that have it, and select one (possibly try more than one provider)
+	// to fetch it from the get api url of a provider..
+	download: async (api, url, content_id, filename) => {
+		const request = require('request')
+		url = `${url}asset/v0/${content_id}`
+		debug('Downloading URL', chalk.green(url), 'to', chalk.green(filename))
+
+		const f = fs.createWriteStream(filename)
+		const opts = {
+			url,
+			json: true,
+		}
+		return new Promise((resolve, reject) => {
+			const r = request.get(opts, (error, response, body) => {
+				if (error) {
+					reject(error)
+					return
+				}
+
+				debug(
+					'Downloading',
+					chalk.green(response.headers['content-type']),
+					'of size',
+					chalk.green(response.headers['content-length']),
+					'...'
+				)
+
+				f.on('error', (err) => {
+					reject(err)
+				})
+
+				f.on('finish', () => {
+					if (response.statusCode / 100 !== 2) {
+						reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
+						return
+					}
+					debug('Download completed.')
+					resolve()
+				})
+			})
+			r.pipe(f)
+		})
+	},
+	// similar to 'download' function
+	head: async (api, url, content_id) => {
+		const request = require('request')
+		url = `${url}asset/v0/${content_id}`
+		debug('Checking URL', chalk.green(url), '...')
+
+		const opts = {
+			url,
+			json: true,
+		}
+		return new Promise((resolve, reject) => {
+			const r = request.head(opts, (error, response, body) => {
+				if (error) {
+					reject(error)
+					return
+				}
+
+				if (response.statusCode / 100 !== 2) {
+					reject(new Error(`${response.statusCode}: ${body.message || 'unknown reason'}`))
+					return
+				}
+
+				for (const propname in response.headers) {
+					debug(`  ${chalk.yellow(propname)}: ${response.headers[propname]}`)
+				}
+
+				resolve()
+			})
+		})
+	},
 }
 
-async function main () {
-  const api = await RuntimeApi.create()
-
-  // Simple CLI commands
-  const command = cli.input[0]
-  if (!command) {
-    throw new Error('Need a command to run!')
-  }
-
-  if (commands.hasOwnProperty(command)) {
-    // Command recognized
-    const args = _.clone(cli.input).slice(1)
-    await commands[command](api, ...args)
-  } else {
-    throw new Error(`Command "${command}" not recognized, aborting!`)
-  }
+async function main() {
+	const api = await RuntimeApi.create()
+
+	// Simple CLI commands
+	const command = cli.input[0]
+	if (!command) {
+		throw new Error('Need a command to run!')
+	}
+
+	if (commands.hasOwnProperty(command)) {
+		// Command recognized
+		const args = _.clone(cli.input).slice(1)
+		await commands[command](api, ...args)
+	} else {
+		throw new Error(`Command "${command}" not recognized, aborting!`)
+	}
 }
 
 main()
-  .then(() => {
-    process.exit(0)
-  })
-  .catch((err) => {
-    console.error(chalk.red(err.stack))
-    process.exit(-1)
-  })
+	.then(() => {
+		process.exit(0)
+	})
+	.catch((err) => {
+		console.error(chalk.red(err.stack))
+		process.exit(-1)
+	})
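
The rewritten upload command streams the file body into a PUT request rather than buffering it in memory. Below is a minimal standalone sketch of the same pattern, using the same request package; the URL and file name are placeholders. One caveat worth noting: the check response.statusCode / 100 !== 2 in the code above only matches status 200 exactly, because JavaScript division is not integer division, so the sketch floors the quotient to accept any 2xx response.

const fs = require('fs')
const request = require('request')

function uploadFile(url, filename) {
	const size = fs.statSync(filename).size
	return new Promise((resolve, reject) => {
		const opts = {
			url,
			headers: { 'content-type': '', 'content-length': `${size}` },
			json: true,
		}
		// request.put() returns a writable stream, so piping the file into it
		// streams the body to the server instead of loading it all into memory.
		const r = request.put(opts, (error, response, body) => {
			if (error) return reject(error)
			// Flooring accepts any 2xx status, not just 200.
			if (Math.floor(response.statusCode / 100) !== 2) {
				return reject(new Error(`${response.statusCode}: ${(body && body.message) || 'unknown reason'}`))
			}
			resolve(body)
		})
		fs.createReadStream(filename).pipe(r)
	})
}

// Placeholder endpoint and file, matching the asset/v0 URL shape used above.
uploadFile('http://localhost:3001/asset/v0/<content-id>', './example.bin').catch(console.error)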

+ 73 - 73
storage-node/packages/cli/bin/dev.js

@@ -10,119 +10,119 @@ const assert = require('assert')
 const ALICE_URI = '//Alice'
 const ROLE_ACCOUNT_URI = '//Colossus'
 
-function aliceKeyPair (api) {
-  return api.identities.keyring.addFromUri(ALICE_URI, null, 'sr25519')
+function aliceKeyPair(api) {
+	return api.identities.keyring.addFromUri(ALICE_URI, null, 'sr25519')
 }
 
-function roleKeyPair (api) {
-  return api.identities.keyring.addFromUri(ROLE_ACCOUNT_URI, null, 'sr25519')
+function roleKeyPair(api) {
+	return api.identities.keyring.addFromUri(ROLE_ACCOUNT_URI, null, 'sr25519')
 }
 
-function developmentPort () {
-  return 3001
+function developmentPort() {
+	return 3001
 }
 
 const check = async (api) => {
-  const roleAccountId = roleKeyPair(api).address
-  const providerId = await api.workers.findProviderIdByRoleAccount(roleAccountId)
+	const roleAccountId = roleKeyPair(api).address
+	const providerId = await api.workers.findProviderIdByRoleAccount(roleAccountId)
 
-  if (providerId === null) {
-    throw new Error('Dev storage provider not found on chain!')
-  }
+	if (providerId === null) {
+		throw new Error('Dev storage provider not found on chain!')
+	}
 
-  console.log(`
+	console.log(`
   Chain is setup with Dev storage provider:
     providerId = ${providerId}
     roleAccountId = ${roleAccountId}
     roleKey = ${ROLE_ACCOUNT_URI}
   `)
 
-  return providerId
+	return providerId
 }
 
 // Setup Alice account on a development chain as
 // a member, storage lead, and a storage provider using a deterministic
 // development key for the role account
 const init = async (api) => {
-  try {
-    await check(api)
-    return
-  } catch (err) {
-    // We didn't find a storage provider with expected role account
-  }
+	try {
+		await check(api)
+		return
+	} catch (err) {
+		// We didn't find a storage provider with expected role account
+	}
 
-  const alice = aliceKeyPair(api).address
-  const roleAccount = roleKeyPair(api).address
+	const alice = aliceKeyPair(api).address
+	const roleAccount = roleKeyPair(api).address
 
-  debug(`Ensuring Alice is sudo`)
+	debug(`Ensuring Alice is sudo`)
 
-  // make sure alice is sudo - indirectly checking this is a dev chain
-  const sudo = await api.identities.getSudoAccount()
+	// make sure alice is sudo - indirectly checking this is a dev chain
+	const sudo = await api.identities.getSudoAccount()
 
-  if (!sudo.eq(alice)) {
-    throw new Error('Setup requires Alice to be sudo. Are you sure you are running a devchain?')
-  }
+	if (!sudo.eq(alice)) {
+		throw new Error('Setup requires Alice to be sudo. Are you sure you are running a devchain?')
+	}
 
-  console.log('Running setup')
+	console.log('Running setup')
 
-  // set localhost colossus as discovery provider
-  // assuming pioneer dev server is running on port 3000 we should run
-  // the storage dev server on a different port than the default for colossus which is also
-  // 3000
-  debug('Setting Local development node as bootstrap endpoint')
-  await api.discovery.setBootstrapEndpoints(alice, [`http://localhost:${developmentPort()}/`])
+	// set localhost colossus as discovery provider
+	// assuming pioneer dev server is running on port 3000 we should run
+	// the storage dev server on a different port than the default for colossus which is also
+	// 3000
+	debug('Setting Local development node as bootstrap endpoint')
+	await api.discovery.setBootstrapEndpoints(alice, [`http://localhost:${developmentPort()}/`])
 
-  debug('Transferring tokens to storage role account')
-  // Give role account some tokens to work with
-  api.balances.transfer(alice, roleAccount, 100000)
+	debug('Transferring tokens to storage role account')
+	// Give role account some tokens to work with
+	api.balances.transfer(alice, roleAccount, 100000)
 
-  debug('Ensuring Alice is a member..')
-  let aliceMemberId = await api.identities.firstMemberIdOf(alice)
+	debug('Ensuring Alice is a member..')
+	let aliceMemberId = await api.identities.firstMemberIdOf(alice)
 
-  if (aliceMemberId === undefined) {
-    debug('Registering Alice as member..')
-    aliceMemberId = await api.identities.registerMember(alice, {
-      handle: 'alice'
-    })
-  } else {
-    debug('Alice is already a member')
-  }
+	if (aliceMemberId === undefined) {
+		debug('Registering Alice as member..')
+		aliceMemberId = await api.identities.registerMember(alice, {
+			handle: 'alice',
+		})
+	} else {
+		debug('Alice is already a member')
+	}
 
-  // Make alice the storage lead
-  debug('Making Alice the storage Lead')
-  const leadOpeningId = await api.workers.dev_addStorageLeadOpening()
-  const leadApplicationId = await api.workers.dev_applyOnOpening(leadOpeningId, aliceMemberId, alice, alice)
-  api.workers.dev_beginLeadOpeningReview(leadOpeningId)
-  await api.workers.dev_fillLeadOpening(leadOpeningId, leadApplicationId)
+	// Make alice the storage lead
+	debug('Making Alice the storage Lead')
+	const leadOpeningId = await api.workers.dev_addStorageLeadOpening()
+	const leadApplicationId = await api.workers.dev_applyOnOpening(leadOpeningId, aliceMemberId, alice, alice)
+	api.workers.dev_beginLeadOpeningReview(leadOpeningId)
+	await api.workers.dev_fillLeadOpening(leadOpeningId, leadApplicationId)
 
-  const leadAccount = await api.workers.getLeadRoleAccount()
-  if (!leadAccount.eq(alice)) {
-    throw new Error('Setting alice as lead failed')
-  }
+	const leadAccount = await api.workers.getLeadRoleAccount()
+	if (!leadAccount.eq(alice)) {
+		throw new Error('Setting alice as lead failed')
+	}
 
-  // Create a storage opening, apply, start review, and fill opening
-  debug(`Making ${ROLE_ACCOUNT_URI} account a storage provider`)
+	// Create a storage opening, apply, start review, and fill opening
+	debug(`Making ${ROLE_ACCOUNT_URI} account a storage provider`)
 
-  const openingId = await api.workers.dev_addStorageOpening()
-  debug(`created new storage opening: ${openingId}`)
+	const openingId = await api.workers.dev_addStorageOpening()
+	debug(`created new storage opening: ${openingId}`)
 
-  const applicationId = await api.workers.dev_applyOnOpening(openingId, aliceMemberId, alice, roleAccount)
-  debug(`applied with application id: ${applicationId}`)
+	const applicationId = await api.workers.dev_applyOnOpening(openingId, aliceMemberId, alice, roleAccount)
+	debug(`applied with application id: ${applicationId}`)
 
-  api.workers.dev_beginStorageOpeningReview(openingId)
+	api.workers.dev_beginStorageOpeningReview(openingId)
 
-  debug(`filling storage opening`)
-  const providerId = await api.workers.dev_fillStorageOpening(openingId, applicationId)
+	debug(`filling storage opening`)
+	const providerId = await api.workers.dev_fillStorageOpening(openingId, applicationId)
 
-  debug(`Assigned storage provider id: ${providerId}`)
+	debug(`Assigned storage provider id: ${providerId}`)
 
-  return check(api)
+	return check(api)
 }
 
 module.exports = {
-  init,
-  check,
-  aliceKeyPair,
-  roleKeyPair,
-  developmentPort
+	init,
+	check,
+	aliceKeyPair,
+	roleKeyPair,
+	developmentPort,
 }
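
For context, a sketch of how these helpers are driven from a script. It assumes the packages referenced elsewhere in this commit and a joystream-node development chain on ws://localhost:9944:

const { RuntimeApi } = require('@joystream/storage-runtime-api')
const dev = require('./dev')

async function setup() {
	const api = await RuntimeApi.create({ provider_url: 'ws://localhost:9944' })
	// init() is effectively idempotent: it calls check() first and returns
	// early if Alice is already set up as lead and storage provider.
	const providerId = await dev.init(api)
	console.log(`Dev storage provider id: ${providerId}`)
	process.exit(0)
}

setup().catch((err) => {
	console.error(err)
	process.exit(-1)
})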

+ 1 - 1
storage-node/packages/cli/test/index.js

@@ -1 +1 @@
-// Add Tests!
+// Add Tests!

+ 202 - 200
storage-node/packages/colossus/bin/cli.js

@@ -22,41 +22,42 @@ const SYNC_PERIOD_MS = 300000 // 5min
 
 // Parse CLI
 const FLAG_DEFINITIONS = {
-  port: {
-    type: 'number',
-    alias: 'p',
-    default: 3000
-  },
-  keyFile: {
-    type: 'string',
-    isRequired: (flags, input) => {
-      return !flags.dev
-    }
-  },
-  publicUrl: {
-    type: 'string',
-    alias: 'u',
-    isRequired: (flags, input) => {
-      return !flags.dev
-    }
-  },
-  passphrase: {
-    type: 'string'
-  },
-  wsProvider: {
-    type: 'string',
-    default: 'ws://localhost:9944'
-  },
-  providerId: {
-    type: 'number',
-    alias: 'i',
-    isRequired: (flags, input) => {
-      return !flags.dev
-    }
-  }
+	port: {
+		type: 'number',
+		alias: 'p',
+		default: 3000,
+	},
+	keyFile: {
+		type: 'string',
+		isRequired: (flags, input) => {
+			return !flags.dev
+		},
+	},
+	publicUrl: {
+		type: 'string',
+		alias: 'u',
+		isRequired: (flags, input) => {
+			return !flags.dev
+		},
+	},
+	passphrase: {
+		type: 'string',
+	},
+	wsProvider: {
+		type: 'string',
+		default: 'ws://localhost:9944',
+	},
+	providerId: {
+		type: 'number',
+		alias: 'i',
+		isRequired: (flags, input) => {
+			return !flags.dev
+		},
+	},
 }
 
-const cli = meow(`
+const cli = meow(
+	`
   Usage:
     $ colossus [command] [arguments]
 
@@ -76,228 +77,229 @@ const cli = meow(`
     --port=PORT, -p PORT    Port number to listen on, defaults to 3000.
     --ws-provider WS_URL    Joystream-node websocket provider, defaults to ws://localhost:9944
   `,
-  { flags: FLAG_DEFINITIONS })
+	{ flags: FLAG_DEFINITIONS }
+)
 
 // All-important banner!
-function banner () {
-  console.log(chalk.blue(figlet.textSync('joystream', 'Speed')))
+function banner() {
+	console.log(chalk.blue(figlet.textSync('joystream', 'Speed')))
 }
 
 function start_express_app(app, port) {
-  const http = require('http')
-  const server = http.createServer(app)
-
-  return new Promise((resolve, reject) => {
-    server.on('error', reject)
-    server.on('close', (...args) => {
-      console.log('Server closed, shutting down...')
-      resolve(...args)
-    })
-    server.on('listening', () => {
-      console.log('API server started.', server.address())
-    })
-    server.listen(port, '::')
-    console.log('Starting API server...')
-  })
+	const http = require('http')
+	const server = http.createServer(app)
+
+	return new Promise((resolve, reject) => {
+		server.on('error', reject)
+		server.on('close', (...args) => {
+			console.log('Server closed, shutting down...')
+			resolve(...args)
+		})
+		server.on('listening', () => {
+			console.log('API server started.', server.address())
+		})
+		server.listen(port, '::')
+		console.log('Starting API server...')
+	})
 }
 
 // Start app
-function start_all_services ({ store, api, port }) {
-  const app = require('../lib/app')(PROJECT_ROOT, store, api) // reduce flags to only needed values
-  return start_express_app(app, port)
+function start_all_services({ store, api, port }) {
+	const app = require('../lib/app')(PROJECT_ROOT, store, api) // reduce flags to only needed values
+	return start_express_app(app, port)
 }
 
 // Start discovery service app only
-function start_discovery_service ({ api, port }) {
-  const app = require('../lib/discovery')(PROJECT_ROOT, api) // reduce flags to only needed values
-  return start_express_app(app, port)
+function start_discovery_service({ api, port }) {
+	const app = require('../lib/discovery')(PROJECT_ROOT, api) // reduce flags to only needed values
+	return start_express_app(app, port)
 }
 
 // Get an initialized storage instance
-function get_storage (runtime_api) {
-  // TODO at some point, we can figure out what backend-specific connection
-  // options make sense. For now, just don't use any configuration.
-  const { Storage } = require('@joystream/storage-node-backend')
-
-  const options = {
-    resolve_content_id: async (content_id) => {
-      // Resolve via API
-      const obj = await runtime_api.assets.getDataObject(content_id)
-      if (!obj || obj.isNone) {
-        return
-      }
-      // if obj.liaison_judgement !== Accepted .. throw ?
-      return obj.unwrap().ipfs_content_id.toString()
-    }
-  }
-
-  return Storage.create(options)
+function get_storage(runtime_api) {
+	// TODO at some point, we can figure out what backend-specific connection
+	// options make sense. For now, just don't use any configuration.
+	const { Storage } = require('@joystream/storage-node-backend')
+
+	const options = {
+		resolve_content_id: async (content_id) => {
+			// Resolve via API
+			const obj = await runtime_api.assets.getDataObject(content_id)
+			if (!obj || obj.isNone) {
+				return
+			}
+			// if obj.liaison_judgement !== Accepted .. throw ?
+			return obj.unwrap().ipfs_content_id.toString()
+		},
+	}
+
+	return Storage.create(options)
 }
 
-async function init_api_production ({ wsProvider, providerId, keyFile, passphrase }) {
-  // Load key information
-  const { RuntimeApi } = require('@joystream/storage-runtime-api')
+async function init_api_production({ wsProvider, providerId, keyFile, passphrase }) {
+	// Load key information
+	const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
-  if (!keyFile) {
-    throw new Error('Must specify a --key-file argument for running a storage node.')
-  }
+	if (!keyFile) {
+		throw new Error('Must specify a --key-file argument for running a storage node.')
+	}
 
-  if (providerId === undefined) {
-    throw new Error('Must specify a --provider-id argument for running a storage node')
-  }
+	if (providerId === undefined) {
+		throw new Error('Must specify a --provider-id argument for running a storage node')
+	}
 
-  const api = await RuntimeApi.create({
-    account_file: keyFile,
-    passphrase,
-    provider_url: wsProvider,
-    storageProviderId: providerId
-  })
+	const api = await RuntimeApi.create({
+		account_file: keyFile,
+		passphrase,
+		provider_url: wsProvider,
+		storageProviderId: providerId,
+	})
 
-  if (!api.identities.key) {
-    throw new Error('Failed to unlock storage provider account')
-  }
+	if (!api.identities.key) {
+		throw new Error('Failed to unlock storage provider account')
+	}
 
-  if (!await api.workers.isRoleAccountOfStorageProvider(api.storageProviderId, api.identities.key.address)) {
-    throw new Error('storage provider role account and storageProviderId are not associated with a worker')
-  }
+	if (!(await api.workers.isRoleAccountOfStorageProvider(api.storageProviderId, api.identities.key.address))) {
+		throw new Error('storage provider role account and storageProviderId are not associated with a worker')
+	}
 
-  return api
+	return api
 }
 
-async function init_api_development () {
-  // Load key information
-  const { RuntimeApi } = require('@joystream/storage-runtime-api')
+async function init_api_development() {
+	// Load key information
+	const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
-  const wsProvider = 'ws://localhost:9944'
+	const wsProvider = 'ws://localhost:9944'
 
-  const api = await RuntimeApi.create({
-    provider_url: wsProvider
-  })
+	const api = await RuntimeApi.create({
+		provider_url: wsProvider,
+	})
 
-  const dev = require('../../cli/bin/dev')
+	const dev = require('../../cli/bin/dev')
 
-  api.identities.useKeyPair(dev.roleKeyPair(api))
+	api.identities.useKeyPair(dev.roleKeyPair(api))
 
-  api.storageProviderId = await dev.check(api)
+	api.storageProviderId = await dev.check(api)
 
-  return api
+	return api
 }
 
-function get_service_information (publicUrl) {
-  // For now assume we run all services on the same endpoint
-  return({
-    asset: {
-      version: 1, // spec version
-      endpoint: publicUrl
-    },
-    discover: {
-      version: 1, // spec version
-      endpoint: publicUrl
-    }
-  })
+function get_service_information(publicUrl) {
+	// For now assume we run all services on the same endpoint
+	return {
+		asset: {
+			version: 1, // spec version
+			endpoint: publicUrl,
+		},
+		discover: {
+			version: 1, // spec version
+			endpoint: publicUrl,
+		},
+	}
 }
 
-async function announce_public_url (api, publicUrl) {
-  // re-announce in future
-  const reannounce = function (timeoutMs) {
-    setTimeout(announce_public_url, timeoutMs, api, publicUrl)
-  }
+async function announce_public_url(api, publicUrl) {
+	// re-announce in future
+	const reannounce = function (timeoutMs) {
+		setTimeout(announce_public_url, timeoutMs, api, publicUrl)
+	}
 
-  debug('announcing public url')
-  const { publish } = require('@joystream/service-discovery')
+	debug('announcing public url')
+	const { publish } = require('@joystream/service-discovery')
 
-  try {
-    const serviceInformation = get_service_information(publicUrl)
+	try {
+		const serviceInformation = get_service_information(publicUrl)
 
-    let keyId = await publish.publish(serviceInformation)
+		const keyId = await publish.publish(serviceInformation)
 
-    await api.discovery.setAccountInfo(keyId)
+		await api.discovery.setAccountInfo(keyId)
 
-    debug('publishing complete, scheduling next update')
+		debug('publishing complete, scheduling next update')
 
-// >> sometimes after tx is finalized.. we are not reaching here!
+		// >> sometimes after tx is finalized.. we are not reaching here!
 
-    // Reannounce before expiry. Here we are concerned primarily
-    // with keeping the account information refreshed and 'available' in
-    // the ipfs network. our record on chain is valid for 24hr
-    reannounce(50 * 60 * 1000) // in 50 minutes
-  } catch (err) {
-    debug(`announcing public url failed: ${err.stack}`)
+		// Reannounce before expiry. Here we are concerned primarily
+		// with keeping the account information refreshed and 'available' in
+		// the ipfs network. our record on chain is valid for 24hr
+		reannounce(50 * 60 * 1000) // in 50 minutes
+	} catch (err) {
+		debug(`announcing public url failed: ${err.stack}`)
 
-    // On failure retry sooner
-    debug(`announcing failed, retrying in: 2 minutes`)
-    reannounce(120 * 1000)
-  }
+		// On failure retry sooner
+		debug(`announcing failed, retrying in: 2 minutes`)
+		reannounce(120 * 1000)
+	}
 }
 
-function go_offline (api) {
-  return api.discovery.unsetAccountInfo()
+function go_offline(api) {
+	return api.discovery.unsetAccountInfo()
 }
 
 // Simple CLI commands
-var command = cli.input[0]
+let command = cli.input[0]
 if (!command) {
-  command = 'server'
+	command = 'server'
 }
 
-async function start_colossus ({ api, publicUrl, port, flags }) {
-  // TODO: check valid url, and valid port number
-  const store = get_storage(api)
-  banner()
-  const { start_syncing } = require('../lib/sync')
-  start_syncing(api, { syncPeriod: SYNC_PERIOD_MS }, store)
-  announce_public_url(api, publicUrl)
-  return start_all_services({ store, api, port, flags }) // don't pass all flags, only required values
+async function start_colossus({ api, publicUrl, port, flags }) {
+	// TODO: check valid url, and valid port number
+	const store = get_storage(api)
+	banner()
+	const { start_syncing } = require('../lib/sync')
+	start_syncing(api, { syncPeriod: SYNC_PERIOD_MS }, store)
+	announce_public_url(api, publicUrl)
+	return start_all_services({ store, api, port, flags }) // don't pass all flags, only required values
 }
 
 const commands = {
-  'server': async () => {
-    let publicUrl, port, api
-
-    if (cli.flags.dev) {
-      const dev = require('../../cli/bin/dev')
-      api = await init_api_development()
-      port = dev.developmentPort()
-      publicUrl = `http://localhost:${port}/`
-    } else {
-      api = await init_api_production(cli.flags)
-      publicUrl = cli.flags.publicUrl
-      port = cli.flags.port
-    }
-
-    return start_colossus({ api, publicUrl, port })
-  },
-  'discovery': async () => {
-    debug('Starting Joystream Discovery Service')
-    const { RuntimeApi } = require('@joystream/storage-runtime-api')
-    const wsProvider = cli.flags.wsProvider
-    const api = await RuntimeApi.create({ provider_url: wsProvider })
-    const port = cli.flags.port
-    await start_discovery_service({ api, port })
-  }
+	server: async () => {
+		let publicUrl, port, api
+
+		if (cli.flags.dev) {
+			const dev = require('../../cli/bin/dev')
+			api = await init_api_development()
+			port = dev.developmentPort()
+			publicUrl = `http://localhost:${port}/`
+		} else {
+			api = await init_api_production(cli.flags)
+			publicUrl = cli.flags.publicUrl
+			port = cli.flags.port
+		}
+
+		return start_colossus({ api, publicUrl, port })
+	},
+	discovery: async () => {
+		debug('Starting Joystream Discovery Service')
+		const { RuntimeApi } = require('@joystream/storage-runtime-api')
+		const wsProvider = cli.flags.wsProvider
+		const api = await RuntimeApi.create({ provider_url: wsProvider })
+		const port = cli.flags.port
+		await start_discovery_service({ api, port })
+	},
 }
 
-async function main () {
-  // Simple CLI commands
-  var command = cli.input[0]
-  if (!command) {
-    command = 'server'
-  }
-
-  if (commands.hasOwnProperty(command)) {
-    // Command recognized
-    const args = _.clone(cli.input).slice(1)
-    await commands[command](...args)
-  } else {
-    throw new Error(`Command '${command}' not recognized, aborting!`)
-  }
+async function main() {
+	// Simple CLI commands
+	let command = cli.input[0]
+	if (!command) {
+		command = 'server'
+	}
+
+	if (commands.hasOwnProperty(command)) {
+		// Command recognized
+		const args = _.clone(cli.input).slice(1)
+		await commands[command](...args)
+	} else {
+		throw new Error(`Command '${command}' not recognized, aborting!`)
+	}
 }
 
 main()
-  .then(() => {
-    process.exit(0)
-  })
-  .catch((err) => {
-    console.error(chalk.red(err.stack))
-    process.exit(-1)
-  })
+	.then(() => {
+		process.exit(0)
+	})
+	.catch((err) => {
+		console.error(chalk.red(err.stack))
+		process.exit(-1)
+	})
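
start_express_app above wraps the Node HTTP server in a promise that settles only on 'close' or 'error', which is what keeps main() alive for the lifetime of the server. The same pattern in isolation, with a plain request handler standing in for the express app:

const http = require('http')

function runServer(handler, port) {
	const server = http.createServer(handler)
	return new Promise((resolve, reject) => {
		server.on('error', reject) // e.g. EADDRINUSE fails the promise
		server.on('close', resolve) // settles only when the server shuts down
		server.on('listening', () => {
			console.log('listening on', server.address())
		})
		server.listen(port, '::')
	})
}

// The returned promise keeps the process running until close or failure.
runServer((req, res) => res.end('ok'), 3000).catch(console.error)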

+ 41 - 43
storage-node/packages/colossus/lib/app.js

@@ -16,61 +16,59 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
 // Node requires
-const fs = require('fs');
-const path = require('path');
+const fs = require('fs')
+const path = require('path')
 
 // npm requires
-const express = require('express');
-const openapi = require('express-openapi');
-const bodyParser = require('body-parser');
-const cors = require('cors');
-const yaml = require('js-yaml');
+const express = require('express')
+const openapi = require('express-openapi')
+const bodyParser = require('body-parser')
+const cors = require('cors')
+const yaml = require('js-yaml')
 
 // Project requires
-const validateResponses = require('./middleware/validate_responses');
-const fileUploads = require('./middleware/file_uploads');
-const pagination = require('@joystream/storage-utils/pagination');
+const validateResponses = require('./middleware/validate_responses')
+const fileUploads = require('./middleware/file_uploads')
+const pagination = require('@joystream/storage-utils/pagination')
 
 // Configure app
-function create_app(project_root, storage, runtime)
-{
-  const app = express();
-  app.use(cors());
-  app.use(bodyParser.json());
-  // FIXME app.use(bodyParser.urlencoded({ extended: true }));
+function create_app(project_root, storage, runtime) {
+	const app = express()
+	app.use(cors())
+	app.use(bodyParser.json())
+	// FIXME app.use(bodyParser.urlencoded({ extended: true }));
 
-  // Load & extend/configure API docs
-  var api = yaml.safeLoad(fs.readFileSync(
-    path.resolve(project_root, 'api-base.yml')));
-  api['x-express-openapi-additional-middleware'] = [validateResponses];
-  api['x-express-openapi-validation-strict'] = true;
+	// Load & extend/configure API docs
+	let api = yaml.safeLoad(fs.readFileSync(path.resolve(project_root, 'api-base.yml')))
+	api['x-express-openapi-additional-middleware'] = [validateResponses]
+	api['x-express-openapi-validation-strict'] = true
 
-  api = pagination.openapi(api);
+	api = pagination.openapi(api)
 
-  openapi.initialize({
-    apiDoc: api,
-    app: app,
-    paths: path.resolve(project_root, 'paths'),
-    docsPath: '/swagger.json',
-    consumesMiddleware: {
-      'multipart/form-data': fileUploads
-    },
-    dependencies: {
-      storage: storage,
-      runtime: runtime,
-    },
-  });
+	openapi.initialize({
+		apiDoc: api,
+		app,
+		paths: path.resolve(project_root, 'paths'),
+		docsPath: '/swagger.json',
+		consumesMiddleware: {
+			'multipart/form-data': fileUploads,
+		},
+		dependencies: {
+			storage,
+			runtime,
+		},
+	})
 
-  // If no other handler gets triggered (errors), respond with the
-  // error serialized to JSON.
-  app.use(function(err, req, res, next) {
-    res.status(err.status).json(err);
-  });
+	// If no other handler gets triggered (errors), respond with the
+	// error serialized to JSON.
+	app.use(function (err, req, res, next) {
+		res.status(err.status).json(err)
+	})
 
-  return app;
+	return app
 }
 
-module.exports = create_app;
+module.exports = create_app
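
The final app.use above is the catch-all error handler: anything passed to next(err) is serialized to JSON with its status code. A self-contained sketch of that convention follows; note that it defaults to 500, since err.status may be undefined, which the handler above does not guard against:

const express = require('express')

const app = express()

app.get('/fail', (req, res, next) => {
	const err = new Error('teapot')
	err.status = 418
	next(err) // hand off to the error middleware below
})

// The four-argument signature marks this as express error-handling middleware.
app.use(function (err, req, res, next) {
	res.status(err.status || 500).json({ message: err.message })
})

app.listen(3000)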

+ 38 - 40
storage-node/packages/colossus/lib/discovery.js

@@ -16,57 +16,55 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
 // npm requires
-const express = require('express');
-const openapi = require('express-openapi');
-const bodyParser = require('body-parser');
-const cors = require('cors');
-const yaml = require('js-yaml');
+const express = require('express')
+const openapi = require('express-openapi')
+const bodyParser = require('body-parser')
+const cors = require('cors')
+const yaml = require('js-yaml')
 
 // Node requires
-const fs = require('fs');
-const path = require('path');
+const fs = require('fs')
+const path = require('path')
 
 // Project requires
-const validateResponses = require('./middleware/validate_responses');
+const validateResponses = require('./middleware/validate_responses')
 
 // Configure app
-function create_app(project_root, runtime)
-{
-  const app = express();
-  app.use(cors());
-  app.use(bodyParser.json());
-  // FIXME app.use(bodyParser.urlencoded({ extended: true }));
+function create_app(project_root, runtime) {
+	const app = express()
+	app.use(cors())
+	app.use(bodyParser.json())
+	// FIXME app.use(bodyParser.urlencoded({ extended: true }));
 
-  // Load & extend/configure API docs
-  var api = yaml.safeLoad(fs.readFileSync(
-    path.resolve(project_root, 'api-base.yml')));
-  api['x-express-openapi-additional-middleware'] = [validateResponses];
-  api['x-express-openapi-validation-strict'] = true;
+	// Load & extend/configure API docs
+	const api = yaml.safeLoad(fs.readFileSync(path.resolve(project_root, 'api-base.yml')))
+	api['x-express-openapi-additional-middleware'] = [validateResponses]
+	api['x-express-openapi-validation-strict'] = true
 
-  openapi.initialize({
-    apiDoc: api,
-    app: app,
-    //paths: path.resolve(project_root, 'discovery_app_paths'),
-    paths: {
-      path: '/discover/v0/{id}',
-      module: require('../paths/discover/v0/{id}')
-    },
-    docsPath: '/swagger.json',
-    dependencies: {
-      runtime: runtime,
-    },
-  });
+	openapi.initialize({
+		apiDoc: api,
+		app,
+		// paths: path.resolve(project_root, 'discovery_app_paths'),
+		paths: {
+			path: '/discover/v0/{id}',
+			module: require('../paths/discover/v0/{id}'),
+		},
+		docsPath: '/swagger.json',
+		dependencies: {
+			runtime,
+		},
+	})
 
-  // If no other handler gets triggered (errors), respond with the
-  // error serialized to JSON.
-  app.use(function(err, req, res, next) {
-    res.status(err.status).json(err);
-  });
+	// If no other handler gets triggered (errors), respond with the
+	// error serialized to JSON.
+	app.use(function (err, req, res, next) {
+		res.status(err.status).json(err)
+	})
 
-  return app;
+	return app
 }
 
-module.exports = create_app;
+module.exports = create_app

+ 21 - 22
storage-node/packages/colossus/lib/middleware/file_uploads.js

@@ -16,29 +16,28 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const multer = require('multer');
+const multer = require('multer')
 
 // Taken from express-openapi examples
-module.exports = function(req, res, next)
-{
-  multer().any()(req, res, function(err) {
-    if (err) {
-      return next(err);
-    }
-    // Handle both single and multiple files
-    const filesMap = req.files.reduce(
-      (acc, f) =>
-        Object.assign(acc, {
-          [f.fieldname]: (acc[f.fieldname] || []).concat(f)
-        }),
-      {}
-    );
-    Object.keys(filesMap).forEach((fieldname) => {
-      const files = filesMap[fieldname];
-      req.body[fieldname] = files.length > 1 ? files.map(() => '') : '';
-    });
-    return next();
-  });
+module.exports = function (req, res, next) {
+	multer().any()(req, res, function (err) {
+		if (err) {
+			return next(err)
+		}
+		// Handle both single and multiple files
+		const filesMap = req.files.reduce(
+			(acc, f) =>
+				Object.assign(acc, {
+					[f.fieldname]: (acc[f.fieldname] || []).concat(f),
+				}),
+			{}
+		)
+		Object.keys(filesMap).forEach((fieldname) => {
+			const files = filesMap[fieldname]
+			req.body[fieldname] = files.length > 1 ? files.map(() => '') : ''
+		})
+		return next()
+	})
 }
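
The reduce in this middleware groups multer's flat req.files array by field name, so req.body can then be given one entry per field. The grouping step in isolation, with stand-in data:

// Entries mirror the shape of multer's file objects; only fieldname matters here.
const files = [
	{ fieldname: 'avatar', originalname: 'a.png' },
	{ fieldname: 'docs', originalname: 'b.pdf' },
	{ fieldname: 'docs', originalname: 'c.pdf' },
]

// Same reduce as above: fold the flat array into { fieldname: [files...] }.
const filesMap = files.reduce(
	(acc, f) => Object.assign(acc, { [f.fieldname]: (acc[f.fieldname] || []).concat(f) }),
	{}
)

console.log(filesMap.avatar.length) // 1
console.log(filesMap.docs.length) // 2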

+ 38 - 38
storage-node/packages/colossus/lib/middleware/validate_responses.js

@@ -16,46 +16,46 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const debug = require('debug')('joystream:middleware:validate');
+const debug = require('debug')('joystream:middleware:validate')
 
 // Function taken directly from https://github.com/kogosoftwarellc/open-api/tree/master/packages/express-openapi
-module.exports = function(req, res, next)
-{
-  const strictValidation = req.apiDoc['x-express-openapi-validation-strict'] ? true : false;
-  if (typeof res.validateResponse === 'function') {
-    const send = res.send;
-    res.send = function expressOpenAPISend(...args) {
-      const onlyWarn = !strictValidation;
-      if (res.get('x-express-openapi-validation-error-for') !== undefined) {
-        return send.apply(res, args);
-      }
-      if (res.get('x-express-openapi-validation-for') !== undefined) {
-        return send.apply(res, args);
-      }
+module.exports = function (req, res, next) {
+	const strictValidation = !!req.apiDoc['x-express-openapi-validation-strict']
+	if (typeof res.validateResponse === 'function') {
+		const send = res.send
+		res.send = function expressOpenAPISend(...args) {
+			const onlyWarn = !strictValidation
+			if (res.get('x-express-openapi-validation-error-for') !== undefined) {
+				return send.apply(res, args)
+			}
+			if (res.get('x-express-openapi-validation-for') !== undefined) {
+				return send.apply(res, args)
+			}
 
-      const body = args[0];
-      let validation = res.validateResponse(res.statusCode, body);
-      let validationMessage;
-      if (validation === undefined) {
-        validation = { message: undefined, errors: undefined };
-      }
-      if (validation.errors) {
-        const errorList = Array.from(validation.errors).map((_) => _.message).join(',');
-        validationMessage = `Invalid response for status code ${res.statusCode}: ${errorList}`;
-        debug(validationMessage);
-        // Set to avoid a loop, and to provide the original status code
-        res.set('x-express-openapi-validation-error-for', res.statusCode.toString());
-      }
-      if ((onlyWarn || !validation.errors) && res.statusCode) {
-        res.set('x-express-openapi-validation-for', res.statusCode.toString());
-        return send.apply(res, args);
-      } else {
-        res.status(500);
-        return res.json({ error: validationMessage });
-      }
-    }
-  }
-  next();
+			const body = args[0]
+			let validation = res.validateResponse(res.statusCode, body)
+			let validationMessage
+			if (validation === undefined) {
+				validation = { message: undefined, errors: undefined }
+			}
+			if (validation.errors) {
+				const errorList = Array.from(validation.errors)
+					.map((_) => _.message)
+					.join(',')
+				validationMessage = `Invalid response for status code ${res.statusCode}: ${errorList}`
+				debug(validationMessage)
+				// Set to avoid a loop, and to provide the original status code
+				res.set('x-express-openapi-validation-error-for', res.statusCode.toString())
+			}
+			if ((onlyWarn || !validation.errors) && res.statusCode) {
+				res.set('x-express-openapi-validation-for', res.statusCode.toString())
+				return send.apply(res, args)
+			}
+			res.status(500)
+			return res.json({ error: validationMessage })
+		}
+	}
+	next()
 }

+ 80 - 85
storage-node/packages/colossus/lib/sync.js

@@ -16,99 +16,94 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const debug = require('debug')('joystream:sync');
+const debug = require('debug')('joystream:sync')
 
 async function sync_callback(api, storage) {
-  // The first step is to gather all data objects from chain.
-  // TODO: in future, limit to a configured tranche
-  // FIXME this isn't actually on chain yet, so we'll fake it.
-  const knownContentIds = await api.assets.getKnownContentIds() || [];
-
-  const role_addr = api.identities.key.address
-  const providerId = api.storageProviderId
-
-  // Iterate over all sync objects, and ensure they're synced.
-  const allChecks = knownContentIds.map(async (content_id) => {
-    let { relationship, relationshipId } = await api.assets.getStorageRelationshipAndId(providerId, content_id);
-
-    // get the data object
-    // make sure the data object was Accepted by the liaison,
-    // don't just blindly attempt to fetch them
-
-    let fileLocal;
-    try {
-      // check if we have content or not
-      let stats = await storage.stat(content_id);
-      fileLocal = stats.local;
-    } catch (err) {
-      // on error stating or timeout
-      debug(err.message);
-      // we don't have content if we can't stat it
-      fileLocal = false;
-    }
-
-    if (!fileLocal) {
-      try {
-        await storage.synchronize(content_id);
-      } catch (err) {
-        // duplicate logging
-        // debug(err.message)
-        return
-      }
-      // why are we returning, if we synced the file
-      return;
-    }
-
-    if (!relationship) {
-      // create relationship
-      debug(`Creating new storage relationship for ${content_id.encode()}`);
-      try {
-        relationshipId = await api.assets.createAndReturnStorageRelationship(role_addr, providerId, content_id);
-        await api.assets.toggleStorageRelationshipReady(role_addr, providerId, relationshipId, true);
-      } catch (err) {
-        debug(`Error creating new storage relationship ${content_id.encode()}: ${err.stack}`);
-        return;
-      }
-    } else if (!relationship.ready) {
-      debug(`Updating storage relationship to ready for ${content_id.encode()}`);
-      // update to ready. (Why would there be a relationship set to ready: false?)
-      try {
-        await api.assets.toggleStorageRelationshipReady(role_addr, providerId, relationshipId, true);
-      } catch(err) {
-        debug(`Error setting relationship ready ${content_id.encode()}: ${err.stack}`);
-      }
-    } else {
-      // we already have content and a ready relationship set. No need to do anything
-      // debug(`content already stored locally ${content_id.encode()}`);
-    }
-  });
-
-
-  return Promise.all(allChecks);
+	// The first step is to gather all data objects from chain.
+	// TODO: in future, limit to a configured tranche
+	// FIXME this isn't actually on chain yet, so we'll fake it.
+	const knownContentIds = (await api.assets.getKnownContentIds()) || []
+
+	const role_addr = api.identities.key.address
+	const providerId = api.storageProviderId
+
+	// Iterate over all sync objects, and ensure they're synced.
+	const allChecks = knownContentIds.map(async (content_id) => {
+		let { relationship, relationshipId } = await api.assets.getStorageRelationshipAndId(providerId, content_id)
+
+		// get the data object
+		// make sure the data object was Accepted by the liaison,
+		// don't just blindly attempt to fetch them
+
+		let fileLocal
+		try {
+			// check if we have content or not
+			const stats = await storage.stat(content_id)
+			fileLocal = stats.local
+		} catch (err) {
+			// on error stating or timeout
+			debug(err.message)
+			// we don't have content if we can't stat it
+			fileLocal = false
+		}
+
+		if (!fileLocal) {
+			try {
+				await storage.synchronize(content_id)
+			} catch (err) {
+				// duplicate logging
+				// debug(err.message)
+				return
+			}
+			// why are we returning, if we synced the file
+			return
+		}
+
+		if (!relationship) {
+			// create relationship
+			debug(`Creating new storage relationship for ${content_id.encode()}`)
+			try {
+				relationshipId = await api.assets.createAndReturnStorageRelationship(role_addr, providerId, content_id)
+				await api.assets.toggleStorageRelationshipReady(role_addr, providerId, relationshipId, true)
+			} catch (err) {
+				debug(`Error creating new storage relationship ${content_id.encode()}: ${err.stack}`)
+				return
+			}
+		} else if (!relationship.ready) {
+			debug(`Updating storage relationship to ready for ${content_id.encode()}`)
+			// update to ready. (Why would there be a relationship set to ready: false?)
+			try {
+				await api.assets.toggleStorageRelationshipReady(role_addr, providerId, relationshipId, true)
+			} catch (err) {
+				debug(`Error setting relationship ready ${content_id.encode()}: ${err.stack}`)
+			}
+		} else {
+			// we already have content and a ready relationship set. No need to do anything
+			// debug(`content already stored locally ${content_id.encode()}`);
+		}
+	})
+
+	return Promise.all(allChecks)
 }
 
-
-async function sync_periodic(api, flags, storage)
-{
-  try {
-    debug('Starting sync run...')
-    await sync_callback(api, storage)
-    debug('sync run complete')
-  } catch (err) {
-    debug(`Error in sync_periodic ${err.stack}`);
-  }
-  // always try again
-  setTimeout(sync_periodic, flags.syncPeriod, api, flags, storage);
+async function sync_periodic(api, flags, storage) {
+	try {
+		debug('Starting sync run...')
+		await sync_callback(api, storage)
+		debug('sync run complete')
+	} catch (err) {
+		debug(`Error in sync_periodic ${err.stack}`)
+	}
+	// always try again
+	setTimeout(sync_periodic, flags.syncPeriod, api, flags, storage)
 }
 
-
-function start_syncing(api, flags, storage)
-{
-  sync_periodic(api, flags, storage);
+function start_syncing(api, flags, storage) {
+	sync_periodic(api, flags, storage)
 }
 
 module.exports = {
-  start_syncing: start_syncing,
+	start_syncing,
 }
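
sync_periodic re-arms itself with setTimeout after each run fully settles, rather than using setInterval, so a slow sync pass can never overlap the next one. The scheduling pattern in isolation (the logger and task body are stand-ins):

const log = console.log // stand-in for the debug('joystream:sync') logger

async function periodic(task, periodMs) {
	try {
		await task()
	} catch (err) {
		log(`periodic task failed: ${err.stack}`)
	}
	// Re-arm only after the current run completes or fails; setTimeout
	// forwards the extra arguments to the next invocation.
	setTimeout(periodic, periodMs, task, periodMs)
}

periodic(async () => log('sync run'), 5000)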

+ 325 - 335
storage-node/packages/colossus/paths/asset/v0/{id}.js

@@ -16,344 +16,334 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const path = require('path');
+const path = require('path')
 
-const debug = require('debug')('joystream:colossus:api:asset');
+const debug = require('debug')('joystream:colossus:api:asset')
 
-const util_ranges = require('@joystream/storage-utils/ranges');
-const filter = require('@joystream/storage-node-backend/filter');
+const util_ranges = require('@joystream/storage-utils/ranges')
+const filter = require('@joystream/storage-node-backend/filter')
 
-function error_handler(response, err, code)
-{
-  debug(err);
-  response.status((err.code || code) || 500).send({ message: err.toString() });
+function error_handler(response, err, code) {
+	debug(err)
+	response.status(err.code || code || 500).send({ message: err.toString() })
 }
 
-
-module.exports = function(storage, runtime)
-{
-  var doc = {
-    // parameters for all operations in this path
-    parameters: [
-      {
-        name: 'id',
-        in: 'path',
-        required: true,
-        description: 'Joystream Content ID',
-        schema: {
-          type: 'string',
-        },
-      },
-    ],
-
-    // Head: report that ranges are OK
-    head: async function(req, res, _next)
-    {
-      const id = req.params.id;
-
-      // Open file
-      try {
-        const size = await storage.size(id);
-        const stream = await storage.open(id, 'r');
-        const type = stream.file_info.mime_type;
-
-        // Close the stream; we don't need to fetch the file (if we haven't
-        // already). Then return result.
-        stream.destroy();
-
-        res.status(200);
-        res.contentType(type);
-        res.header('Content-Disposition', 'inline');
-        res.header('Content-Transfer-Encoding', 'binary');
-        res.header('Accept-Ranges', 'bytes');
-        if (size > 0) {
-          res.header('Content-Length', size);
-        }
-        res.send();
-      } catch (err) {
-        error_handler(res, err, err.code);
-      }
-    },
-
-    // Put for uploads
-    put: async function(req, res, _next)
-    {
-      const id = req.params.id; // content id
-
-      // First check if we're the liaison for the name, otherwise we can bail
-      // out already.
-      const role_addr = runtime.identities.key.address;
-      const providerId = runtime.storageProviderId;
-      let dataObject;
-      try {
-        debug('calling checkLiaisonForDataObject')
-        dataObject = await runtime.assets.checkLiaisonForDataObject(providerId, id);
-        debug('called checkLiaisonForDataObject')
-      } catch (err) {
-        error_handler(res, err, 403);
-        return;
-      }
-
-      // We'll open a write stream to the backend, but reserve the right to
-      // abort upload if the filters don't smell right.
-      var stream;
-      try {
-        stream = await storage.open(id, 'w');
-
-        // We don't know whether the filtering occurs before or after the
-        // stream was finished, and can only commit if both passed.
-        var finished = false;
-        var accepted = false;
-        const possibly_commit = () => {
-          if (finished && accepted) {
-            debug('Stream is finished and passed filters; committing.');
-            stream.commit();
-          }
-        };
-
-
-        stream.on('file_info', async (info) => {
-          try {
-            debug('Detected file info:', info);
-
-            // Filter
-            const filter_result = filter({}, req.headers, info.mime_type);
-            if (200 != filter_result.code) {
-              debug('Rejecting content', filter_result.message);
-              stream.end();
-              res.status(filter_result.code).send({ message: filter_result.message });
-
-              // Reject the content
-              await runtime.assets.rejectContent(role_addr, providerId, id);
-              return;
-            }
-            debug('Content accepted.');
-            accepted = true;
-
-            // We may have to commit the stream.
-            possibly_commit();
-          } catch (err) {
-            error_handler(res, err);
-          }
-        });
-
-        stream.on('finish', () => {
-          try {
-            finished = true;
-            possibly_commit();
-          } catch (err) {
-            error_handler(res, err);
-          }
-        });
-
-        stream.on('committed', async (hash) => {
-          console.log('commited', dataObject)
-          try {
-            if (hash !== dataObject.ipfs_content_id.toString()) {
-              debug('Rejecting content. IPFS hash does not match value in objectId');
-              await runtime.assets.rejectContent(role_addr, providerId, id);
-              res.status(400).send({ message: "Uploaded content doesn't match IPFS hash" });
-              return;
-            }
-
-            debug('accepting Content')
-            await runtime.assets.acceptContent(role_addr, providerId, id);
-
-            debug('creating storage relationship for newly uploaded content')
-            // Create storage relationship and flip it to ready.
-            const dosr_id = await runtime.assets.createAndReturnStorageRelationship(role_addr, providerId, id);
-
-            debug('toggling storage relationship for newly uploaded content')
-            await runtime.assets.toggleStorageRelationshipReady(role_addr, providerId, dosr_id, true);
-
-            debug('Sending OK response.');
-            res.status(200).send({ message: 'Asset uploaded.' });
-          } catch (err) {
-            debug(`${err.message}`);
-            error_handler(res, err);
-          }
-        });
-
-        stream.on('error', (err) => error_handler(res, err));
-        req.pipe(stream);
-
-      } catch (err) {
-        error_handler(res, err);
-        return;
-      }
-    },
-
-    // Get content
-    get: async function(req, res, _next)
-    {
-      const id = req.params.id;
-      const download = req.query.download;
-
-      // Parse range header
-      var ranges;
-      if (!download) {
-        try {
-          var range_header = req.headers['range'];
-          ranges = util_ranges.parse(range_header);
-        } catch (err) {
-          // Do nothing; it's ok to ignore malformed ranges and respond with the
-          // full content according to https://www.rfc-editor.org/rfc/rfc7233.txt
-        }
-        if (ranges && ranges.unit != 'bytes') {
-          // Ignore ranges that are not byte units.
-          ranges = undefined;
-        }
-      }
-      debug('Requested range(s) is/are', ranges);
-
-      // Open file
-      try {
-        const size = await storage.size(id);
-        const stream = await storage.open(id, 'r');
-
-        // Add a file extension to download requests if necessary. If the file
-        // already contains an extension, don't add one.
-        var send_name = id;
-        const type = stream.file_info.mime_type;
-        if (download) {
-          var ext = path.extname(send_name);
-          if (!ext) {
-            ext = stream.file_info.ext;
-            if (ext) {
-              send_name = `${send_name}.${ext}`;
-            }
-          }
-        }
-
-        var opts = {
-          name: send_name,
-          type: type,
-          size: size,
-          ranges: ranges,
-          download: download,
-        };
-        util_ranges.send(res, stream, opts);
-
-
-      } catch (err) {
-        error_handler(res, err, err.code);
-      }
-    }
-  };
-
-  // OpenAPI specs
-  doc.get.apiDoc =
-  {
-    description: 'Download an asset.',
-    operationId: 'assetData',
-    tags: ['asset', 'data'],
-    parameters: [
-      {
-        name: 'download',
-        in: 'query',
-        description: 'Download instead of streaming inline.',
-        required: false,
-        allowEmptyValue: true,
-        schema: {
-          type: 'boolean',
-          default: false,
-        },
-      },
-    ],
-    responses: {
-      200: {
-        description: 'Asset download.',
-        content: {
-          default: {
-            schema: {
-              type: 'string',
-              format: 'binary',
-            },
-          },
-        },
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              '$ref': '#/components/schemas/Error'
-            },
-          },
-        },
-      },
-    },
-  };
-
-  doc.put.apiDoc =
-  {
-    description: 'Asset upload.',
-    operationId: 'assetUpload',
-    tags: ['asset', 'data'],
-    requestBody: {
-      content: {
-        '*/*': {
-          schema: {
-            type: 'string',
-            format: 'binary',
-          },
-        },
-      },
-    },
-    responses: {
-      200: {
-        description: 'Asset upload.',
-        content: {
-          'application/json': {
-            schema: {
-              type: 'object',
-              required: ['message'],
-              properties: {
-                message: {
-                  type: 'string',
-                }
-              },
-            },
-          },
-        },
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              '$ref': '#/components/schemas/Error'
-            },
-          },
-        },
-      },
-    },
-  };
-
-
-  doc.head.apiDoc =
-  {
-    description: 'Asset download information.',
-    operationId: 'assetInfo',
-    tags: ['asset', 'metadata'],
-    responses: {
-      200: {
-        description: 'Asset info.',
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              '$ref': '#/components/schemas/Error'
-            },
-          },
-        },
-      },
-    },
-  };
-
-  return doc;
-};
+module.exports = function (storage, runtime) {
+	const doc = {
+		// parameters for all operations in this path
+		parameters: [
+			{
+				name: 'id',
+				in: 'path',
+				required: true,
+				description: 'Joystream Content ID',
+				schema: {
+					type: 'string',
+				},
+			},
+		],
+
+		// Head: report that ranges are OK
+		async head(req, res, _next) {
+			const id = req.params.id
+
+			// Open file
+			try {
+				const size = await storage.size(id)
+				const stream = await storage.open(id, 'r')
+				const type = stream.file_info.mime_type
+
+				// Close the stream; we don't need to fetch the file (if we haven't
+				// already). Then return result.
+				stream.destroy()
+
+				res.status(200)
+				res.contentType(type)
+				res.header('Content-Disposition', 'inline')
+				res.header('Content-Transfer-Encoding', 'binary')
+				res.header('Accept-Ranges', 'bytes')
+				if (size > 0) {
+					res.header('Content-Length', size)
+				}
+				res.send()
+			} catch (err) {
+				error_handler(res, err, err.code)
+			}
+		},
+
+		// Put for uploads
+		async put(req, res, _next) {
+			const id = req.params.id // content id
+
+			// First check if we're the liaison for the name, otherwise we can bail
+			// out already.
+			const role_addr = runtime.identities.key.address
+			const providerId = runtime.storageProviderId
+			let dataObject
+			try {
+				debug('calling checkLiaisonForDataObject')
+				dataObject = await runtime.assets.checkLiaisonForDataObject(providerId, id)
+				debug('called checkLiaisonForDataObject')
+			} catch (err) {
+				error_handler(res, err, 403)
+				return
+			}
+
+			// We'll open a write stream to the backend, but reserve the right to
+			// abort upload if the filters don't smell right.
+			let stream
+			try {
+				stream = await storage.open(id, 'w')
+
+				// We don't know whether the filtering occurs before or after the
+				// stream was finished, and can only commit if both passed.
+				let finished = false
+				let accepted = false
+				const possibly_commit = () => {
+					if (finished && accepted) {
+						debug('Stream is finished and passed filters; committing.')
+						stream.commit()
+					}
+				}
+
+				stream.on('file_info', async (info) => {
+					try {
+						debug('Detected file info:', info)
+
+						// Filter
+						const filter_result = filter({}, req.headers, info.mime_type)
+						if (filter_result.code !== 200) {
+							debug('Rejecting content', filter_result.message)
+							stream.end()
+							res.status(filter_result.code).send({ message: filter_result.message })
+
+							// Reject the content
+							await runtime.assets.rejectContent(role_addr, providerId, id)
+							return
+						}
+						debug('Content accepted.')
+						accepted = true
+
+						// We may have to commit the stream.
+						possibly_commit()
+					} catch (err) {
+						error_handler(res, err)
+					}
+				})
+
+				stream.on('finish', () => {
+					try {
+						finished = true
+						possibly_commit()
+					} catch (err) {
+						error_handler(res, err)
+					}
+				})
+
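+				// The storage backend emits 'committed' with the IPFS content id of
+				// the stored data; it must match the id recorded in the data object.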
+				stream.on('committed', async (hash) => {
+					console.log('committed', dataObject)
+					try {
+						if (hash !== dataObject.ipfs_content_id.toString()) {
+							debug('Rejecting content. IPFS hash does not match value in objectId')
+							await runtime.assets.rejectContent(role_addr, providerId, id)
+							res.status(400).send({ message: "Uploaded content doesn't match IPFS hash" })
+							return
+						}
+
+						debug('accepting Content')
+						await runtime.assets.acceptContent(role_addr, providerId, id)
+
+						debug('creating storage relationship for newly uploaded content')
+						// Create storage relationship and flip it to ready.
+						const dosr_id = await runtime.assets.createAndReturnStorageRelationship(
+							role_addr,
+							providerId,
+							id
+						)
+
+						debug('toggling storage relationship for newly uploaded content')
+						await runtime.assets.toggleStorageRelationshipReady(role_addr, providerId, dosr_id, true)
+
+						debug('Sending OK response.')
+						res.status(200).send({ message: 'Asset uploaded.' })
+					} catch (err) {
+						debug(`${err.message}`)
+						error_handler(res, err)
+					}
+				})
+
+				stream.on('error', (err) => error_handler(res, err))
+				req.pipe(stream)
+			} catch (err) {
+				error_handler(res, err)
+				return
+			}
+		},
+
+		// Get content
+		async get(req, res, _next) {
+			const id = req.params.id
+			const download = req.query.download
+
+			// Parse range header
+			let ranges
+			if (!download) {
+				try {
+					const range_header = req.headers.range
+					ranges = util_ranges.parse(range_header)
+				} catch (err) {
+					// Do nothing; it's ok to ignore malformed ranges and respond with the
+					// full content according to https://www.rfc-editor.org/rfc/rfc7233.txt
+				}
+				if (ranges && ranges.unit !== 'bytes') {
+					// Ignore ranges that are not byte units.
+					ranges = undefined
+				}
+			}
+			debug('Requested range(s) is/are', ranges)
+
+			// Open file
+			try {
+				const size = await storage.size(id)
+				const stream = await storage.open(id, 'r')
+
+				// Add a file extension to download requests if necessary. If the file
+				// already contains an extension, don't add one.
+				let send_name = id
+				const type = stream.file_info.mime_type
+				if (download) {
+					let ext = path.extname(send_name)
+					if (!ext) {
+						ext = stream.file_info.ext
+						if (ext) {
+							send_name = `${send_name}.${ext}`
+						}
+					}
+				}
+
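+				// util_ranges.send() streams the content, honouring any parsed byte
+				// ranges and setting inline/download response headers as requested.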
+				const opts = {
+					name: send_name,
+					type,
+					size,
+					ranges,
+					download,
+				}
+				util_ranges.send(res, stream, opts)
+			} catch (err) {
+				error_handler(res, err, err.code)
+			}
+		},
+	}
+
+	// OpenAPI specs
+	doc.get.apiDoc = {
+		description: 'Download an asset.',
+		operationId: 'assetData',
+		tags: ['asset', 'data'],
+		parameters: [
+			{
+				name: 'download',
+				in: 'query',
+				description: 'Download instead of streaming inline.',
+				required: false,
+				allowEmptyValue: true,
+				schema: {
+					type: 'boolean',
+					default: false,
+				},
+			},
+		],
+		responses: {
+			200: {
+				description: 'Asset download.',
+				content: {
+					default: {
+						schema: {
+							type: 'string',
+							format: 'binary',
+						},
+					},
+				},
+			},
+			default: {
+				description: 'Unexpected error',
+				content: {
+					'application/json': {
+						schema: {
+							$ref: '#/components/schemas/Error',
+						},
+					},
+				},
+			},
+		},
+	}
+
+	doc.put.apiDoc = {
+		description: 'Asset upload.',
+		operationId: 'assetUpload',
+		tags: ['asset', 'data'],
+		requestBody: {
+			content: {
+				'*/*': {
+					schema: {
+						type: 'string',
+						format: 'binary',
+					},
+				},
+			},
+		},
+		responses: {
+			200: {
+				description: 'Asset upload.',
+				content: {
+					'application/json': {
+						schema: {
+							type: 'object',
+							required: ['message'],
+							properties: {
+								message: {
+									type: 'string',
+								},
+							},
+						},
+					},
+				},
+			},
+			default: {
+				description: 'Unexpected error',
+				content: {
+					'application/json': {
+						schema: {
+							$ref: '#/components/schemas/Error',
+						},
+					},
+				},
+			},
+		},
+	}
+
+	doc.head.apiDoc = {
+		description: 'Asset download information.',
+		operationId: 'assetInfo',
+		tags: ['asset', 'metadata'],
+		responses: {
+			200: {
+				description: 'Asset info.',
+			},
+			default: {
+				description: 'Unexpected error',
+				content: {
+					'application/json': {
+						schema: {
+							$ref: '#/components/schemas/Error',
+						},
+					},
+				},
+			},
+		},
+	}
+
+	return doc
+}

+ 79 - 81
storage-node/packages/colossus/paths/discover/v0/{id}.js

@@ -1,91 +1,89 @@
 const { discover } = require('@joystream/service-discovery')
-const debug = require('debug')('joystream:colossus:api:discovery');
+const debug = require('debug')('joystream:colossus:api:discovery')
 
-const MAX_CACHE_AGE = 30 * 60 * 1000;
-const USE_CACHE = true;
+const MAX_CACHE_AGE = 30 * 60 * 1000
+const USE_CACHE = true
 
-module.exports = function(runtime)
-{
-  var doc = {
-    // parameters for all operations in this path
-    parameters: [
-      {
-        name: 'id',
-        in: 'path',
-        required: true,
-        description: 'Actor accouuntId',
-        schema: {
-          type: 'string', // integer ?
-        },
-      },
-    ],
+module.exports = function (runtime) {
+	const doc = {
+		// parameters for all operations in this path
+		parameters: [
+			{
+				name: 'id',
+				in: 'path',
+				required: true,
+				description: 'Actor accountId',
+				schema: {
+					type: 'string', // integer ?
+				},
+			},
+		],
 
-    // Resolve Service Information
-    get: async function(req, res)
-    {
-        try {
-          var parsedId = parseInt(req.params.id);
-        } catch (err) {
-          return res.status(400).end();
-        }
+		// Resolve Service Information
+		async get(req, res) {
+			// parseInt() does not throw on bad input; it returns NaN,
+			// so validate the parsed value explicitly.
+			const parsedId = parseInt(req.params.id, 10)
+			if (Number.isNaN(parsedId)) {
+				return res.status(400).end()
+			}
 
-        const id = parsedId
-        let cacheMaxAge = req.query.max_age;
+			const id = parsedId
+			let cacheMaxAge = req.query.max_age
 
-        if (cacheMaxAge) {
-          try {
-            cacheMaxAge = parseInt(cacheMaxAge);
-          } catch(err) {
-            cacheMaxAge = MAX_CACHE_AGE
-          }
-        } else {
-          cacheMaxAge = 0
-        }
+			if (cacheMaxAge) {
+				// parseInt() returns NaN instead of throwing, so fall back explicitly
+				cacheMaxAge = parseInt(cacheMaxAge, 10)
+				if (Number.isNaN(cacheMaxAge)) {
+					cacheMaxAge = MAX_CACHE_AGE
+				}
+			} else {
+				cacheMaxAge = 0
+			}
 
-        // todo - validate id before querying
+			// todo - validate id before querying
 
-        try {
-          debug(`resolving ${id}`);
-          const info = await discover.discover(id, runtime, USE_CACHE, cacheMaxAge);
-          if (info == null) {
-            debug('info not found');
-            res.status(404).end();
-          } else {
-            res.status(200).send(info);
-          }
-        } catch (err) {
-          debug(`${err}`);
-          res.status(404).end()
-        }
-    }
-  };
+			try {
+				debug(`resolving ${id}`)
+				const info = await discover.discover(id, runtime, USE_CACHE, cacheMaxAge)
+				if (info == null) {
+					debug('info not found')
+					res.status(404).end()
+				} else {
+					res.status(200).send(info)
+				}
+			} catch (err) {
+				debug(`${err}`)
+				res.status(404).end()
+			}
+		},
+	}
 
-    // OpenAPI specs
-    doc.get.apiDoc = {
-        description: 'Resolve Service Information',
-        operationId: 'discover',
-        //tags: ['asset', 'data'],
-        responses: {
-            200: {
-                description: 'Wrapped JSON Service Information',
-                content: {
-                  'application/json': {
-                    schema: {
-                      required: ['serialized'],
-                      properties: {
-                        'serialized': {
-                          type: 'string'
-                        },
-                        'signature': {
-                          type: 'string'
-                        }
-                      },
-                    },
-                  }
-                }
-            }
-        }
-    }
+	// OpenAPI specs
+	doc.get.apiDoc = {
+		description: 'Resolve Service Information',
+		operationId: 'discover',
+		// tags: ['asset', 'data'],
+		responses: {
+			200: {
+				description: 'Wrapped JSON Service Information',
+				content: {
+					'application/json': {
+						schema: {
+							required: ['serialized'],
+							properties: {
+								serialized: {
+									type: 'string',
+								},
+								signature: {
+									type: 'string',
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
 
-    return doc;
-};
+	return doc
+}

+ 1 - 1
storage-node/packages/colossus/test/index.js

@@ -1 +1 @@
-// Add Tests!
+// Add Tests!

+ 148 - 152
storage-node/packages/discovery/discover.js

@@ -9,21 +9,21 @@ const { newExternallyControlledPromise } = require('@joystream/storage-utils/ext
 /**
  * Determines if code is running in a browser by testing for the global window object
  */
-function inBrowser () {
-  return typeof window !== 'undefined'
+function inBrowser() {
+	return typeof window !== 'undefined'
 }
 
 /**
  * Map storage-provider id to a Promise of a discovery result. The purpose
  * is to avoid concurrent active discoveries for the same provider.
  */
-var activeDiscoveries = {}
+const activeDiscoveries = {}
 
 /**
  * Map of storage provider id to string
  * Cache of past discovery lookup results
  */
-var accountInfoCache = {}
+const accountInfoCache = {}
 
 /**
  * After what period of time a cached record is considered stale, and would
@@ -38,17 +38,16 @@ const CACHE_TTL = 60 * 60 * 1000
  * @param { RuntimeApi } runtimeApi - api instance to query the chain
  * @returns { Promise<string | null> } - ipns multiformat address
  */
-async function getIpnsIdentity (storageProviderId, runtimeApi) {
-  storageProviderId = new BN(storageProviderId)
-  // lookup ipns identity from chain corresponding to storageProviderId
-  const info = await runtimeApi.discovery.getAccountInfo(storageProviderId)
-
-  if (info == null) {
-    // no identity found on chain for account
-    return null
-  } else {
-    return info.identity.toString()
-  }
+async function getIpnsIdentity(storageProviderId, runtimeApi) {
+	storageProviderId = new BN(storageProviderId)
+	// lookup ipns identity from chain corresponding to storageProviderId
+	const info = await runtimeApi.discovery.getAccountInfo(storageProviderId)
+
+	if (info == null) {
+		// no identity found on chain for account
+		return null
+	}
+	return info.identity.toString()
 }
 
 /**
@@ -61,30 +60,28 @@ async function getIpnsIdentity (storageProviderId, runtimeApi) {
  * @param {string} gateway - optional ipfs http gateway url to perform ipfs queries
  * @returns { Promise<object> } - the published service information
  */
-async function discover_over_ipfs_http_gateway (
-  storageProviderId, runtimeApi, gateway = 'http://localhost:8080') {
+async function discover_over_ipfs_http_gateway(storageProviderId, runtimeApi, gateway = 'http://localhost:8080') {
+	storageProviderId = new BN(storageProviderId)
+	const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
 
-  storageProviderId = new BN(storageProviderId)
-  let isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
+	if (!isProvider) {
+		throw new Error('Cannot discover non storage providers')
+	}
 
-  if (!isProvider) {
-    throw new Error('Cannot discover non storage providers')
-  }
+	const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
 
-  const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
+	if (identity == null) {
+		// don't waste time trying to resolve if no identity was found
+		throw new Error('no identity to resolve')
+	}
 
-  if (identity == null) {
-    // dont waste time trying to resolve if no identity was found
-    throw new Error('no identity to resolve')
-  }
+	gateway = stripEndingSlash(gateway)
 
-  gateway = stripEndingSlash(gateway)
+	const url = `${gateway}/ipns/${identity}`
 
-  const url = `${gateway}/ipns/${identity}`
+	const response = await axios.get(url)
 
-  const response = await axios.get(url)
-
-  return response.data
+	return response.data
 }
 
 /**
@@ -97,38 +94,38 @@ async function discover_over_ipfs_http_gateway (
  * @param {string} discoverApiEndpoint - url for a colossus discovery api endpoint
  * @returns { Promise<object> } - the published service information
  */
-async function discover_over_joystream_discovery_service (storageProviderId, runtimeApi, discoverApiEndpoint) {
-  storageProviderId = new BN(storageProviderId)
-  let isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
+async function discover_over_joystream_discovery_service(storageProviderId, runtimeApi, discoverApiEndpoint) {
+	storageProviderId = new BN(storageProviderId)
+	const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
 
-  if (!isProvider) {
-    throw new Error('Cannot discover non storage providers')
-  }
+	if (!isProvider) {
+		throw new Error('Cannot discover non storage providers')
+	}
 
-  const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
+	const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
 
-  // dont waste time trying to resolve if no identity was found
-  if (identity == null) {
-    throw new Error('no identity to resolve')
-  }
+	// don't waste time trying to resolve if no identity was found
+	if (identity == null) {
+		throw new Error('no identity to resolve')
+	}
 
-  if (!discoverApiEndpoint) {
-    // Use bootstrap nodes
-    let discoveryBootstrapNodes = await runtimeApi.discovery.getBootstrapEndpoints()
+	if (!discoverApiEndpoint) {
+		// Use bootstrap nodes
+		const discoveryBootstrapNodes = await runtimeApi.discovery.getBootstrapEndpoints()
 
-    if (discoveryBootstrapNodes.length) {
-      discoverApiEndpoint = stripEndingSlash(discoveryBootstrapNodes[0].toString())
-    } else {
-      throw new Error('No known discovery bootstrap nodes found on network')
-    }
-  }
+		if (discoveryBootstrapNodes.length) {
+			discoverApiEndpoint = stripEndingSlash(discoveryBootstrapNodes[0].toString())
+		} else {
+			throw new Error('No known discovery bootstrap nodes found on network')
+		}
+	}
 
-  const url = `${discoverApiEndpoint}/discover/v0/${storageProviderId.toNumber()}`
+	const url = `${discoverApiEndpoint}/discover/v0/${storageProviderId.toNumber()}`
 
+	// axios should already have parsed the response body if it was JSON
-  const response = await axios.get(url)
+	// should have parsed if data was json?
+	const response = await axios.get(url)
 
-  return response.data
+	return response.data
 }
 
 /**
@@ -139,38 +136,38 @@ async function discover_over_joystream_discovery_service (storageProviderId, run
  * @param {RuntimeApi} runtimeApi - api instance to query the chain
  * @returns { Promise<object> } - the published service information
  */
-async function discover_over_local_ipfs_node (storageProviderId, runtimeApi) {
-  storageProviderId = new BN(storageProviderId)
-  let isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
+async function discover_over_local_ipfs_node(storageProviderId, runtimeApi) {
+	storageProviderId = new BN(storageProviderId)
+	const isProvider = await runtimeApi.workers.isStorageProvider(storageProviderId)
 
-  if (!isProvider) {
-    throw new Error('Cannot discover non storage providers')
-  }
+	if (!isProvider) {
+		throw new Error('Cannot discover non storage providers')
+	}
 
-  const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
+	const identity = await getIpnsIdentity(storageProviderId, runtimeApi)
 
-  if (identity == null) {
-    // dont waste time trying to resolve if no identity was found
-    throw new Error('no identity to resolve')
-  }
+	if (identity == null) {
+		// don't waste time trying to resolve if no identity was found
+		throw new Error('no identity to resolve')
+	}
 
-  const ipns_address = `/ipns/${identity}/`
+	const ipns_address = `/ipns/${identity}/`
 
-  debug('resolved ipns to ipfs object')
-  // Can this call hang forever!? can/should we set a timeout?
-  let ipfs_name = await ipfs.name.resolve(ipns_address, {
-    // don't recurse, there should only be one indirection to the service info file
-    recursive: false,
-    nocache: false
-  })
+	debug('resolving ipns address to ipfs object')
+	// Can this call hang forever!? can/should we set a timeout?
+	const ipfs_name = await ipfs.name.resolve(ipns_address, {
+		// don't recurse, there should only be one indirection to the service info file
+		recursive: false,
+		nocache: false,
+	})
 
-  debug('getting ipfs object', ipfs_name)
-  let data = await ipfs.get(ipfs_name) // this can sometimes hang forever!?! can we set a timeout?
+	debug('getting ipfs object', ipfs_name)
+	const data = await ipfs.get(ipfs_name) // this can sometimes hang forever!?! can we set a timeout?
 
-  // there should only be one file published under the resolved path
-  let content = data[0].content
+	// there should only be one file published under the resolved path
+	const content = data[0].content
 
-  return JSON.parse(content)
+	return JSON.parse(content)
 }
 
 /**
@@ -187,27 +184,26 @@ async function discover_over_local_ipfs_node (storageProviderId, runtimeApi) {
  * @param {number} maxCacheAge - maximum age of a cached query that triggers automatic re-discovery
  * @returns { Promise<object | null> } - the published service information
  */
-async function discover (storageProviderId, runtimeApi, useCachedValue = false, maxCacheAge = 0) {
-  storageProviderId = new BN(storageProviderId)
-  const id = storageProviderId.toNumber()
-  const cached = accountInfoCache[id]
-
-  if (cached && useCachedValue) {
-    if (maxCacheAge > 0) {
-      // get latest value
-      if (Date.now() > (cached.updated + maxCacheAge)) {
-        return _discover(storageProviderId, runtimeApi)
-      }
-    }
-    // refresh if cache if stale, new value returned on next cached query
-    if (Date.now() > (cached.updated + CACHE_TTL)) {
-      _discover(storageProviderId, runtimeApi)
-    }
-    // return best known value
-    return cached.value
-  } else {
-    return _discover(storageProviderId, runtimeApi)
-  }
+async function discover(storageProviderId, runtimeApi, useCachedValue = false, maxCacheAge = 0) {
+	storageProviderId = new BN(storageProviderId)
+	const id = storageProviderId.toNumber()
+	const cached = accountInfoCache[id]
+
+	if (cached && useCachedValue) {
+		if (maxCacheAge > 0) {
+			// cached entry is older than the caller allows; fetch a fresh value
+			if (Date.now() > cached.updated + maxCacheAge) {
+				return _discover(storageProviderId, runtimeApi)
+			}
+		}
+		// trigger a background refresh if the cache is stale; the fresh value is returned on the next cached query
+		if (Date.now() > cached.updated + CACHE_TTL) {
+			_discover(storageProviderId, runtimeApi)
+		}
+		// return best known value
+		return cached.value
+	}
+	return _discover(storageProviderId, runtimeApi)
 }
 
 /**
@@ -218,58 +214,58 @@ async function discover (storageProviderId, runtimeApi, useCachedValue = false,
  * @param {RuntimeApi} runtimeApi - api instance for querying the chain
  * @returns { Promise<object | null> } - the published service information
  */
-async function _discover (storageProviderId, runtimeApi) {
-  storageProviderId = new BN(storageProviderId)
-  const id = storageProviderId.toNumber()
-
-  const discoveryResult = activeDiscoveries[id]
-  if (discoveryResult) {
-    debug('discovery in progress waiting for result for', id)
-    return discoveryResult
-  }
-
-  debug('starting new discovery for', id)
-  const deferredDiscovery = newExternallyControlledPromise()
-  activeDiscoveries[id] = deferredDiscovery.promise
-
-  let result
-  try {
-    if (inBrowser()) {
-      result = await discover_over_joystream_discovery_service(storageProviderId, runtimeApi)
-    } else {
-      result = await discover_over_local_ipfs_node(storageProviderId, runtimeApi)
-    }
-
-    debug(result)
-    result = JSON.stringify(result)
-    accountInfoCache[id] = {
-      value: result,
-      updated: Date.now()
-    }
-
-    deferredDiscovery.resolve(result)
-    delete activeDiscoveries[id]
-    return result
-  } catch (err) {
-    // we catch the error so we can update all callers
-    // and throw again to inform the first caller.
-    debug(err.message)
-    delete activeDiscoveries[id]
-    // deferredDiscovery.reject(err)
-    deferredDiscovery.resolve(null) // resolve to null until we figure out the issue below
-    // throw err // <-- throwing but this isn't being
-    // caught correctly in express server! Is it because there is an uncaught promise somewhere
-    // in the prior .reject() call ?
-    // I've only seen this behaviour when error is from ipfs-client
-    // ... is this unique to errors thrown from ipfs-client?
-    // Problem is its crashing the node so just return null for now
-    return null
-  }
+async function _discover(storageProviderId, runtimeApi) {
+	storageProviderId = new BN(storageProviderId)
+	const id = storageProviderId.toNumber()
+
+	const discoveryResult = activeDiscoveries[id]
+	if (discoveryResult) {
+		debug('discovery in progress waiting for result for', id)
+		return discoveryResult
+	}
+
+	debug('starting new discovery for', id)
+	const deferredDiscovery = newExternallyControlledPromise()
+	activeDiscoveries[id] = deferredDiscovery.promise
+
+	let result
+	try {
+		if (inBrowser()) {
+			result = await discover_over_joystream_discovery_service(storageProviderId, runtimeApi)
+		} else {
+			result = await discover_over_local_ipfs_node(storageProviderId, runtimeApi)
+		}
+
+		debug(result)
+		result = JSON.stringify(result)
+		accountInfoCache[id] = {
+			value: result,
+			updated: Date.now(),
+		}
+
+		deferredDiscovery.resolve(result)
+		delete activeDiscoveries[id]
+		return result
+	} catch (err) {
+		// we catch the error so we can update all callers
+		// and throw again to inform the first caller.
+		debug(err.message)
+		delete activeDiscoveries[id]
+		// deferredDiscovery.reject(err)
+		deferredDiscovery.resolve(null) // resolve to null until we figure out the issue below
+		// throw err // <-- throwing but this isn't being
+		// caught correctly in express server! Is it because there is an uncaught promise somewhere
+		// in the prior .reject() call ?
+		// I've only seen this behaviour when error is from ipfs-client
+		// ... is this unique to errors thrown from ipfs-client?
+		// Problem is it's crashing the node, so just return null for now
+		return null
+	}
 }
 
 module.exports = {
-  discover,
-  discover_over_joystream_discovery_service,
-  discover_over_ipfs_http_gateway,
-  discover_over_local_ipfs_node
+	discover,
+	discover_over_joystream_discovery_service,
+	discover_over_ipfs_http_gateway,
+	discover_over_local_ipfs_node,
 }

+ 29 - 32
storage-node/packages/discovery/example.js

@@ -3,38 +3,35 @@ const { RuntimeApi } = require('@joystream/storage-runtime-api')
 const { discover, publish } = require('./')
 
 async function main() {
-    // The assigned storage-provider id
-    const provider_id = 0
-
-    const runtimeApi = await RuntimeApi.create({
-        // Path to the role account key file of the provider
-        account_file: "/path/to/role_account_key_file.json",
-        storageProviderId: provider_id
-    })
-
-    let ipns_id = await publish.publish(
-        {
-            asset: {
-                version: 1,
-                endpoint: 'http://endpoint.com'
-            }
-        },
-        runtimeApi
-    )
-
-    console.log(ipns_id)
-
-    // register ipns_id on chain
-    await runtimeApi.setAccountInfo(ipfs_id)
-
-    let serviceInfo = await discover.discover(
-        provider_id,
-        runtimeApi
-    )
-
-    console.log(serviceInfo)
-
-    runtimeApi.api.disconnect()
+	// The assigned storage-provider id
+	const provider_id = 0
+
+	const runtimeApi = await RuntimeApi.create({
+		// Path to the role account key file of the provider
+		account_file: '/path/to/role_account_key_file.json',
+		storageProviderId: provider_id,
+	})
+
+	const ipns_id = await publish.publish(
+		{
+			asset: {
+				version: 1,
+				endpoint: 'http://endpoint.com',
+			},
+		},
+		runtimeApi
+	)
+
+	console.log(ipns_id)
+
+	// register ipns_id on chain
+	await runtimeApi.setAccountInfo(ipns_id)
+
+	const serviceInfo = await discover.discover(provider_id, runtimeApi)
+
+	console.log(serviceInfo)
+
+	runtimeApi.api.disconnect()
 }
 
 main()

+ 3 - 4
storage-node/packages/discovery/index.js

@@ -1,5 +1,4 @@
-
 module.exports = {
-    discover : require('./discover'),
-    publish : require('./publish'),
-}
+	discover: require('./discover'),
+	publish: require('./publish'),
+}

+ 43 - 42
storage-node/packages/discovery/publish.js

@@ -1,4 +1,5 @@
 const ipfsClient = require('ipfs-http-client')
+
 const ipfs = ipfsClient('localhost', '5001', { protocol: 'http' })
 
 const debug = require('debug')('joystream:discovery:publish')
@@ -16,8 +17,8 @@ const PUBLISH_KEY = 'self'
  * @param {object} data - json object
  * @returns {Buffer}
  */
-function bufferFrom (data) {
-  return Buffer.from(JSON.stringify(data), 'utf-8')
+function bufferFrom(data) {
+	return Buffer.from(JSON.stringify(data), 'utf-8')
 }
 
 /**
@@ -26,10 +27,10 @@ function bufferFrom (data) {
  * @param {object} info - json object
  * @returns {Buffer}
  */
-function encodeServiceInfo (info) {
-  return bufferFrom({
-    serialized: JSON.stringify(info)
-  })
+function encodeServiceInfo(info) {
+	return bufferFrom({
+		serialized: JSON.stringify(info),
+	})
 }
 
 /**
@@ -39,49 +40,49 @@ function encodeServiceInfo (info) {
  * @param {object} service_info - the service information to publish
  * @returns {string} - the ipns id
  */
-async function publish (service_info) {
-  const keys = await ipfs.key.list()
-  let services_key = keys.find((key) => key.name === PUBLISH_KEY)
+async function publish(service_info) {
+	const keys = await ipfs.key.list()
+	let services_key = keys.find((key) => key.name === PUBLISH_KEY)
 
-  // An ipfs node will always have the self key.
-  // If the publish key is specified as anything else and it doesn't exist
-  // we create it.
-  if (PUBLISH_KEY !== 'self' && !services_key) {
-    debug('generating ipns services key')
-    services_key = await ipfs.key.gen(PUBLISH_KEY, {
-      type: 'rsa',
-      size: 2048
-    })
-  }
+	// An ipfs node will always have the self key.
+	// If the publish key is specified as anything else and it doesn't exist
+	// we create it.
+	if (PUBLISH_KEY !== 'self' && !services_key) {
+		debug('generating ipns services key')
+		services_key = await ipfs.key.gen(PUBLISH_KEY, {
+			type: 'rsa',
+			size: 2048,
+		})
+	}
 
-  if (!services_key) {
-    throw new Error('No IPFS publishing key available!')
-  }
+	if (!services_key) {
+		throw new Error('No IPFS publishing key available!')
+	}
 
-  debug('adding service info file to node')
-  const files = await ipfs.add(encodeServiceInfo(service_info))
+	debug('adding service info file to node')
+	const files = await ipfs.add(encodeServiceInfo(service_info))
 
-  debug('publishing...')
-  const published = await ipfs.name.publish(files[0].hash, {
-    key: PUBLISH_KEY,
-    resolve: false
-    // lifetime: // string - Time duration of the record. Default: 24h
-    // ttl:      // string - Time duration this record should be cached
-  })
+	debug('publishing...')
+	const published = await ipfs.name.publish(files[0].hash, {
+		key: PUBLISH_KEY,
+		resolve: false,
+		// lifetime: // string - Time duration of the record. Default: 24h
+		// ttl:      // string - Time duration this record should be cached
+	})
 
-  // The name and ipfs hash of the published service information file, eg.
-  // {
-  //   name: 'QmUNQCkaU1TRnc1WGixqEP3Q3fazM8guSdFRsdnSJTN36A',
-  //   value: '/ipfs/QmcSjtVMfDSSNYCxNAb9PxNpEigCw7h1UZ77gip3ghfbnA'
-  // }
-  // .. The name is equivalent to the key id that was used.
-  debug(published)
+	// The name and ipfs hash of the published service information file, eg.
+	// {
+	//   name: 'QmUNQCkaU1TRnc1WGixqEP3Q3fazM8guSdFRsdnSJTN36A',
+	//   value: '/ipfs/QmcSjtVMfDSSNYCxNAb9PxNpEigCw7h1UZ77gip3ghfbnA'
+	// }
+	// .. The name is equivalent to the key id that was used.
+	debug(published)
 
-  // Return the key id under which the content was published. Which is used
-  // to lookup the actual ipfs content id of the published service information
-  return services_key.id
+	// Return the key id under which the content was published. Which is used
+	// to lookup the actual ipfs content id of the published service information
+	return services_key.id
 }
 
 module.exports = {
-  publish
+	publish,
 }
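+
+// Usage sketch (assumes a local IPFS daemon on the default API port and an
+// async context; the endpoint value is illustrative):
+//   const { publish } = require('@joystream/service-discovery')
+//   const ipnsId = await publish.publish({ asset: { version: 1, endpoint: 'http://localhost:3000' } })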

+ 1 - 1
storage-node/packages/discovery/test/index.js

@@ -1 +1 @@
-// Add Tests!
+// Add Tests!

+ 180 - 163
storage-node/packages/helios/bin/cli.js

@@ -6,178 +6,195 @@ const { discover } = require('@joystream/service-discovery')
 const axios = require('axios')
 const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')
 
-async function main () {
-  const runtime = await RuntimeApi.create()
-  const { api } = runtime
-
-  // get current blockheight
-  const currentHeader = await api.rpc.chain.getHeader()
-  const currentHeight = currentHeader.number.toBn()
-
-  // get all providers
-  const { ids: storageProviders } = await runtime.workers.getAllProviders()
-  console.log(`Found ${storageProviders.length} staked providers`)
-
-  const storageProviderAccountInfos = await Promise.all(storageProviders.map(async (providerId) => {
-    return ({
-      providerId,
-      info: await runtime.discovery.getAccountInfo(providerId)
-    })
-  }))
-
-  // providers that have updated their account info and published ipfs id
-  // considered live if the record hasn't expired yet
-  const liveProviders = storageProviderAccountInfos.filter(({info}) => {
-    return info && info.expires_at.gte(currentHeight)
-  })
-
-  const downProviders = storageProviderAccountInfos.filter(({info}) => {
-    return info == null
-  })
-
-  const expiredTtlProviders = storageProviderAccountInfos.filter(({info}) => {
-    return info && currentHeight.gte(info.expires_at)
-  })
-
-  let providersStatuses = mapInfoToStatus(liveProviders, currentHeight)
-  console.log('\n== Live Providers\n', providersStatuses)
-
-  let expiredProviderStatuses = mapInfoToStatus(expiredTtlProviders, currentHeight)
-  console.log('\n== Expired Providers\n', expiredProviderStatuses)
-
-  console.log('\n== Down Providers!\n', downProviders.map(provider => {
-    return ({
-      providerId: provider.providerId
-    })
-  }))
-
-  // Resolve IPNS identities of providers
-  console.log('\nResolving live provider API Endpoints...')
-  let endpoints = await Promise.all(providersStatuses.map(async ({providerId}) => {
-    try {
-      let serviceInfo = await discover.discover_over_joystream_discovery_service(providerId, runtime)
-
-      if (serviceInfo == null) {
-        console.log(`provider ${providerId} has not published service information`)
-        return { providerId, endpoint: null }
-      }
-
-      let info = JSON.parse(serviceInfo.serialized)
-      console.log(`${providerId} -> ${info.asset.endpoint}`)
-      return { providerId, endpoint: info.asset.endpoint }
-    } catch (err) {
-      console.log('resolve failed for id', providerId, err.message)
-      return { providerId, endpoint: null }
-    }
-  }))
-
-  console.log('\nChecking API Endpoints are online')
-  await Promise.all(endpoints.map(async (provider) => {
-    if (!provider.endpoint) {
-      console.log('skipping', provider.address)
-      return
-    }
-    const swaggerUrl = `${stripEndingSlash(provider.endpoint)}/swagger.json`
-    let error
-    try {
-      await axios.get(swaggerUrl)
-      // maybe print out api version information to detect which version of colossus is running?
-      // or add anothe api endpoint for diagnostics information
-    } catch (err) { error = err }
-    console.log(`${provider.endpoint} - ${error ? error.message : 'OK'}`)
-  }))
-
-  let knownContentIds = await runtime.assets.getKnownContentIds()
-  console.log(`\nData Directory has ${knownContentIds.length} assets`)
-
-  // Check which providers are reporting a ready relationship for each asset
-  await Promise.all(knownContentIds.map(async (contentId) => {
-    let [relationshipsCount, judgement] = await assetRelationshipState(api, contentId, storageProviders)
-    console.log(`${encodeAddress(contentId)} replication ${relationshipsCount}/${storageProviders.length} - ${judgement}`)
-  }))
-
-  // interesting disconnect doesn't work unless an explicit provider was created
-  // for underlying api instance
-  // We no longer need a connection to the chain
-  api.disconnect()
-
-  console.log(`\nChecking available assets on providers (this can take some time)...`)
-  endpoints.forEach(async ({ providerId, endpoint }) => {
-    if (!endpoint) { return }
-    const total = knownContentIds.length
-    let { found } = await countContentAvailability(knownContentIds, endpoint)
-    console.log(`provider ${providerId}: has ${found} out of ${total}`)
-  })
+async function main() {
+	const runtime = await RuntimeApi.create()
+	const { api } = runtime
+
+	// get current blockheight
+	const currentHeader = await api.rpc.chain.getHeader()
+	const currentHeight = currentHeader.number.toBn()
+
+	// get all providers
+	const { ids: storageProviders } = await runtime.workers.getAllProviders()
+	console.log(`Found ${storageProviders.length} staked providers`)
+
+	const storageProviderAccountInfos = await Promise.all(
+		storageProviders.map(async (providerId) => {
+			return {
+				providerId,
+				info: await runtime.discovery.getAccountInfo(providerId),
+			}
+		})
+	)
+
+	// providers that have updated their account info and published ipfs id
+	// considered live if the record hasn't expired yet
+	const liveProviders = storageProviderAccountInfos.filter(({ info }) => {
+		return info && info.expires_at.gte(currentHeight)
+	})
+
+	const downProviders = storageProviderAccountInfos.filter(({ info }) => {
+		return info == null
+	})
+
+	const expiredTtlProviders = storageProviderAccountInfos.filter(({ info }) => {
+		return info && currentHeight.gte(info.expires_at)
+	})
+
+	const providersStatuses = mapInfoToStatus(liveProviders, currentHeight)
+	console.log('\n== Live Providers\n', providersStatuses)
+
+	const expiredProviderStatuses = mapInfoToStatus(expiredTtlProviders, currentHeight)
+	console.log('\n== Expired Providers\n', expiredProviderStatuses)
+
+	console.log(
+		'\n== Down Providers!\n',
+		downProviders.map((provider) => {
+			return {
+				providerId: provider.providerId,
+			}
+		})
+	)
+
+	// Resolve IPNS identities of providers
+	console.log('\nResolving live provider API Endpoints...')
+	const endpoints = await Promise.all(
+		providersStatuses.map(async ({ providerId }) => {
+			try {
+				const serviceInfo = await discover.discover_over_joystream_discovery_service(providerId, runtime)
+
+				if (serviceInfo == null) {
+					console.log(`provider ${providerId} has not published service information`)
+					return { providerId, endpoint: null }
+				}
+
+				const info = JSON.parse(serviceInfo.serialized)
+				console.log(`${providerId} -> ${info.asset.endpoint}`)
+				return { providerId, endpoint: info.asset.endpoint }
+			} catch (err) {
+				console.log('resolve failed for id', providerId, err.message)
+				return { providerId, endpoint: null }
+			}
+		})
+	)
+
+	console.log('\nChecking API Endpoints are online')
+	await Promise.all(
+		endpoints.map(async (provider) => {
+			if (!provider.endpoint) {
+				console.log('skipping provider', provider.providerId)
+				return
+			}
+			const swaggerUrl = `${stripEndingSlash(provider.endpoint)}/swagger.json`
+			let error
+			try {
+				await axios.get(swaggerUrl)
+				// maybe print out api version information to detect which version of colossus is running?
+				// or add another api endpoint for diagnostics information
+			} catch (err) {
+				error = err
+			}
+			console.log(`${provider.endpoint} - ${error ? error.message : 'OK'}`)
+		})
+	)
+
+	const knownContentIds = await runtime.assets.getKnownContentIds()
+	console.log(`\nData Directory has ${knownContentIds.length} assets`)
+
+	// Check which providers are reporting a ready relationship for each asset
+	await Promise.all(
+		knownContentIds.map(async (contentId) => {
+			const [relationshipsCount, judgement] = await assetRelationshipState(api, contentId, storageProviders)
+			console.log(
+				`${encodeAddress(contentId)} replication ${relationshipsCount}/${
+					storageProviders.length
+				} - ${judgement}`
+			)
+		})
+	)
+
+	// interesting disconnect doesn't work unless an explicit provider was created
+	// for underlying api instance
+	// We no longer need a connection to the chain
+	api.disconnect()
+
+	console.log(`\nChecking available assets on providers (this can take some time)...`)
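+	// Note: these availability checks run concurrently and are not awaited,
+	// so main() returns before they complete; results print as they arrive.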
+	endpoints.forEach(async ({ providerId, endpoint }) => {
+		if (!endpoint) {
+			return
+		}
+		const total = knownContentIds.length
+		const { found } = await countContentAvailability(knownContentIds, endpoint)
+		console.log(`provider ${providerId}: has ${found} out of ${total}`)
+	})
 }
 
-function mapInfoToStatus (providers, currentHeight) {
-  return providers.map(({providerId, info}) => {
-    if (info) {
-      return {
-        providerId,
-        identity: info.identity.toString(),
-        expiresIn: info.expires_at.sub(currentHeight).toNumber(),
-        expired: currentHeight.gte(info.expires_at)
-      }
-    } else {
-      return {
-        providerId,
-        identity: null,
-        status: 'down'
-      }
-    }
-  })
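+// Map provider account info records to compact status objects for reporting.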
+function mapInfoToStatus(providers, currentHeight) {
+	return providers.map(({ providerId, info }) => {
+		if (info) {
+			return {
+				providerId,
+				identity: info.identity.toString(),
+				expiresIn: info.expires_at.sub(currentHeight).toNumber(),
+				expired: currentHeight.gte(info.expires_at),
+			}
+		}
+		return {
+			providerId,
+			identity: null,
+			status: 'down',
+		}
+	})
 }
 
 // HTTP HEAD with axios all known content ids on each provider
-async function countContentAvailability (contentIds, source) {
-  let content = {}
-  let found = 0
-  let missing = 0
-  for (let i = 0; i < contentIds.length; i++) {
-    const assetUrl = makeAssetUrl(contentIds[i], source)
-    try {
-      let info = await axios.head(assetUrl)
-      content[encodeAddress(contentIds[i])] = {
-        type: info.headers['content-type'],
-        bytes: info.headers['content-length']
-      }
-      // TODO: cross check against dataobject size
-      found++
-    } catch (err) {
-      missing++
-    }
-  }
-
-  return { found, missing, content }
+async function countContentAvailability(contentIds, source) {
+	const content = {}
+	let found = 0
+	let missing = 0
+	for (let i = 0; i < contentIds.length; i++) {
+		const assetUrl = makeAssetUrl(contentIds[i], source)
+		try {
+			const info = await axios.head(assetUrl)
+			content[encodeAddress(contentIds[i])] = {
+				type: info.headers['content-type'],
+				bytes: info.headers['content-length'],
+			}
+			// TODO: cross check against dataobject size
+			found++
+		} catch (err) {
+			missing++
+		}
+	}
+
+	return { found, missing, content }
 }
 
-function makeAssetUrl (contentId, source) {
-  source = stripEndingSlash(source)
-  return `${source}/asset/v0/${encodeAddress(contentId)}`
+function makeAssetUrl(contentId, source) {
+	source = stripEndingSlash(source)
+	return `${source}/asset/v0/${encodeAddress(contentId)}`
 }
 
-async function assetRelationshipState (api, contentId, providers) {
-  let dataObject = await api.query.dataDirectory.dataObjectByContentId(contentId)
-
-  let relationshipIds = await api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
-
-  // how many relationships associated with active providers and in ready state
-  let activeRelationships = await Promise.all(relationshipIds.map(async (id) => {
-    let relationship = await api.query.dataObjectStorageRegistry.relationships(id)
-    relationship = relationship.unwrap()
-    // only interested in ready relationships
-    if (!relationship.ready) {
-      return undefined
-    }
-    // Does the relationship belong to an active provider ?
-    return providers.find((provider) => relationship.storage_provider.eq(provider))
-  }))
-
-  return ([
-    activeRelationships.filter(active => active).length,
-    dataObject.unwrap().liaison_judgement
-  ])
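+// For a content id, count ready storage relationships held by active providers
+// and return that count together with the data object's liaison judgement.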
+async function assetRelationshipState(api, contentId, providers) {
+	const dataObject = await api.query.dataDirectory.dataObjectByContentId(contentId)
+
+	const relationshipIds = await api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
+
+	// how many relationships associated with active providers and in ready state
+	const activeRelationships = await Promise.all(
+		relationshipIds.map(async (id) => {
+			let relationship = await api.query.dataObjectStorageRegistry.relationships(id)
+			relationship = relationship.unwrap()
+			// only interested in ready relationships
+			if (!relationship.ready) {
+				return undefined
+			}
+			// Does the relationship belong to an active provider ?
+			return providers.find((provider) => relationship.storage_provider.eq(provider))
+		})
+	)
+
+	return [activeRelationships.filter((active) => active).length, dataObject.unwrap().liaison_judgement]
 }
 
 main()

+ 1 - 1
storage-node/packages/helios/test/index.js

@@ -1 +1 @@
-// Add Tests!
+// Add Tests!

+ 151 - 151
storage-node/packages/runtime-api/assets.js

@@ -3,164 +3,164 @@
 const debug = require('debug')('joystream:runtime:assets')
 const { decodeAddress } = require('@polkadot/keyring')
 
-function parseContentId (contentId) {
-  try {
-    return decodeAddress(contentId)
-  } catch (err) {
-    return contentId
-  }
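+// Decode an SS58-encoded content id, falling back to the raw value if decoding fails.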
+function parseContentId(contentId) {
+	try {
+		return decodeAddress(contentId)
+	} catch (err) {
+		return contentId
+	}
 }
 
 /*
  * Add asset related functionality to the substrate API.
  */
 class AssetsApi {
-  static async create (base) {
-    const ret = new AssetsApi()
-    ret.base = base
-    await ret.init()
-    return ret
-  }
-
-  async init () {
-    debug('Init')
-  }
-
-  /*
-   * Create and return a data object.
-   */
-  async createDataObject (accountId, memberId, contentId, doTypeId, size, ipfsCid) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataDirectory.addContent(memberId, contentId, doTypeId, size, ipfsCid)
-    await this.base.signAndSend(accountId, tx)
-
-    // If the data object constructed properly, we should now be able to return
-    // the data object from the state.
-    return this.getDataObject(contentId)
-  }
-
-  /*
-   * Return the Data Object for a contendId
-   */
-  async getDataObject (contentId) {
-    contentId = parseContentId(contentId)
-    return this.base.api.query.dataDirectory.dataObjectByContentId(contentId)
-  }
-
-  /*
-   * Verify the liaison state for a DataObject:
-   * - Check the content ID has a DataObject
-   * - Check the storageProviderId is the liaison
-   * - Check the liaison state is Pending
-   *
-   * Each failure errors out, success returns the data object.
-   */
-  async checkLiaisonForDataObject (storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-
-    let obj = await this.getDataObject(contentId)
-
-    if (obj.isNone) {
-      throw new Error(`No DataObject created for content ID: ${contentId}`)
-    }
-
-    obj = obj.unwrap()
-
-    if (!obj.liaison.eq(storageProviderId)) {
-      throw new Error(`This storage node is not liaison for the content ID: ${contentId}`)
-    }
-
-    if (obj.liaison_judgement.type !== 'Pending') {
-      throw new Error(`Expected Pending judgement, but found: ${obj.liaison_judgement.type}`)
-    }
-
-    return obj
-  }
-
-  /*
-   * Sets the data object liaison judgement to Accepted
-   */
-  async acceptContent (providerAccoundId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataDirectory.acceptContent(storageProviderId, contentId)
-    return this.base.signAndSend(providerAccoundId, tx)
-  }
-
-  /*
-   * Sets the data object liaison judgement to Rejected
-   */
-  async rejectContent (providerAccountId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataDirectory.rejectContent(storageProviderId, contentId)
-    return this.base.signAndSend(providerAccountId, tx)
-  }
-
-  /*
-   * Creates storage relationship for a data object and provider
-   */
-  async createStorageRelationship (providerAccountId, storageProviderId, contentId, callback) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataObjectStorageRegistry.addRelationship(storageProviderId, contentId)
-
-    const subscribed = [['dataObjectStorageRegistry', 'DataObjectStorageRelationshipAdded']]
-    return this.base.signAndSend(providerAccountId, tx, 3, subscribed, callback)
-  }
-
-  /*
-   * Gets storage relationship for contentId for the given provider
-   */
-  async getStorageRelationshipAndId (storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    let rids = await this.base.api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
-
-    while (rids.length) {
-      const relationshipId = rids.shift()
-      let relationship = await this.base.api.query.dataObjectStorageRegistry.relationships(relationshipId)
-      relationship = relationship.unwrap()
-      if (relationship.storage_provider.eq(storageProviderId)) {
-        return ({ relationship, relationshipId })
-      }
-    }
-
-    return {}
-  }
-
-  /*
-   * Creates storage relationship for a data object and provider and returns the relationship id
-   */
-  async createAndReturnStorageRelationship (providerAccountId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    return new Promise(async (resolve, reject) => {
-      try {
-        await this.createStorageRelationship(providerAccountId, storageProviderId, contentId, (events) => {
-          events.forEach((event) => {
-            resolve(event[1].DataObjectStorageRelationshipId)
-          })
-        })
-      } catch (err) {
-        reject(err)
-      }
-    })
-  }
-
-  /*
-   * Set the ready state for a data object storage relationship to the new value
-   */
-  async toggleStorageRelationshipReady (providerAccountId, storageProviderId, dosrId, ready) {
-    var tx = ready
-      ? this.base.api.tx.dataObjectStorageRegistry.setRelationshipReady(storageProviderId, dosrId)
-      : this.base.api.tx.dataObjectStorageRegistry.unsetRelationshipReady(storageProviderId, dosrId)
-    return this.base.signAndSend(providerAccountId, tx)
-  }
-
-  /*
-   * Returns array of know content ids
-   */
-  async getKnownContentIds () {
-    return this.base.api.query.dataDirectory.knownContentIds()
-  }
+	static async create(base) {
+		const ret = new AssetsApi()
+		ret.base = base
+		await ret.init()
+		return ret
+	}
+
+	async init() {
+		debug('Init')
+	}
+
+	/*
+	 * Create and return a data object.
+	 */
+	async createDataObject(accountId, memberId, contentId, doTypeId, size, ipfsCid) {
+		contentId = parseContentId(contentId)
+		const tx = this.base.api.tx.dataDirectory.addContent(memberId, contentId, doTypeId, size, ipfsCid)
+		await this.base.signAndSend(accountId, tx)
+
+		// If the data object constructed properly, we should now be able to return
+		// the data object from the state.
+		return this.getDataObject(contentId)
+	}
+
+	/*
+	 * Return the Data Object for a contentId
+	 */
+	async getDataObject(contentId) {
+		contentId = parseContentId(contentId)
+		return this.base.api.query.dataDirectory.dataObjectByContentId(contentId)
+	}
+
+	/*
+	 * Verify the liaison state for a DataObject:
+	 * - Check the content ID has a DataObject
+	 * - Check the storageProviderId is the liaison
+	 * - Check the liaison state is Pending
+	 *
+	 * Each failure throws an error; success returns the data object.
+	 */
+	async checkLiaisonForDataObject(storageProviderId, contentId) {
+		contentId = parseContentId(contentId)
+
+		let obj = await this.getDataObject(contentId)
+
+		if (obj.isNone) {
+			throw new Error(`No DataObject created for content ID: ${contentId}`)
+		}
+
+		obj = obj.unwrap()
+
+		if (!obj.liaison.eq(storageProviderId)) {
+			throw new Error(`This storage node is not liaison for the content ID: ${contentId}`)
+		}
+
+		if (obj.liaison_judgement.type !== 'Pending') {
+			throw new Error(`Expected Pending judgement, but found: ${obj.liaison_judgement.type}`)
+		}
+
+		return obj
+	}
+
+	/*
+	 * Sets the data object liaison judgement to Accepted
+	 */
+	async acceptContent(providerAccountId, storageProviderId, contentId) {
+		contentId = parseContentId(contentId)
+		const tx = this.base.api.tx.dataDirectory.acceptContent(storageProviderId, contentId)
+		return this.base.signAndSend(providerAccountId, tx)
+	}
+
+	/*
+	 * Sets the data object liaison judgement to Rejected
+	 */
+	async rejectContent(providerAccountId, storageProviderId, contentId) {
+		contentId = parseContentId(contentId)
+		const tx = this.base.api.tx.dataDirectory.rejectContent(storageProviderId, contentId)
+		return this.base.signAndSend(providerAccountId, tx)
+	}
+
+	/*
+	 * Creates storage relationship for a data object and provider
+	 */
+	async createStorageRelationship(providerAccountId, storageProviderId, contentId, callback) {
+		contentId = parseContentId(contentId)
+		const tx = this.base.api.tx.dataObjectStorageRegistry.addRelationship(storageProviderId, contentId)
+
+		const subscribed = [['dataObjectStorageRegistry', 'DataObjectStorageRelationshipAdded']]
+		return this.base.signAndSend(providerAccountId, tx, 3, subscribed, callback)
+	}
+
+	/*
+	 * Gets the storage relationship and its id for a contentId and the given provider
+	 */
+	async getStorageRelationshipAndId(storageProviderId, contentId) {
+		contentId = parseContentId(contentId)
+		const rids = await this.base.api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
+
+		while (rids.length) {
+			const relationshipId = rids.shift()
+			let relationship = await this.base.api.query.dataObjectStorageRegistry.relationships(relationshipId)
+			relationship = relationship.unwrap()
+			if (relationship.storage_provider.eq(storageProviderId)) {
+				return { relationship, relationshipId }
+			}
+		}
+
+		return {}
+	}
+
+	/*
+	 * Creates storage relationship for a data object and provider and returns the relationship id
+	 */
+	async createAndReturnStorageRelationship(providerAccountId, storageProviderId, contentId) {
+		contentId = parseContentId(contentId)
+		return new Promise(async (resolve, reject) => {
+			try {
+				await this.createStorageRelationship(providerAccountId, storageProviderId, contentId, (events) => {
+					events.forEach((event) => {
+						resolve(event[1].DataObjectStorageRelationshipId)
+					})
+				})
+			} catch (err) {
+				reject(err)
+			}
+		})
+	}
+
+	/*
+	 * Set the ready state for a data object storage relationship to the new value
+	 */
+	async toggleStorageRelationshipReady(providerAccountId, storageProviderId, dosrId, ready) {
+		const tx = ready
+			? this.base.api.tx.dataObjectStorageRegistry.setRelationshipReady(storageProviderId, dosrId)
+			: this.base.api.tx.dataObjectStorageRegistry.unsetRelationshipReady(storageProviderId, dosrId)
+		return this.base.signAndSend(providerAccountId, tx)
+	}
+
+	/*
+	 * Returns an array of known content ids
+	 */
+	async getKnownContentIds() {
+		return this.base.api.query.dataDirectory.knownContentIds()
+	}
 }
 
 module.exports = {
-  AssetsApi
+	AssetsApi,
 }
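
As a quick orientation, here is a minimal sketch of the provider-side liaison flow these methods support. It assumes a hypothetical, already-unlocked role key file and provider id 0; real values come from the node's configuration.

	const { RuntimeApi } = require('@joystream/storage-runtime-api')

	async function acceptPending(contentId) {
		// Hypothetical options: the key file path and provider id are placeholders.
		const api = await RuntimeApi.create({ account_file: './role-key.json', storageProviderId: 0 })

		// Throws unless this provider is the Pending liaison for the content.
		await api.assets.checkLiaisonForDataObject(api.storageProviderId, contentId)

		// Flip the liaison judgement to Accepted; resolves when the tx is finalized.
		await api.assets.acceptContent(api.identities.key.address, api.storageProviderId, contentId)
	}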

+ 47 - 56
storage-node/packages/runtime-api/balances.js

@@ -16,75 +16,66 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const debug = require('debug')('joystream:runtime:balances');
+const debug = require('debug')('joystream:runtime:balances')
 
-const { IdentitiesApi } = require('@joystream/storage-runtime-api/identities');
+const { IdentitiesApi } = require('@joystream/storage-runtime-api/identities')
 
 /*
  * Bundle API calls related to account balances.
  */
-class BalancesApi
-{
-  static async create(base)
-  {
-    const ret = new BalancesApi();
-    ret.base = base;
-    await ret.init();
-    return ret;
-  }
+class BalancesApi {
+	static async create(base) {
+		const ret = new BalancesApi()
+		ret.base = base
+		await ret.init()
+		return ret
+	}
 
-  async init(account_file)
-  {
-    debug('Init');
-  }
+	async init(account_file) {
+		debug('Init')
+	}
 
-  /*
-   * Return true/false if the account has the minimum balance given.
-   */
-  async hasMinimumBalanceOf(accountId, min)
-  {
-    const balance = await this.freeBalance(accountId);
-    if (typeof min === 'number') {
-      return balance.cmpn(min) >= 0;
-    }
-    else {
-      return balance.cmp(min) >= 0;
-    }
-  }
+	/*
+	 * Return true if the account has at least the given minimum balance.
+	 */
+	async hasMinimumBalanceOf(accountId, min) {
+		const balance = await this.freeBalance(accountId)
+		if (typeof min === 'number') {
+			return balance.cmpn(min) >= 0
+		}
+		return balance.cmp(min) >= 0
+	}
 
-  /*
-   * Return the account's current free balance.
-   */
-  async freeBalance(accountId)
-  {
-    const decoded = this.base.identities.keyring.decodeAddress(accountId, true);
-    return this.base.api.query.balances.freeBalance(decoded);
-  }
+	/*
+	 * Return the account's current free balance.
+	 */
+	async freeBalance(accountId) {
+		const decoded = this.base.identities.keyring.decodeAddress(accountId, true)
+		return this.base.api.query.balances.freeBalance(decoded)
+	}
 
-  /*
-   * Return the base transaction fee.
-   */
-  baseTransactionFee()
-  {
-    return this.base.api.consts.transactionPayment.transactionBaseFee;
-  }
+	/*
+	 * Return the base transaction fee.
+	 */
+	baseTransactionFee() {
+		return this.base.api.consts.transactionPayment.transactionBaseFee
+	}
 
-  /*
-   * Transfer amount currency from one address to another. The sending
-   * address must be an unlocked key pair!
-   */
-  async transfer(from, to, amount)
-  {
-    const decode = require('@polkadot/keyring').decodeAddress;
-    const to_decoded = decode(to, true);
+	/*
+	 * Transfer amount currency from one address to another. The sending
+	 * address must be an unlocked key pair!
+	 */
+	async transfer(from, to, amount) {
+		const decode = require('@polkadot/keyring').decodeAddress
+		const to_decoded = decode(to, true)
 
-    const tx = this.base.api.tx.balances.transfer(to_decoded, amount);
-    return this.base.signAndSend(from, tx);
-  }
+		const tx = this.base.api.tx.balances.transfer(to_decoded, amount)
+		return this.base.signAndSend(from, tx)
+	}
 }
 
 module.exports = {
-  BalancesApi: BalancesApi,
+	BalancesApi,
 }
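
For illustration, a small sketch of how these helpers might compose; the amounts are assumed to be BN instances, and `from` must be an unlocked key pair already in the keyring:

	// Top up `to` from `from` only when it sits below `target` (a BN).
	async function topUp(api, from, to, target) {
		if (!(await api.balances.hasMinimumBalanceOf(to, target))) {
			// Transfer just the shortfall.
			const current = await api.balances.freeBalance(to)
			await api.balances.transfer(from, to, target.sub(current))
		}
	}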

+ 54 - 58
storage-node/packages/runtime-api/discovery.js

@@ -6,71 +6,67 @@ const debug = require('debug')('joystream:runtime:discovery')
  * Add discovery related functionality to the substrate API.
  */
 class DiscoveryApi {
-  static async create (base) {
-    const ret = new DiscoveryApi()
-    ret.base = base
-    await ret.init()
-    return ret
-  }
+	static async create(base) {
+		const ret = new DiscoveryApi()
+		ret.base = base
+		await ret.init()
+		return ret
+	}
 
-  async init () {
-    debug('Init')
-  }
+	async init() {
+		debug('Init')
+	}
 
-  /*
-   * Get Bootstrap endpoints
-   */
-  async getBootstrapEndpoints () {
-    return this.base.api.query.discovery.bootstrapEndpoints()
-  }
+	/*
+	 * Get Bootstrap endpoints
+	 */
+	async getBootstrapEndpoints() {
+		return this.base.api.query.discovery.bootstrapEndpoints()
+	}
 
-  /*
-   * Set Bootstrap endpoints, requires the sudo account to be provided and unlocked
-   */
-  async setBootstrapEndpoints (sudoAccount, endpoints) {
-    const tx = this.base.api.tx.discovery.setBootstrapEndpoints(endpoints)
-    // make sudo call
-    return this.base.signAndSend(
-      sudoAccount,
-      this.base.api.tx.sudo.sudo(tx)
-    )
-  }
+	/*
+	 * Set Bootstrap endpoints, requires the sudo account to be provided and unlocked
+	 */
+	async setBootstrapEndpoints(sudoAccount, endpoints) {
+		const tx = this.base.api.tx.discovery.setBootstrapEndpoints(endpoints)
+		// make sudo call
+		return this.base.signAndSend(sudoAccount, this.base.api.tx.sudo.sudo(tx))
+	}
 
-  /*
-   * Get AccountInfo of a storage provider
-   */
-  async getAccountInfo (storageProviderId) {
-    const info = await this.base.api.query.discovery.accountInfoByStorageProviderId(storageProviderId)
-    // Not an Option so we use default value check to know if info was found
-    return info.expires_at.eq(0) ? null : info
-  }
+	/*
+	 * Get AccountInfo of a storage provider
+	 */
+	async getAccountInfo(storageProviderId) {
+		const info = await this.base.api.query.discovery.accountInfoByStorageProviderId(storageProviderId)
+		// Not an Option, so we use a default-value check to know whether info was found
+		return info.expires_at.eq(0) ? null : info
+	}
 
-  /*
-   * Set AccountInfo of our storage provider
-   */
-  async setAccountInfo (ipnsId) {
-    const roleAccountId = this.base.identities.key.address
-    const storageProviderId = this.base.storageProviderId
-    const isProvider = await this.base.workers.isStorageProvider(storageProviderId)
-    if (isProvider) {
-      const tx = this.base.api.tx.discovery.setIpnsId(storageProviderId, ipnsId)
-      return this.base.signAndSend(roleAccountId, tx)
-    } else {
-      throw new Error('Cannot set AccountInfo, id is not a storage provider')
-    }
-  }
+	/*
+	 * Set AccountInfo of our storage provider
+	 */
+	async setAccountInfo(ipnsId) {
+		const roleAccountId = this.base.identities.key.address
+		const storageProviderId = this.base.storageProviderId
+		const isProvider = await this.base.workers.isStorageProvider(storageProviderId)
+		if (isProvider) {
+			const tx = this.base.api.tx.discovery.setIpnsId(storageProviderId, ipnsId)
+			return this.base.signAndSend(roleAccountId, tx)
+		}
+		throw new Error('Cannot set AccountInfo, id is not a storage provider')
+	}
 
-  /*
-   * Clear AccountInfo of our storage provider
-   */
-  async unsetAccountInfo () {
-    const roleAccountId = this.base.identities.key.address
-    const storageProviderId = this.base.storageProviderId
-    var tx = this.base.api.tx.discovery.unsetIpnsId(storageProviderId)
-    return this.base.signAndSend(roleAccountId, tx)
-  }
+	/*
+	 * Clear AccountInfo of our storage provider
+	 */
+	async unsetAccountInfo() {
+		const roleAccountId = this.base.identities.key.address
+		const storageProviderId = this.base.storageProviderId
+		const tx = this.base.api.tx.discovery.unsetIpnsId(storageProviderId)
+		return this.base.signAndSend(roleAccountId, tx)
+	}
 }
 
 module.exports = {
-  DiscoveryApi
+	DiscoveryApi,
 }
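
A usage sketch, assuming `api` is a RuntimeApi created with a valid storageProviderId and an unlocked role key; the ipnsId argument is whatever identifier the node's IPFS daemon reports:

	async function publishDiscoveryInfo(api, ipnsId) {
		// Throws if api.storageProviderId is not an enrolled storage provider.
		await api.discovery.setAccountInfo(ipnsId)

		// Read the record back; null means the default (not found) value.
		const info = await api.discovery.getAccountInfo(api.storageProviderId)
		if (info) {
			console.log('IPNS id published, expires at block', info.expires_at.toString())
		}
	}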

+ 198 - 198
storage-node/packages/runtime-api/identities.js

@@ -32,205 +32,205 @@ const util_crypto = require('@polkadot/util-crypto')
  * This loosely groups: accounts, key management, and membership.
  */
 class IdentitiesApi {
-  static async create (base, {account_file, passphrase, canPromptForPassphrase}) {
-    const ret = new IdentitiesApi()
-    ret.base = base
-    await ret.init(account_file, passphrase, canPromptForPassphrase)
-    return ret
-  }
-
-  async init (account_file, passphrase, canPromptForPassphrase) {
-    debug('Init')
-
-    // Creatre keyring
-    this.keyring = new Keyring()
-
-    this.canPromptForPassphrase = canPromptForPassphrase || false
-
-    // Load account file, if possible.
-    try {
-      this.key = await this.loadUnlock(account_file, passphrase)
-    } catch (err) {
-      debug('Error loading account file:', err.message)
-    }
-  }
-
-  /*
-   * Load a key file and unlock it if necessary.
-   */
-  async loadUnlock (account_file, passphrase) {
-    const fullname = path.resolve(account_file)
-    debug('Initializing key from', fullname)
-    const key = this.keyring.addFromJson(require(fullname))
-    await this.tryUnlock(key, passphrase)
-    debug('Successfully initialized with address', key.address)
-    return key
-  }
-
-  /*
-   * Try to unlock a key if it isn't already unlocked.
-   * passphrase should be supplied as argument.
-   */
-  async tryUnlock (key, passphrase) {
-    if (!key.isLocked) {
-      debug('Key is not locked, not attempting to unlock')
-      return
-    }
-
-    // First try with an empty passphrase - for convenience
-    try {
-      key.decodePkcs8('')
-
-      if (passphrase) {
-        debug('Key was not encrypted, supplied passphrase was ignored')
-      }
-
-      return
-    } catch (err) {
-      // pass
-    }
-
-    // Then with supplied passphrase
-    try {
-      debug('Decrypting with supplied passphrase')
-      key.decodePkcs8(passphrase)
-      return
-    } catch (err) {
-      // pass
-    }
-
-    // If that didn't work, ask for a passphrase if appropriate
-    if (this.canPromptForPassphrase) {
-      passphrase = await this.askForPassphrase(key.address)
-      key.decodePkcs8(passphrase)
-      return
-    }
-
-    throw new Error('invalid passphrase supplied')
-  }
-
-  /*
-   * Ask for a passphrase
-   */
-  askForPassphrase (address) {
-    // Query for passphrase
-    const prompt = require('password-prompt')
-    return prompt(`Enter passphrase for ${address}: `, { required: false })
-  }
-
-  /*
-   * Return true if the account is a root account of a member
-   */
-  async isMember (accountId) {
-    const memberIds = await this.memberIdsOf(accountId) // return array of member ids
-    return memberIds.length > 0 // true if at least one member id exists for the acccount
-  }
-
-  /*
-   * Return all the member IDs of an account by the root account id
-   */
-  async memberIdsOf (accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    return this.base.api.query.members.memberIdsByRootAccountId(decoded)
-  }
-
-  /*
-   * Return the first member ID of an account, or undefined if not a member root account.
-   */
-  async firstMemberIdOf (accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    let ids = await this.base.api.query.members.memberIdsByRootAccountId(decoded)
-    return ids[0]
-  }
-
-  /*
-   * Export a key pair to JSON. Will ask for a passphrase.
-   */
-  async exportKeyPair (accountId) {
-    const passphrase = await this.askForPassphrase(accountId)
-
-    // Produce JSON output
-    return this.keyring.toJson(accountId, passphrase)
-  }
-
-  /*
-   * Export a key pair and write it to a JSON file with the account ID as the
-   * name.
-   */
-  async writeKeyPairExport (accountId, prefix) {
-    // Generate JSON
-    const data = await this.exportKeyPair(accountId)
-
-    // Write JSON
-    var filename = `${data.address}.json`
-
-    if (prefix) {
-      const path = require('path')
-      filename = path.resolve(prefix, filename)
-    }
-
-    fs.writeFileSync(filename, JSON.stringify(data), {
-      encoding: 'utf8',
-      mode: 0o600
-    })
-
-    return filename
-  }
-
-  /*
-   * Register account id with userInfo as a new member
-   * using default policy 0, returns new member id
-   */
-  async registerMember (accountId, userInfo) {
-    const tx = this.base.api.tx.members.buyMembership(0, userInfo)
-
-    return this.base.signAndSendThenGetEventResult(accountId, tx, {
-      eventModule: 'members',
-      eventName: 'MemberRegistered',
-      eventProperty: 'MemberId'
-    })
-  }
-
-  /*
-   * Injects a keypair and sets it as the default identity
-   */
-  useKeyPair (keyPair) {
-    this.key = this.keyring.addPair(keyPair)
-  }
-
-  /*
-   * Create a new role key. If no name is given,
-   * default to 'storage'.
-   */
-  async createNewRoleKey (name) {
-    name = name || 'storage-provider'
-
-    // Generate new key pair
-    const keyPair = util_crypto.naclKeypairFromRandom()
-
-    // Encode to an address.
-    const addr = this.keyring.encodeAddress(keyPair.publicKey)
-    debug('Generated new key pair with address', addr)
-
-    // Add to key wring. We set the meta to identify the account as
-    // a role key.
-    const meta = {
-      name: `${name} role account`
-    }
-
-    const createPair = require('@polkadot/keyring/pair').default
-    const pair = createPair('ed25519', keyPair, meta)
-
-    this.keyring.addPair(pair)
-
-    return pair
-  }
-
-  getSudoAccount() {
-    return this.base.api.query.sudo.key()
-  }
+	static async create(base, { account_file, passphrase, canPromptForPassphrase }) {
+		const ret = new IdentitiesApi()
+		ret.base = base
+		await ret.init(account_file, passphrase, canPromptForPassphrase)
+		return ret
+	}
+
+	async init(account_file, passphrase, canPromptForPassphrase) {
+		debug('Init')
+
+		// Create keyring
+		this.keyring = new Keyring()
+
+		this.canPromptForPassphrase = canPromptForPassphrase || false
+
+		// Load account file, if possible.
+		try {
+			this.key = await this.loadUnlock(account_file, passphrase)
+		} catch (err) {
+			debug('Error loading account file:', err.message)
+		}
+	}
+
+	/*
+	 * Load a key file and unlock it if necessary.
+	 */
+	async loadUnlock(account_file, passphrase) {
+		const fullname = path.resolve(account_file)
+		debug('Initializing key from', fullname)
+		const key = this.keyring.addFromJson(require(fullname))
+		await this.tryUnlock(key, passphrase)
+		debug('Successfully initialized with address', key.address)
+		return key
+	}
+
+	/*
+	 * Try to unlock a key if it isn't already unlocked.
+	 * The passphrase should be supplied as an argument.
+	 */
+	async tryUnlock(key, passphrase) {
+		if (!key.isLocked) {
+			debug('Key is not locked, not attempting to unlock')
+			return
+		}
+
+		// First try with an empty passphrase - for convenience
+		try {
+			key.decodePkcs8('')
+
+			if (passphrase) {
+				debug('Key was not encrypted, supplied passphrase was ignored')
+			}
+
+			return
+		} catch (err) {
+			// pass
+		}
+
+		// Then with supplied passphrase
+		try {
+			debug('Decrypting with supplied passphrase')
+			key.decodePkcs8(passphrase)
+			return
+		} catch (err) {
+			// pass
+		}
+
+		// If that didn't work, ask for a passphrase if appropriate
+		if (this.canPromptForPassphrase) {
+			passphrase = await this.askForPassphrase(key.address)
+			key.decodePkcs8(passphrase)
+			return
+		}
+
+		throw new Error('invalid passphrase supplied')
+	}
+
+	/*
+	 * Ask for a passphrase
+	 */
+	askForPassphrase(address) {
+		// Query for passphrase
+		const prompt = require('password-prompt')
+		return prompt(`Enter passphrase for ${address}: `, { required: false })
+	}
+
+	/*
+	 * Return true if the account is a root account of a member
+	 */
+	async isMember(accountId) {
+		const memberIds = await this.memberIdsOf(accountId) // return array of member ids
+		return memberIds.length > 0 // true if at least one member id exists for the account
+	}
+
+	/*
+	 * Return all the member IDs of an account by the root account id
+	 */
+	async memberIdsOf(accountId) {
+		const decoded = this.keyring.decodeAddress(accountId)
+		return this.base.api.query.members.memberIdsByRootAccountId(decoded)
+	}
+
+	/*
+	 * Return the first member ID of an account, or undefined if not a member root account.
+	 */
+	async firstMemberIdOf(accountId) {
+		const decoded = this.keyring.decodeAddress(accountId)
+		const ids = await this.base.api.query.members.memberIdsByRootAccountId(decoded)
+		return ids[0]
+	}
+
+	/*
+	 * Export a key pair to JSON. Will ask for a passphrase.
+	 */
+	async exportKeyPair(accountId) {
+		const passphrase = await this.askForPassphrase(accountId)
+
+		// Produce JSON output
+		return this.keyring.toJson(accountId, passphrase)
+	}
+
+	/*
+	 * Export a key pair and write it to a JSON file with the account ID as the
+	 * name.
+	 */
+	async writeKeyPairExport(accountId, prefix) {
+		// Generate JSON
+		const data = await this.exportKeyPair(accountId)
+
+		// Write JSON
+		let filename = `${data.address}.json`
+
+		if (prefix) {
+			const path = require('path')
+			filename = path.resolve(prefix, filename)
+		}
+
+		fs.writeFileSync(filename, JSON.stringify(data), {
+			encoding: 'utf8',
+			mode: 0o600,
+		})
+
+		return filename
+	}
+
+	/*
+	 * Register account id with userInfo as a new member
+	 * using default policy 0; returns the new member id
+	 */
+	async registerMember(accountId, userInfo) {
+		const tx = this.base.api.tx.members.buyMembership(0, userInfo)
+
+		return this.base.signAndSendThenGetEventResult(accountId, tx, {
+			eventModule: 'members',
+			eventName: 'MemberRegistered',
+			eventProperty: 'MemberId',
+		})
+	}
+
+	/*
+	 * Injects a keypair and sets it as the default identity
+	 */
+	useKeyPair(keyPair) {
+		this.key = this.keyring.addPair(keyPair)
+	}
+
+	/*
+	 * Create a new role key. If no name is given,
+	 * default to 'storage-provider'.
+	 */
+	async createNewRoleKey(name) {
+		name = name || 'storage-provider'
+
+		// Generate new key pair
+		const keyPair = util_crypto.naclKeypairFromRandom()
+
+		// Encode to an address.
+		const addr = this.keyring.encodeAddress(keyPair.publicKey)
+		debug('Generated new key pair with address', addr)
+
+		// Add to keyring. We set the meta to identify the account as
+		// a role key.
+		const meta = {
+			name: `${name} role account`,
+		}
+
+		const createPair = require('@polkadot/keyring/pair').default
+		const pair = createPair('ed25519', keyPair, meta)
+
+		this.keyring.addPair(pair)
+
+		return pair
+	}
+
+	getSudoAccount() {
+		return this.base.api.query.sudo.key()
+	}
 }
 
 module.exports = {
-  IdentitiesApi
+	IdentitiesApi,
 }
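
A sketch tying the load/unlock and membership helpers together; the key file path is a placeholder:

	const { RuntimeApi } = require('@joystream/storage-runtime-api')

	async function reportMembership(keyFile) {
		// canPromptForPassphrase lets tryUnlock() fall back to an interactive prompt.
		const api = await RuntimeApi.create({ account_file: keyFile, canPromptForPassphrase: true })
		const address = api.identities.key.address

		if (await api.identities.isMember(address)) {
			const memberId = await api.identities.firstMemberIdOf(address)
			console.log(`${address} is the root account of member ${memberId}`)
		} else {
			console.log(`${address} is not a member root account`)
		}
		api.disconnect()
	}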

+ 257 - 255
storage-node/packages/runtime-api/index.js

@@ -35,267 +35,269 @@ const { newExternallyControlledPromise } = require('@joystream/storage-utils/ext
  * Initialize runtime (substrate) API and keyring.
  */
 class RuntimeApi {
-  static async create (options) {
-    const runtime_api = new RuntimeApi()
-    await runtime_api.init(options || {})
-    return runtime_api
-  }
-
-  async init (options) {
-    debug('Init')
-
-    options = options || {}
-
-    // Register joystream types
-    registerJoystreamTypes()
-
-    const provider = new WsProvider(options.provider_url || 'ws://localhost:9944')
-
-    // Create the API instrance
-    this.api = await ApiPromise.create({ provider })
-
-    this.asyncLock = new AsyncLock()
-
-    // Keep track locally of account nonces.
-    this.nonces = {}
-
-    // The storage provider id to use
-    this.storageProviderId = parseInt(options.storageProviderId) // u64 instead ?
-
-    // Ok, create individual APIs
-    this.identities = await IdentitiesApi.create(this, {
-      account_file: options.account_file,
-      passphrase: options.passphrase,
-      canPromptForPassphrase: options.canPromptForPassphrase
-    })
-    this.balances = await BalancesApi.create(this)
-    this.workers = await WorkersApi.create(this)
-    this.assets = await AssetsApi.create(this)
-    this.discovery = await DiscoveryApi.create(this)
-  }
-
-  disconnect () {
-    this.api.disconnect()
-  }
-
-  executeWithAccountLock (account_id, func) {
-    return this.asyncLock.acquire(`${account_id}`, func)
-  }
-
-  /*
-   * Wait for an event. Filters out any events that don't match the module and
-   * event name.
-   *
-   * The result of the Promise is an array containing first the full event
-   * name, and then the event fields as an object.
-   */
-  async waitForEvent (module, name) {
-    return this.waitForEvents([[module, name]])
-  }
-
-  _matchingEvents(subscribed, events) {
-    debug(`Number of events: ${events.length} subscribed to ${subscribed}`)
-
-    const filtered = events.filter((record) => {
-      const { event, phase } = record
-
-      // Show what we are busy with
-      debug(`\t${event.section}:${event.method}:: (phase=${phase.toString()})`)
-      debug(`\t\t${event.meta.documentation.toString()}`)
-
-      // Skip events we're not interested in.
-      const matching = subscribed.filter((value) => {
-        return event.section === value[0] && event.method === value[1]
-      })
-      return matching.length > 0
-    })
-    debug(`Filtered: ${filtered.length}`)
-
-    const mapped = filtered.map((record) => {
-      const { event } = record
-      const types = event.typeDef
-
-      // Loop through each of the parameters, displaying the type and data
-      const payload = {}
-      event.data.forEach((data, index) => {
-        debug(`\t\t\t${types[index].type}: ${data.toString()}`)
-        payload[types[index].type] = data
-      })
-
-      const full_name = `${event.section}.${event.method}`
-      return [full_name, payload]
-    })
-    debug('Mapped', mapped)
-
-    return mapped
-  }
-
-  /*
-   * Same as waitForEvent, but filter on multiple events. The parameter is an
-   * array of arrays containing module and name. Calling waitForEvent is
-   * identical to calling this with [[module, name]].
-   *
-   * Returns the first matched event *only*.
-   */
-  async waitForEvents (subscribed) {
-    return new Promise((resolve, reject) => {
-      this.api.query.system.events((events) => {
-        const matches = this._matchingEvents(subscribed, events)
-        if (matches && matches.length) {
-          resolve(matches)
-        }
-      })
-    })
-  }
-
-  /*
-   * Nonce-aware signAndSend(). Also allows you to use the accountId instead
-   * of the key, making calls a little simpler. Will lock to prevent concurrent
-   * calls so correct nonce is used.
-   *
-   * If the subscribed events are given, and a callback as well, then the
-   * callback is invoked with matching events.
-   */
-  async signAndSend (accountId, tx, attempts, subscribed, callback) {
-    accountId = this.identities.keyring.encodeAddress(accountId)
-
-    // Key must be unlocked
-    const from_key = this.identities.keyring.getPair(accountId)
-    if (from_key.isLocked) {
-      throw new Error('Must unlock key before using it to sign!')
-    }
-
-    const finalizedPromise = newExternallyControlledPromise()
-
-    let unsubscribe = await this.executeWithAccountLock(accountId, async () => {
-      // Try to get the next nonce to use
-      let nonce = this.nonces[accountId]
-
-      let incrementNonce = () => {
-        // only increment once
-        incrementNonce = () => {} // turn it into a no-op
-        nonce = nonce.addn(1)
-        this.nonces[accountId] = nonce
-      }
-
-      // If the nonce isn't available, get it from chain.
-      if (!nonce) {
-        // current nonce
-        nonce = await this.api.query.system.accountNonce(accountId)
-        debug(`Got nonce for ${accountId} from chain: ${nonce}`)
-      }
-
-      return new Promise((resolve, reject) => {
-        debug('Signing and sending tx')
-        // send(statusUpdates) returns a function for unsubscribing from status updates
-        let unsubscribe = tx.sign(from_key, { nonce })
-          .send(({events = [], status}) => {
-            debug(`TX status: ${status.type}`)
-
-            // Whatever events we get, process them if there's someone interested.
-            // It is critical that this event handling doesn't prevent
-            try {
-              if (subscribed && callback) {
-                const matched = this._matchingEvents(subscribed, events)
-                debug('Matching events:', matched)
-                if (matched.length) {
-                  callback(matched)
-                }
-              }
-            } catch (err) {
-              debug(`Error handling events ${err.stack}`)
-            }
-
-            // We want to release lock as early as possible, sometimes Ready status
-            // doesn't occur, so we do it on Broadcast instead
-            if (status.isReady) {
-              debug('TX Ready.')
-              incrementNonce()
-              resolve(unsubscribe) // releases lock
-            } else if (status.isBroadcast) {
-              debug('TX Broadcast.')
-              incrementNonce()
-              resolve(unsubscribe) // releases lock
-            } else if (status.isFinalized) {
-              debug('TX Finalized.')
-              finalizedPromise.resolve(status)
-            } else if (status.isFuture) {
-              // comes before ready.
-              // does that mean it will remain in mempool or in api internal queue?
-              // nonce was set in the future. Treating it as an error for now.
-              debug('TX Future!')
-              // nonce is likely out of sync, delete it so we reload it from chain on next attempt
-              delete this.nonces[accountId]
-              const err = new Error('transaction nonce set in future')
-              finalizedPromise.reject(err)
-              reject(err)
-            }
-
-            /* why don't we see these status updates on local devchain (single node)
+	static async create(options) {
+		const runtime_api = new RuntimeApi()
+		await runtime_api.init(options || {})
+		return runtime_api
+	}
+
+	async init(options) {
+		debug('Init')
+
+		options = options || {}
+
+		// Register joystream types
+		registerJoystreamTypes()
+
+		const provider = new WsProvider(options.provider_url || 'ws://localhost:9944')
+
+		// Create the API instance
+		this.api = await ApiPromise.create({ provider })
+
+		this.asyncLock = new AsyncLock()
+
+		// Keep track locally of account nonces.
+		this.nonces = {}
+
+		// The storage provider id to use
+		this.storageProviderId = parseInt(options.storageProviderId) // u64 instead ?
+
+		// Ok, create individual APIs
+		this.identities = await IdentitiesApi.create(this, {
+			account_file: options.account_file,
+			passphrase: options.passphrase,
+			canPromptForPassphrase: options.canPromptForPassphrase,
+		})
+		this.balances = await BalancesApi.create(this)
+		this.workers = await WorkersApi.create(this)
+		this.assets = await AssetsApi.create(this)
+		this.discovery = await DiscoveryApi.create(this)
+	}
+
+	disconnect() {
+		this.api.disconnect()
+	}
+
+	executeWithAccountLock(account_id, func) {
+		return this.asyncLock.acquire(`${account_id}`, func)
+	}
+
+	/*
+	 * Wait for an event. Filters out any events that don't match the module and
+	 * event name.
+	 *
+	 * The result of the Promise is an array containing first the full event
+	 * name, and then the event fields as an object.
+	 */
+	async waitForEvent(module, name) {
+		return this.waitForEvents([[module, name]])
+	}
+
+	_matchingEvents(subscribed, events) {
+		debug(`Number of events: ${events.length} subscribed to ${subscribed}`)
+
+		const filtered = events.filter((record) => {
+			const { event, phase } = record
+
+			// Show what we are busy with
+			debug(`\t${event.section}:${event.method}:: (phase=${phase.toString()})`)
+			debug(`\t\t${event.meta.documentation.toString()}`)
+
+			// Skip events we're not interested in.
+			const matching = subscribed.filter((value) => {
+				return event.section === value[0] && event.method === value[1]
+			})
+			return matching.length > 0
+		})
+		debug(`Filtered: ${filtered.length}`)
+
+		const mapped = filtered.map((record) => {
+			const { event } = record
+			const types = event.typeDef
+
+			// Loop through each of the parameters, displaying the type and data
+			const payload = {}
+			event.data.forEach((data, index) => {
+				debug(`\t\t\t${types[index].type}: ${data.toString()}`)
+				payload[types[index].type] = data
+			})
+
+			const full_name = `${event.section}.${event.method}`
+			return [full_name, payload]
+		})
+		debug('Mapped', mapped)
+
+		return mapped
+	}
+
+	/*
+	 * Same as waitForEvent, but filter on multiple events. The parameter is an
+	 * array of arrays containing module and name. Calling waitForEvent is
+	 * identical to calling this with [[module, name]].
+	 *
+	 * Returns the first matched event *only*.
+	 */
+	async waitForEvents(subscribed) {
+		return new Promise((resolve, reject) => {
+			this.api.query.system.events((events) => {
+				const matches = this._matchingEvents(subscribed, events)
+				if (matches && matches.length) {
+					resolve(matches)
+				}
+			})
+		})
+	}
+
+	/*
+	 * Nonce-aware signAndSend(). Also allows you to use the accountId instead
+	 * of the key, making calls a little simpler. Will lock to prevent concurrent
+	 * calls so the correct nonce is used.
+	 *
+	 * If the subscribed events are given, and a callback as well, then the
+	 * callback is invoked with matching events.
+	 */
+	async signAndSend(accountId, tx, attempts, subscribed, callback) {
+		accountId = this.identities.keyring.encodeAddress(accountId)
+
+		// Key must be unlocked
+		const from_key = this.identities.keyring.getPair(accountId)
+		if (from_key.isLocked) {
+			throw new Error('Must unlock key before using it to sign!')
+		}
+
+		const finalizedPromise = newExternallyControlledPromise()
+
+		const unsubscribe = await this.executeWithAccountLock(accountId, async () => {
+			// Try to get the next nonce to use
+			let nonce = this.nonces[accountId]
+
+			let incrementNonce = () => {
+				// only increment once
+				incrementNonce = () => {} // turn it into a no-op
+				nonce = nonce.addn(1)
+				this.nonces[accountId] = nonce
+			}
+
+			// If the nonce isn't available, get it from chain.
+			if (!nonce) {
+				// current nonce
+				nonce = await this.api.query.system.accountNonce(accountId)
+				debug(`Got nonce for ${accountId} from chain: ${nonce}`)
+			}
+
+			return new Promise((resolve, reject) => {
+				debug('Signing and sending tx')
+				// send(statusUpdates) returns a function for unsubscribing from status updates
+				const unsubscribe = tx
+					.sign(from_key, { nonce })
+					.send(({ events = [], status }) => {
+						debug(`TX status: ${status.type}`)
+
+						// Whatever events we get, process them if there's someone interested.
+						// It is critical that this event handling doesn't throw and prevent the status handling below from running.
+						try {
+							if (subscribed && callback) {
+								const matched = this._matchingEvents(subscribed, events)
+								debug('Matching events:', matched)
+								if (matched.length) {
+									callback(matched)
+								}
+							}
+						} catch (err) {
+							debug(`Error handling events ${err.stack}`)
+						}
+
+						// We want to release the lock as early as possible; sometimes the Ready status
+						// doesn't occur, so we do it on Broadcast instead
+						if (status.isReady) {
+							debug('TX Ready.')
+							incrementNonce()
+							resolve(unsubscribe) // releases lock
+						} else if (status.isBroadcast) {
+							debug('TX Broadcast.')
+							incrementNonce()
+							resolve(unsubscribe) // releases lock
+						} else if (status.isFinalized) {
+							debug('TX Finalized.')
+							finalizedPromise.resolve(status)
+						} else if (status.isFuture) {
+							// comes before ready.
+							// does that mean it will remain in mempool or in api internal queue?
+							// nonce was set in the future. Treating it as an error for now.
+							debug('TX Future!')
+							// nonce is likely out of sync, delete it so we reload it from chain on next attempt
+							delete this.nonces[accountId]
+							const err = new Error('transaction nonce set in future')
+							finalizedPromise.reject(err)
+							reject(err)
+						}
+
+						/* why don't we see these status updates on local devchain (single node)
             isUsurped
             isBroadcast
             isDropped
             isInvalid
             */
-          })
-          .catch((err) => {
-            // 1014 error: Most likely you are sending transaction with the same nonce,
-            // so it assumes you want to replace existing one, but the priority is too low to replace it (priority = fee = len(encoded_transaction) currently)
-            // Remember this can also happen if in the past we sent a tx with a future nonce, and the current nonce
-            // now matches it.
-            if (err) {
-              const errstr = err.toString()
-              // not the best way to check error code.
-              // https://github.com/polkadot-js/api/blob/master/packages/rpc-provider/src/coder/index.ts#L52
-              if (errstr.indexOf('Error: 1014:') < 0 && // low priority
-                  errstr.indexOf('Error: 1010:') < 0) // bad transaction
-              {
-                // Error but not nonce related. (bad arguments maybe)
-                debug('TX error', err)
-              } else {
-                // nonce is likely out of sync, delete it so we reload it from chain on next attempt
-                delete this.nonces[accountId]
-              }
-            }
-
-            finalizedPromise.reject(err)
-            // releases lock
-            reject(err)
-          })
-      })
-    })
-
-    // when does it make sense to manyally unsubscribe?
-    // at this point unsubscribe.then and unsubscribe.catch have been deleted
-    // unsubscribe() // don't unsubscribe if we want to wait for additional status
-    // updates to know when the tx has been finalized
-    return finalizedPromise.promise
-  }
-
-  /*
-   * Sign and send a transaction expect event from
-   * module and return eventProperty from the event.
-   */
-  async signAndSendThenGetEventResult (senderAccountId, tx, { eventModule, eventName, eventProperty }) {
-    // event from a module,
-    const subscribed = [[eventModule, eventName]]
-    return new Promise(async (resolve, reject) => {
-      try {
-        await this.signAndSend(senderAccountId, tx, 1, subscribed, (events) => {
-          events.forEach((event) => {
-            // fix - we may not necessarily want the first event
-            // if there are multiple events emitted,
-            resolve(event[1][eventProperty])
-          })
-        })
-      } catch (err) {
-        reject(err)
-      }
-    })
-  }
-
+					})
+					.catch((err) => {
+						// 1014 error: Most likely you are sending transaction with the same nonce,
+						// so it assumes you want to replace existing one, but the priority is too low to replace it (priority = fee = len(encoded_transaction) currently)
+						// Remember this can also happen if in the past we sent a tx with a future nonce, and the current nonce
+						// now matches it.
+						if (err) {
+							const errstr = err.toString()
+							// not the best way to check error code.
+							// https://github.com/polkadot-js/api/blob/master/packages/rpc-provider/src/coder/index.ts#L52
+							if (
+								errstr.indexOf('Error: 1014:') < 0 && // low priority
+								errstr.indexOf('Error: 1010:') < 0 // bad transaction
+							) {
+								// Error but not nonce related. (bad arguments maybe)
+								debug('TX error', err)
+							} else {
+								// nonce is likely out of sync, delete it so we reload it from chain on next attempt
+								delete this.nonces[accountId]
+							}
+						}
+
+						finalizedPromise.reject(err)
+						// releases lock
+						reject(err)
+					})
+			})
+		})
+
+		// when does it make sense to manually unsubscribe?
+		// at this point unsubscribe.then and unsubscribe.catch have been deleted
+		// unsubscribe() // don't unsubscribe if we want to wait for additional status
+		// updates to know when the tx has been finalized
+		return finalizedPromise.promise
+	}
+
+	/*
+	 * Sign and send a transaction, expect an event from the given
+	 * module, and return eventProperty from that event.
+	 */
+	async signAndSendThenGetEventResult(senderAccountId, tx, { eventModule, eventName, eventProperty }) {
+		// event from a module,
+		const subscribed = [[eventModule, eventName]]
+		return new Promise(async (resolve, reject) => {
+			try {
+				await this.signAndSend(senderAccountId, tx, 1, subscribed, (events) => {
+					events.forEach((event) => {
+						// fix - we may not necessarily want the first event
+						// if there are multiple events emitted.
+						resolve(event[1][eventProperty])
+					})
+				})
+			} catch (err) {
+				reject(err)
+			}
+		})
+	}
 }
 
 module.exports = {
-  RuntimeApi
+	RuntimeApi,
 }
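
A sketch of the subscription form of signAndSend(), reusing the event pair that AssetsApi.createStorageRelationship() earlier in this diff subscribes to; `api` and an unlocked `accountId` are assumed:

	async function sendWatchingRelationships(api, accountId, tx) {
		const subscribed = [['dataObjectStorageRegistry', 'DataObjectStorageRelationshipAdded']]
		await api.signAndSend(accountId, tx, 3, subscribed, (matched) => {
			// Each entry is a [fullEventName, payload] pair built by _matchingEvents().
			matched.forEach(([name, payload]) => console.log('event', name, payload))
		})
	}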

+ 26 - 26
storage-node/packages/runtime-api/test/assets.js

@@ -16,36 +16,36 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const sinon = require('sinon');
+const mocha = require('mocha')
+const expect = require('chai').expect
+const sinon = require('sinon')
 
-const { RuntimeApi } = require('@joystream/storage-runtime-api');
+const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('Assets', () => {
-  var api;
-  var key;
-  before(async () => {
-    api = await RuntimeApi.create();
-    key = await api.identities.loadUnlock('test/data/edwards_unlocked.json');
-  });
+	let api
+	let key
+	before(async () => {
+		api = await RuntimeApi.create()
+		key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+	})
 
-  it('returns DataObjects for a content ID', async () => {
-    const obj = await api.assets.getDataObject('foo');
-    expect(obj.isNone).to.be.true;
-  });
+	it('returns DataObjects for a content ID', async () => {
+		const obj = await api.assets.getDataObject('foo')
+		expect(obj.isNone).to.be.true
+	})
 
-  it('can check the liaison for a DataObject', async () => {
-    expect(async _ => {
-      await api.assets.checkLiaisonForDataObject('foo', 'bar');
-    }).to.throw;
-  });
+	it('can check the liaison for a DataObject', async () => {
+		expect(async (_) => {
+			await api.assets.checkLiaisonForDataObject('foo', 'bar')
+		}).to.throw
+	})
 
-  // Needs properly staked accounts
-  it('can accept content');
-  it('can reject content');
-  it('can create a storage relationship for content');
-  it('can toggle a storage relatsionship to ready state');
-});
+	// Needs properly staked accounts
+	it('can accept content')
+	it('can reject content')
+	it('can create a storage relationship for content')
+	it('can toggle a storage relationship to ready state')
+})

+ 27 - 27
storage-node/packages/runtime-api/test/balances.js

@@ -16,37 +16,37 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const sinon = require('sinon');
+const mocha = require('mocha')
+const expect = require('chai').expect
+const sinon = require('sinon')
 
-const { RuntimeApi } = require('@joystream/storage-runtime-api');
+const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('Balances', () => {
-  var api;
-  var key;
-  before(async () => {
-    api = await RuntimeApi.create();
-    key = await api.identities.loadUnlock('test/data/edwards_unlocked.json');
-  });
+	let api
+	let key
+	before(async () => {
+		api = await RuntimeApi.create()
+		key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+	})
 
-  it('returns free balance for an account', async () => {
-    const balance = await api.balances.freeBalance(key.address);
-    // Should be exactly zero
-    expect(balance.cmpn(0)).to.equal(0);
-  });
+	it('returns free balance for an account', async () => {
+		const balance = await api.balances.freeBalance(key.address)
+		// Should be exactly zero
+		expect(balance.cmpn(0)).to.equal(0)
+	})
 
-  it('checks whether a minimum balance exists', async () => {
-    // A minimum of 0 should exist, but no more.
-    expect(await api.balances.hasMinimumBalanceOf(key.address, 0)).to.be.true;
-    expect(await api.balances.hasMinimumBalanceOf(key.address, 1)).to.be.false;
-  });
+	it('checks whether a minimum balance exists', async () => {
+		// A minimum of 0 should exist, but no more.
+		expect(await api.balances.hasMinimumBalanceOf(key.address, 0)).to.be.true
+		expect(await api.balances.hasMinimumBalanceOf(key.address, 1)).to.be.false
+	})
 
-  it('returns the base transaction fee of the chain', async () => {
-    const fee = await api.balances.baseTransactionFee();
-    // >= 0 comparison works
-    expect(fee.cmpn(0)).to.be.at.least(0);
-  });
-});
+	it('returns the base transaction fee of the chain', async () => {
+		const fee = await api.balances.baseTransactionFee()
+		// >= 0 comparison works
+		expect(fee.cmpn(0)).to.be.at.least(0)
+	})
+})

+ 60 - 60
storage-node/packages/runtime-api/test/identities.js

@@ -16,84 +16,84 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const sinon = require('sinon');
-const temp = require('temp').track();
+const mocha = require('mocha')
+const expect = require('chai').expect
+const sinon = require('sinon')
+const temp = require('temp').track()
 
-const { RuntimeApi } = require('@joystream/storage-runtime-api');
+const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('Identities', () => {
-  var api;
-  before(async () => {
-    api = await RuntimeApi.create({ canPromptForPassphrase: true });
-  });
+	let api
+	before(async () => {
+		api = await RuntimeApi.create({ canPromptForPassphrase: true })
+	})
 
-  it('imports keys', async () => {
-    // Unlocked keys can be imported without asking for a passphrase
-    await api.identities.loadUnlock('test/data/edwards_unlocked.json');
+	it('imports keys', async () => {
+		// Unlocked keys can be imported without asking for a passphrase
+		await api.identities.loadUnlock('test/data/edwards_unlocked.json')
 
-    // Edwards and schnorr keys should unlock
-    const passphrase_stub = sinon.stub(api.identities, 'askForPassphrase').callsFake(_ => 'asdf');
-    await api.identities.loadUnlock('test/data/edwards.json');
-    await api.identities.loadUnlock('test/data/schnorr.json');
-    passphrase_stub.restore();
+		// Edwards and schnorr keys should unlock
+		const passphrase_stub = sinon.stub(api.identities, 'askForPassphrase').callsFake((_) => 'asdf')
+		await api.identities.loadUnlock('test/data/edwards.json')
+		await api.identities.loadUnlock('test/data/schnorr.json')
+		passphrase_stub.restore()
 
-    // Except if the wrong passphrase is given
-    const passphrase_stub_bad = sinon.stub(api.identities, 'askForPassphrase').callsFake(_ => 'bad');
-    expect(async () => {
-      await api.identities.loadUnlock('test/data/edwards.json');
-    }).to.throw;
-    passphrase_stub_bad.restore();
-  });
+		// Except if the wrong passphrase is given
+		const passphrase_stub_bad = sinon.stub(api.identities, 'askForPassphrase').callsFake((_) => 'bad')
+		expect(async () => {
+			await api.identities.loadUnlock('test/data/edwards.json')
+		}).to.throw
+		passphrase_stub_bad.restore()
+	})
 
-  it('knows about membership', async () => {
-    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json');
-    const addr = key.address;
+	it('knows about membership', async () => {
+		const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
+		const addr = key.address
 
-    // Without seeding the runtime with data, we can only verify that the API
-    // reacts well in the absence of membership
-    expect(await api.identities.isMember(addr)).to.be.false;
-    const member_id = await api.identities.firstMemberIdOf(addr);
+		// Without seeding the runtime with data, we can only verify that the API
+		// reacts well in the absence of membership
+		expect(await api.identities.isMember(addr)).to.be.false
+		const member_id = await api.identities.firstMemberIdOf(addr)
 
-    expect(member_id).to.be.undefined;
-  });
+		expect(member_id).to.be.undefined
+	})
 
-  it('exports keys', async () => {
-    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json');
+	it('exports keys', async () => {
+		const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
 
-    const passphrase_stub = sinon.stub(api.identities, 'askForPassphrase').callsFake(_ => 'asdf');
-    const exported = await api.identities.exportKeyPair(key.address);
-    passphrase_stub.restore();
+		const passphrase_stub = sinon.stub(api.identities, 'askForPassphrase').callsFake((_) => 'asdf')
+		const exported = await api.identities.exportKeyPair(key.address)
+		passphrase_stub.restore()
 
-    expect(exported).to.have.property('address');
-    expect(exported.address).to.equal(key.address);
+		expect(exported).to.have.property('address')
+		expect(exported.address).to.equal(key.address)
 
-    expect(exported).to.have.property('encoding');
+		expect(exported).to.have.property('encoding')
 
-    expect(exported.encoding).to.have.property('version', '2');
+		expect(exported.encoding).to.have.property('version', '2')
 
-    expect(exported.encoding).to.have.property('content');
-    expect(exported.encoding.content).to.include('pkcs8');
-    expect(exported.encoding.content).to.include('ed25519');
+		expect(exported.encoding).to.have.property('content')
+		expect(exported.encoding.content).to.include('pkcs8')
+		expect(exported.encoding.content).to.include('ed25519')
 
-    expect(exported.encoding).to.have.property('type');
-    expect(exported.encoding.type).to.include('salsa20');
-  });
+		expect(exported.encoding).to.have.property('type')
+		expect(exported.encoding.type).to.include('salsa20')
+	})
 
-  it('writes key export files', async () => {
-    const prefix = temp.mkdirSync('joystream-runtime-api-test');
+	it('writes key export files', async () => {
+		const prefix = temp.mkdirSync('joystream-runtime-api-test')
 
-    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json');
+		const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
 
-    const passphrase_stub = sinon.stub(api.identities, 'askForPassphrase').callsFake(_ => 'asdf');
-    const filename = await api.identities.writeKeyPairExport(key.address, prefix);
-    passphrase_stub.restore();
+		const passphrase_stub = sinon.stub(api.identities, 'askForPassphrase').callsFake((_) => 'asdf')
+		const filename = await api.identities.writeKeyPairExport(key.address, prefix)
+		passphrase_stub.restore()
 
-    const fs = require('fs');
-    const stat = fs.statSync(filename);
-    expect(stat.isFile()).to.be.true;
-  });
-});
+		const fs = require('fs')
+		const stat = fs.statSync(filename)
+		expect(stat.isFile()).to.be.true
+	})
+})

+ 9 - 9
storage-node/packages/runtime-api/test/index.js

@@ -16,16 +16,16 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
+const mocha = require('mocha')
+const expect = require('chai').expect
 
-const { RuntimeApi } = require('@joystream/storage-runtime-api');
+const { RuntimeApi } = require('@joystream/storage-runtime-api')
 
 describe('RuntimeApi', () => {
-  it('can be created', async () => {
-    const api = await RuntimeApi.create();
-    api.disconnect();
-  });
-});
+	it('can be created', async () => {
+		const api = await RuntimeApi.create()
+		api.disconnect()
+	})
+})

+ 257 - 254
storage-node/packages/runtime-api/workers.js

@@ -26,243 +26,246 @@ const { Worker } = require('@joystream/types/working-group')
  * Add worker related functionality to the substrate API.
  */
 class WorkersApi {
-  static async create (base) {
-    const ret = new WorkersApi()
-    ret.base = base
-    await ret.init()
-    return ret
-  }
-
-
-  // eslint-disable-next-line class-methods-use-this, require-await
-  async init () {
-    debug('Init')
-  }
-
-  /*
-   * Check whether the given account and id represent an enrolled storage provider
-   */
-  async isRoleAccountOfStorageProvider (storageProviderId, roleAccountId) {
-    const id = new BN(storageProviderId)
-    const roleAccount = this.base.identities.keyring.decodeAddress(roleAccountId)
-    const providerAccount = await this.storageProviderRoleAccount(id)
-    return providerAccount && providerAccount.eq(roleAccount)
-  }
-
-  /*
-   * Returns true if the provider id is enrolled
-   */
-  async isStorageProvider (storageProviderId) {
-    const worker = await this.storageWorkerByProviderId(storageProviderId)
-    return worker !== null
-  }
-
-  /*
-   * Returns a provider's role account or null if provider doesn't exist
-   */
-  async storageProviderRoleAccount (storageProviderId) {
-    const worker = await this.storageWorkerByProviderId(storageProviderId)
-    return worker ? worker.role_account_id : null
-  }
-
-  /*
-   * Returns a Worker instance or null if provider does not exist
-   */
-  async storageWorkerByProviderId (storageProviderId) {
-    const id = new BN(storageProviderId)
-    const { providers } = await this.getAllProviders()
-    return providers[id.toNumber()] || null
-  }
-
-  /*
-   * Returns the the first found provider id with a role account or null if not found
-   */
-  async findProviderIdByRoleAccount (roleAccount) {
-    const { ids, providers } = await this.getAllProviders()
-
-    for (let i = 0; i < ids.length; i++) {
-      const id = ids[i]
-      if (providers[id].role_account_id.eq(roleAccount)) {
-        return id
-      }
-    }
-
-    return null
-  }
-
-  /*
-   * Returns the set of ids and Worker instances of providers enrolled on the network
-   */
-  async getAllProviders () {
-    // const workerEntries = await this.base.api.query.storageWorkingGroup.workerById()
-    // can't rely on .isEmpty or isNone property to detect empty map
-    // return workerEntries.isNone ? [] : workerEntries[0]
-    // return workerEntries.isEmpty ? [] : workerEntries[0]
-    // So we iterate over possible ids which may or may not exist, by reading directly
-    // from storage value
-    const nextWorkerId = (await this.base.api.query.storageWorkingGroup.nextWorkerId()).toNumber()
-    const ids = []
-    const providers = {}
-    for (let id = 0; id < nextWorkerId; id++) {
-      // We get back an Option. Will be None if value doesn't exist
-      // eslint-disable-next-line no-await-in-loop
-      let value = await this.base.api.rpc.state.getStorage(
-        this.base.api.query.storageWorkingGroup.workerById.key(id)
-      )
-
-      if (!value.isNone) {
-        // no need to read from storage again!
-        // const worker = (await this.base.api.query.storageWorkingGroup.workerById(id))[0]
-        value = value.unwrap()
-        // construct the Worker type from raw data
-        // const worker = createType('WorkerOf', value)
-        // const worker = new Worker(value)
-        ids.push(id)
-        providers[id] = new Worker(value)
-      }
-    }
-
-    return { ids, providers }
-  }
-
-  async getLeadRoleAccount() {
-    const currentLead = await this.base.api.query.storageWorkingGroup.currentLead()
-    if (currentLead.isSome) {
-      const leadWorkerId = currentLead.unwrap()
-      const worker = await this.base.api.query.storageWorkingGroup.workerById(leadWorkerId)
-      return worker[0].role_account_id
-    }
-    return null
-  }
-
-  // Helper methods below don't really belong in the colossus runtime api library.
-  // They are only used by the dev-init command in the cli to setup a development environment
-
-  /*
-   * Add a new storage group opening using the lead account. Returns the
-   * new opening id.
-   */
-  async dev_addStorageOpening() {
-    const openTx = this.dev_makeAddOpeningTx('Worker')
-    return this.dev_submitAddOpeningTx(openTx, await this.getLeadRoleAccount())
-  }
-
-  /*
-   * Add a new storage working group lead opening using sudo account. Returns the
-   * new opening id.
-   */
-  async dev_addStorageLeadOpening() {
-    const openTx = this.dev_makeAddOpeningTx('Leader')
-    const sudoTx = this.base.api.tx.sudo.sudo(openTx)
-    return this.dev_submitAddOpeningTx(sudoTx, await this.base.identities.getSudoAccount())
-  }
-
-  /*
-   * Constructs an addOpening tx of openingType
-   */
-  dev_makeAddOpeningTx(openingType) {
-    return this.base.api.tx.storageWorkingGroup.addOpening(
-      'CurrentBlock',
-      {
-        application_rationing_policy: {
-          'max_active_applicants': 1
-        },
-        max_review_period_length: 1000
-        // default values for everything else..
-      },
-      'dev-opening',
-      openingType
-    )
-  }
-
-  /*
-   * Submits a tx (expecting it to dispatch storageWorkingGroup.addOpening) and returns
-   * the OpeningId from the resulting event.
-   */
-  async dev_submitAddOpeningTx(tx, senderAccount) {
-    return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
-      eventModule: 'storageWorkingGroup',
-      eventName: 'OpeningAdded',
-      eventProperty: 'OpeningId'
-    })
-  }
-
-  /*
-   * Apply on an opening, returns the application id.
-   */
-  async dev_applyOnOpening(openingId, memberId, memberAccount, roleAccount) {
-    const applyTx = this.base.api.tx.storageWorkingGroup.applyOnOpening(
-      memberId, openingId, roleAccount, null, null, `colossus-${memberId}`
-    )
-
-    return this.base.signAndSendThenGetEventResult(memberAccount, applyTx, {
-      eventModule: 'storageWorkingGroup',
-      eventName: 'AppliedOnOpening',
-      eventProperty: 'ApplicationId'
-    })
-  }
-
-  /*
-   * Move lead opening to review state using sudo account
-   */
-  async dev_beginLeadOpeningReview(openingId) {
-    const beginReviewTx = this.dev_makeBeginOpeningReviewTx(openingId)
-    const sudoTx = this.base.api.tx.sudo.sudo(beginReviewTx)
-    return this.base.signAndSend(await this.base.identities.getSudoAccount(), sudoTx)
-  }
-
-  /*
-   * Move a storage opening to review state using lead account
-   */
-  async dev_beginStorageOpeningReview(openingId) {
-    const beginReviewTx = this.dev_makeBeginOpeningReviewTx(openingId)
-    return this.base.signAndSend(await this.getLeadRoleAccount(), beginReviewTx)
-  }
-
-  /*
-   * Constructs a beingApplicantReview tx for openingId, which puts an opening into the review state
-   */
-  dev_makeBeginOpeningReviewTx(openingId) {
-    return this.base.api.tx.storageWorkingGroup.beginApplicantReview(openingId)
-  }
-
-  /*
-   * Fill a lead opening, return the assigned worker id, using the sudo account
-   */
-  async dev_fillLeadOpening(openingId, applicationId) {
-    const fillTx = this.dev_makeFillOpeningTx(openingId, applicationId)
-    const sudoTx = this.base.api.tx.sudo.sudo(fillTx)
-    const filled = await this.dev_submitFillOpeningTx(
-      await this.base.identities.getSudoAccount(), sudoTx)
-    return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
-  }
-
-  /*
-   * Fill a storage opening, return the assigned worker id, using the lead account
-   */
-  async dev_fillStorageOpening(openingId, applicationId) {
-    const fillTx = this.dev_makeFillOpeningTx(openingId, applicationId)
-    const filled = await this.dev_submitFillOpeningTx(await this.getLeadRoleAccount(), fillTx)
-    return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
-  }
-
-  /*
-   * Constructs a FillOpening transaction
-   */
-  dev_makeFillOpeningTx(openingId, applicationId) {
-    return this.base.api.tx.storageWorkingGroup.fillOpening(openingId, [applicationId], null)
-  }
-
-  /*
-   * Dispatches a fill opening tx and returns a map of the application id to their new assigned worker ids.
-   */
-  async dev_submitFillOpeningTx(senderAccount, tx) {
-    return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
-      eventModule: 'storageWorkingGroup',
-      eventName: 'OpeningFilled',
-      eventProperty: 'ApplicationIdToWorkerIdMap'
-    })
-  }
+	static async create(base) {
+		const ret = new WorkersApi()
+		ret.base = base
+		await ret.init()
+		return ret
+	}
+
+	// eslint-disable-next-line class-methods-use-this, require-await
+	async init() {
+		debug('Init')
+	}
+
+	/*
+	 * Check whether the given account and id represent an enrolled storage provider
+	 */
+	async isRoleAccountOfStorageProvider(storageProviderId, roleAccountId) {
+		const id = new BN(storageProviderId)
+		const roleAccount = this.base.identities.keyring.decodeAddress(roleAccountId)
+		const providerAccount = await this.storageProviderRoleAccount(id)
+		return providerAccount && providerAccount.eq(roleAccount)
+	}
+
+	/*
+	 * Returns true if the provider id is enrolled
+	 */
+	async isStorageProvider(storageProviderId) {
+		const worker = await this.storageWorkerByProviderId(storageProviderId)
+		return worker !== null
+	}
+
+	/*
+	 * Returns a provider's role account or null if provider doesn't exist
+	 */
+	async storageProviderRoleAccount(storageProviderId) {
+		const worker = await this.storageWorkerByProviderId(storageProviderId)
+		return worker ? worker.role_account_id : null
+	}
+
+	/*
+	 * Returns a Worker instance or null if provider does not exist
+	 */
+	async storageWorkerByProviderId(storageProviderId) {
+		const id = new BN(storageProviderId)
+		const { providers } = await this.getAllProviders()
+		return providers[id.toNumber()] || null
+	}
+
+	/*
+	 * Returns the first found provider id with a role account, or null if not found
+	 */
+	async findProviderIdByRoleAccount(roleAccount) {
+		const { ids, providers } = await this.getAllProviders()
+
+		for (let i = 0; i < ids.length; i++) {
+			const id = ids[i]
+			if (providers[id].role_account_id.eq(roleAccount)) {
+				return id
+			}
+		}
+
+		return null
+	}
+
+	/*
+	 * Returns the set of ids and Worker instances of providers enrolled on the network
+	 */
+	async getAllProviders() {
+		// const workerEntries = await this.base.api.query.storageWorkingGroup.workerById()
+		// can't rely on .isEmpty or isNone property to detect empty map
+		// return workerEntries.isNone ? [] : workerEntries[0]
+		// return workerEntries.isEmpty ? [] : workerEntries[0]
+		// So we iterate over the possible ids, which may or may not exist, by reading
+		// directly from the raw storage value
+		const nextWorkerId = (await this.base.api.query.storageWorkingGroup.nextWorkerId()).toNumber()
+		const ids = []
+		const providers = {}
+		for (let id = 0; id < nextWorkerId; id++) {
+			// We get back an Option. Will be None if value doesn't exist
+			// eslint-disable-next-line no-await-in-loop
+			let value = await this.base.api.rpc.state.getStorage(
+				this.base.api.query.storageWorkingGroup.workerById.key(id)
+			)
+
+			if (!value.isNone) {
+				// no need to read from storage again!
+				// const worker = (await this.base.api.query.storageWorkingGroup.workerById(id))[0]
+				value = value.unwrap()
+				// construct the Worker type from raw data
+				// const worker = createType('WorkerOf', value)
+				// const worker = new Worker(value)
+				ids.push(id)
+				providers[id] = new Worker(value)
+			}
+		}
+
+		return { ids, providers }
+	}
+
+	async getLeadRoleAccount() {
+		const currentLead = await this.base.api.query.storageWorkingGroup.currentLead()
+		if (currentLead.isSome) {
+			const leadWorkerId = currentLead.unwrap()
+			const worker = await this.base.api.query.storageWorkingGroup.workerById(leadWorkerId)
+			return worker[0].role_account_id
+		}
+		return null
+	}
+
+	// Helper methods below don't really belong in the colossus runtime api library.
+	// They are only used by the dev-init command in the cli to set up a development environment
+
+	/*
+	 * Add a new storage group opening using the lead account. Returns the
+	 * new opening id.
+	 */
+	async dev_addStorageOpening() {
+		const openTx = this.dev_makeAddOpeningTx('Worker')
+		return this.dev_submitAddOpeningTx(openTx, await this.getLeadRoleAccount())
+	}
+
+	/*
+	 * Add a new storage working group lead opening using sudo account. Returns the
+	 * new opening id.
+	 */
+	async dev_addStorageLeadOpening() {
+		const openTx = this.dev_makeAddOpeningTx('Leader')
+		const sudoTx = this.base.api.tx.sudo.sudo(openTx)
+		return this.dev_submitAddOpeningTx(sudoTx, await this.base.identities.getSudoAccount())
+	}
+
+	/*
+	 * Constructs an addOpening tx of openingType
+	 */
+	dev_makeAddOpeningTx(openingType) {
+		return this.base.api.tx.storageWorkingGroup.addOpening(
+			'CurrentBlock',
+			{
+				application_rationing_policy: {
+					max_active_applicants: 1,
+				},
+				max_review_period_length: 1000,
+				// default values for everything else..
+			},
+			'dev-opening',
+			openingType
+		)
+	}
+
+	/*
+	 * Submits a tx (expecting it to dispatch storageWorkingGroup.addOpening) and returns
+	 * the OpeningId from the resulting event.
+	 */
+	async dev_submitAddOpeningTx(tx, senderAccount) {
+		return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
+			eventModule: 'storageWorkingGroup',
+			eventName: 'OpeningAdded',
+			eventProperty: 'OpeningId',
+		})
+	}
+
+	/*
+	 * Apply on an opening, returns the application id.
+	 */
+	async dev_applyOnOpening(openingId, memberId, memberAccount, roleAccount) {
+		const applyTx = this.base.api.tx.storageWorkingGroup.applyOnOpening(
+			memberId,
+			openingId,
+			roleAccount,
+			null,
+			null,
+			`colossus-${memberId}`
+		)
+
+		return this.base.signAndSendThenGetEventResult(memberAccount, applyTx, {
+			eventModule: 'storageWorkingGroup',
+			eventName: 'AppliedOnOpening',
+			eventProperty: 'ApplicationId',
+		})
+	}
+
+	/*
+	 * Move lead opening to review state using sudo account
+	 */
+	async dev_beginLeadOpeningReview(openingId) {
+		const beginReviewTx = this.dev_makeBeginOpeningReviewTx(openingId)
+		const sudoTx = this.base.api.tx.sudo.sudo(beginReviewTx)
+		return this.base.signAndSend(await this.base.identities.getSudoAccount(), sudoTx)
+	}
+
+	/*
+	 * Move a storage opening to review state using lead account
+	 */
+	async dev_beginStorageOpeningReview(openingId) {
+		const beginReviewTx = this.dev_makeBeginOpeningReviewTx(openingId)
+		return this.base.signAndSend(await this.getLeadRoleAccount(), beginReviewTx)
+	}
+
+	/*
+	 * Constructs a beginApplicantReview tx for openingId, which puts an opening into the review state
+	 */
+	dev_makeBeginOpeningReviewTx(openingId) {
+		return this.base.api.tx.storageWorkingGroup.beginApplicantReview(openingId)
+	}
+
+	/*
+	 * Fill a lead opening, return the assigned worker id, using the sudo account
+	 */
+	async dev_fillLeadOpening(openingId, applicationId) {
+		const fillTx = this.dev_makeFillOpeningTx(openingId, applicationId)
+		const sudoTx = this.base.api.tx.sudo.sudo(fillTx)
+		const filled = await this.dev_submitFillOpeningTx(await this.base.identities.getSudoAccount(), sudoTx)
+		return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
+	}
+
+	/*
+	 * Fill a storage opening, return the assigned worker id, using the lead account
+	 */
+	async dev_fillStorageOpening(openingId, applicationId) {
+		const fillTx = this.dev_makeFillOpeningTx(openingId, applicationId)
+		const filled = await this.dev_submitFillOpeningTx(await this.getLeadRoleAccount(), fillTx)
+		return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
+	}
+
+	/*
+	 * Constructs a FillOpening transaction
+	 */
+	dev_makeFillOpeningTx(openingId, applicationId) {
+		return this.base.api.tx.storageWorkingGroup.fillOpening(openingId, [applicationId], null)
+	}
+
+	/*
+	 * Dispatches a fill opening tx and returns a map of the application id to their new assigned worker ids.
+	 */
+	async dev_submitFillOpeningTx(senderAccount, tx) {
+		return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
+			eventModule: 'storageWorkingGroup',
+			eventName: 'OpeningFilled',
+			eventProperty: 'ApplicationIdToWorkerIdMap',
+		})
+	}
 }
 
 /*
@@ -270,29 +273,29 @@ class WorkersApi {
  * ApplicationIdToWorkerIdMap map in the OpeningFilled event. Expects map to
  * contain at least one entry.
  */
-function getWorkerIdFromApplicationIdToWorkerIdMap (filledMap, applicationId) {
-  if (filledMap.size === 0) {
-    throw new Error('Expected opening to be filled!')
-  }
+function getWorkerIdFromApplicationIdToWorkerIdMap(filledMap, applicationId) {
+	if (filledMap.size === 0) {
+		throw new Error('Expected opening to be filled!')
+	}
 
-  let ourApplicationIdKey
+	let ourApplicationIdKey
 
-  for (let key of filledMap.keys()) {
-    if (key.eq(applicationId)) {
-      ourApplicationIdKey = key
-      break
-    }
-  }
+	for (const key of filledMap.keys()) {
+		if (key.eq(applicationId)) {
+			ourApplicationIdKey = key
+			break
+		}
+	}
 
-  if (!ourApplicationIdKey) {
-    throw new Error('Expected application id to have been filled!')
-  }
+	if (!ourApplicationIdKey) {
+		throw new Error('Expected application id to have been filled!')
+	}
 
-  const workerId = filledMap.get(ourApplicationIdKey)
+	const workerId = filledMap.get(ourApplicationIdKey)
 
-  return workerId
+	return workerId
 }
 
 module.exports = {
-  WorkersApi
+	WorkersApi,
 }
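
For orientation, a minimal sketch (not part of the diff itself) of how the dev_* helpers above are meant to chain together for the storage-provider case. The `api` object with an attached `workers` property and the member/account values are assumptions for illustration only:

// Sketch only: assumes `api.workers` is a WorkersApi instance and that
// memberId/memberAccount/roleAccount identify a funded development member.
async function devEnrollStorageProvider(api, memberId, memberAccount, roleAccount) {
	// The lead opens a storage opening, the member applies, review begins,
	// and the lead fills the opening, yielding the new provider (worker) id.
	const openingId = await api.workers.dev_addStorageOpening()
	const applicationId = await api.workers.dev_applyOnOpening(openingId, memberId, memberAccount, roleAccount)
	await api.workers.dev_beginStorageOpeningReview(openingId)
	return api.workers.dev_fillStorageOpening(openingId, applicationId)
}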

+ 82 - 90
storage-node/packages/storage/filter.js

@@ -16,74 +16,67 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const debug = require('debug')('joystream:storage:filter');
+const debug = require('debug')('joystream:storage:filter')
 
-const DEFAULT_MAX_FILE_SIZE = 500 * 1024 * 1024;
-const DEFAULT_ACCEPT_TYPES = [
-  'video/*',
-  'audio/*',
-  'image/*',
-];
-const DEFAULT_REJECT_TYPES = [];
+const DEFAULT_MAX_FILE_SIZE = 500 * 1024 * 1024
+const DEFAULT_ACCEPT_TYPES = ['video/*', 'audio/*', 'image/*']
+const DEFAULT_REJECT_TYPES = []
 
 // Configuration defaults
-function config_defaults(config)
-{
-  const filter =  config.filter || {};
+function config_defaults(config) {
+	const filter = config.filter || {}
 
-  // We accept zero as switching this check off.
-  if (typeof filter.max_size == 'undefined' || typeof filter.max_size == 'null') {
-    filter.max_size = DEFAULT_MAX_FILE_SIZE;
-  }
+	// We accept zero as switching this check off.
+	if (filter.max_size === undefined || filter.max_size === null) {
+		filter.max_size = DEFAULT_MAX_FILE_SIZE
+	}
 
-  // Figure out mime types
-  filter.mime = filter.mime || [];
-  filter.mime.accept = filter.mime.accept || DEFAULT_ACCEPT_TYPES;
-  filter.mime.reject = filter.mime.reject || DEFAULT_REJECT_TYPES;
+	// Figure out mime types
+	filter.mime = filter.mime || {}
+	filter.mime.accept = filter.mime.accept || DEFAULT_ACCEPT_TYPES
+	filter.mime.reject = filter.mime.reject || DEFAULT_REJECT_TYPES
 
-  return filter;
+	return filter
 }
 
 // Mime type matching
-function mime_matches(acceptable, provided)
-{
-  if (acceptable.endsWith('*')) {
-    // Wildcard match
-    const prefix = acceptable.slice(0, acceptable.length - 1);
-    debug('wildcard matching', provided, 'against', acceptable, '/', prefix);
-    return provided.startsWith(prefix);
-  }
-  // Exact match
-  debug('exact matching', provided, 'against', acceptable);
-  return provided == acceptable;
+function mime_matches(acceptable, provided) {
+	if (acceptable.endsWith('*')) {
+		// Wildcard match
+		const prefix = acceptable.slice(0, acceptable.length - 1)
+		debug('wildcard matching', provided, 'against', acceptable, '/', prefix)
+		return provided.startsWith(prefix)
+	}
+	// Exact match
+	debug('exact matching', provided, 'against', acceptable)
+	return provided === acceptable
 }
 
-function mime_matches_any(accept, reject, provided)
-{
-  // Pass accept
-  var accepted = false;
-  for (var item of accept) {
-    if (mime_matches(item, provided)) {
-      debug('Content type matches', item, 'which is acceptable.');
-      accepted = true;
-      break;
-    }
-  }
-  if (!accepted) {
-    return false;
-  }
-
-  // Don't pass reject
-  for (var item of reject) {
-    if (mime_matches(item, provided)) {
-      debug('Content type matches', item, 'which is unacceptable.');
-      return false;
-    }
-  }
-
-  return true;
+function mime_matches_any(accept, reject, provided) {
+	// Pass accept
+	let accepted = false
+	for (const item of accept) {
+		if (mime_matches(item, provided)) {
+			debug('Content type matches', item, 'which is acceptable.')
+			accepted = true
+			break
+		}
+	}
+	if (!accepted) {
+		return false
+	}
+
+	// Don't pass reject
+	for (const item of reject) {
+		if (mime_matches(item, provided)) {
+			debug('Content type matches', item, 'which is unacceptable.')
+			return false
+		}
+	}
+
+	return true
 }
 
 /**
@@ -94,39 +87,38 @@ function mime_matches_any(accept, reject, provided)
  * https://github.com/Joystream/storage-node-joystream/issues/14 - but should
  * most likely be improved on in future.
  **/
-function filter_func(config, headers, mime_type)
-{
-  const filter = config_defaults(config);
-
-  // Enforce maximum file upload size
-  if (filter.max_size) {
-    const size = parseInt(headers['content-length'], 10);
-    if (!size) {
-      return {
-        code: 411,
-        message: 'A Content-Length header is required.',
-      };
-    }
-
-    if (size > filter.max_size) {
-      return {
-        code: 413,
-        message: 'The provided Content-Length is too large.',
-      };
-    }
-  }
-
-  // Enforce mime type based filtering
-  if (!mime_matches_any(filter.mime.accept, filter.mime.reject, mime_type)) {
-    return {
-      code: 415,
-      message: 'Content has an unacceptable MIME type.',
-    };
-  }
-
-  return {
-    code: 200,
-  };
+function filter_func(config, headers, mime_type) {
+	const filter = config_defaults(config)
+
+	// Enforce maximum file upload size
+	if (filter.max_size) {
+		const size = parseInt(headers['content-length'], 10)
+		if (!size) {
+			return {
+				code: 411,
+				message: 'A Content-Length header is required.',
+			}
+		}
+
+		if (size > filter.max_size) {
+			return {
+				code: 413,
+				message: 'The provided Content-Length is too large.',
+			}
+		}
+	}
+
+	// Enforce mime type based filtering
+	if (!mime_matches_any(filter.mime.accept, filter.mime.reject, mime_type)) {
+		return {
+			code: 415,
+			message: 'Content has an unacceptable MIME type.',
+		}
+	}
+
+	return {
+		code: 200,
+	}
 }
 
-module.exports = filter_func;
+module.exports = filter_func
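
A usage sketch for the exported filter function; the config values and headers below are illustrative only:

const filter = require('./filter')

// Accept only images up to 10 MiB for this example.
const config = {
	filter: {
		max_size: 10 * 1024 * 1024,
		mime: { accept: ['image/*'], reject: [] },
	},
}

filter(config, { 'content-length': '2048' }, 'image/png') // { code: 200 }
filter(config, {}, 'image/png') // { code: 411 } - a Content-Length header is required
filter(config, { 'content-length': '99999999999' }, 'image/png') // { code: 413 } - too large
filter(config, { 'content-length': '2048' }, 'video/mp4') // { code: 415 } - unacceptable MIME type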

+ 4 - 4
storage-node/packages/storage/index.js

@@ -16,10 +16,10 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const { Storage } = require('./storage');
+const { Storage } = require('./storage')
 
 module.exports = {
-  Storage: Storage,
-};
+	Storage,
+}

+ 326 - 347
storage-node/packages/storage/storage.js

@@ -16,39 +16,39 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const { Transform } = require('stream');
-const fs = require('fs');
+const { Transform } = require('stream')
+const fs = require('fs')
 
-const debug = require('debug')('joystream:storage:storage');
+const debug = require('debug')('joystream:storage:storage')
+
+const Promise = require('bluebird')
 
-const Promise = require('bluebird');
 Promise.config({
-  cancellation: true,
-});
+	cancellation: true,
+})
 
-const file_type = require('file-type');
-const ipfs_client = require('ipfs-http-client');
-const temp = require('temp').track();
-const _ = require('lodash');
+const file_type = require('file-type')
+const ipfs_client = require('ipfs-http-client')
+const temp = require('temp').track()
+const _ = require('lodash')
 
 // Default request timeout; imposed on top of the IPFS client, because the
 // client doesn't seem to care.
-const DEFAULT_TIMEOUT = 30 * 1000;
+const DEFAULT_TIMEOUT = 30 * 1000
 
 // Default/dummy resolution implementation.
 const DEFAULT_RESOLVE_CONTENT_ID = async (original) => {
-  debug('Warning: Default resolution returns original CID', original);
-  return original;
+	debug('Warning: Default resolution returns original CID', original)
+	return original
 }
 
 // Default file info if nothing could be detected.
 const DEFAULT_FILE_INFO = {
-  mime_type: 'application/octet-stream',
-  ext: 'bin',
-};
-
+	mime_type: 'application/octet-stream',
+	ext: 'bin',
+}
 
 /*
  * fileType is a weird name, because we're really looking at MIME types.
@@ -57,132 +57,122 @@ const DEFAULT_FILE_INFO = {
  * Nitpicking, but it also means we can add our default type if things
  * go wrong.
  */
-function fix_file_info(info)
-{
-  if (!info) {
-    info = DEFAULT_FILE_INFO;
-  }
-  else {
-    info.mime_type = info.mime;
-    delete(info.mime);
-  }
-  return info;
+function fix_file_info(info) {
+	if (!info) {
+		info = DEFAULT_FILE_INFO
+	} else {
+		info.mime_type = info.mime
+		delete info.mime
+	}
+	return info
 }
 
-function fix_file_info_on_stream(stream)
-{
-  var info = fix_file_info(stream.fileType);
-  delete(stream.fileType);
-  stream.file_info = info;
-  return stream;
+function fix_file_info_on_stream(stream) {
+	const info = fix_file_info(stream.fileType)
+	delete stream.fileType
+	stream.file_info = info
+	return stream
 }
 
-
 /*
  * Internal Transform stream for helping write to a temporary location, adding
  * MIME type detection, and a commit() function.
  */
-class StorageWriteStream extends Transform
-{
-  constructor(storage, options)
-  {
-    options = _.clone(options || {});
-
-    super(options);
-
-    this.storage = storage;
-
-    // Create temp target.
-    this.temp = temp.createWriteStream();
-    this.buf = Buffer.alloc(0);
-  }
-
-  _transform(chunk, encoding, callback)
-  {
-    // Deal with buffers only
-    if (typeof chunk === 'string') {
-      chunk = Buffer.from(chunk);
-    }
-
-    // Logging this all the time is too verbose
-    // debug('Writing temporary chunk', chunk.length, chunk);
-    this.temp.write(chunk);
-
-    // Try to detect file type during streaming.
-    if (!this.file_info && this.buf < file_type.minimumBytes) {
-      this.buf = Buffer.concat([this.buf, chunk]);
-
-      if (this.buf >= file_type.minimumBytes) {
-        const info = file_type(this.buf);
-        // No info? We can try again at the end of the stream.
-        if (info) {
-          this.file_info = fix_file_info(info);
-          this.emit('file_info', this.file_info);
-        }
-      }
-    }
-
-    callback(null);
-  }
-
-  _flush(callback)
-  {
-    debug('Flushing temporary stream:', this.temp.path);
-    this.temp.end();
-
-    // Since we're finished, we can try to detect the file type again.
-    if (!this.file_info) {
-      const read = fs.createReadStream(this.temp.path);
-      file_type.stream(read)
-        .then((stream) => {
-          this.file_info = fix_file_info_on_stream(stream).file_info;
-          this.emit('file_info', this.file_info);
-        })
-        .catch((err) => {
-          debug('Error trying to detect file type at end-of-stream:', err);
-        });
-    }
-
-    callback(null);
-  }
-
-  /*
-   * Commit this stream to the IPFS backend.
-   */
-  commit()
-  {
-    // Create a read stream from the temp file.
-    if (!this.temp) {
-      throw new Error('Cannot commit a temporary stream that does not exist. Did you call cleanup()?');
-    }
-
-    debug('Committing temporary stream: ', this.temp.path);
-    this.storage.ipfs.addFromFs(this.temp.path)
-      .then(async (result) => {
-        const hash = result[0].hash;
-        debug('Stream committed as', hash);
-        this.emit('committed', hash);
-        await this.storage.ipfs.pin.add(hash);
-      })
-      .catch((err) => {
-        debug('Error committing stream', err);
-        this.emit('error', err);
-      })
-  }
-
-  /*
-   * Clean up temporary data.
-   */
-  cleanup()
-  {
-    debug('Cleaning up temporary file: ', this.temp.path);
-    fs.unlink(this.temp.path, () => {}); // Ignore errors
-    delete(this.temp);
-  }
+class StorageWriteStream extends Transform {
+	constructor(storage, options) {
+		options = _.clone(options || {})
+
+		super(options)
+
+		this.storage = storage
+
+		// Create temp target.
+		this.temp = temp.createWriteStream()
+		this.buf = Buffer.alloc(0)
+	}
+
+	_transform(chunk, encoding, callback) {
+		// Deal with buffers only
+		if (typeof chunk === 'string') {
+			chunk = Buffer.from(chunk)
+		}
+
+		// Logging this all the time is too verbose
+		// debug('Writing temporary chunk', chunk.length, chunk);
+		this.temp.write(chunk)
+
+		// Try to detect file type during streaming.
+		if (!this.file_info && this.buf.length < file_type.minimumBytes) {
+			this.buf = Buffer.concat([this.buf, chunk])
+
+			if (this.buf.length >= file_type.minimumBytes) {
+				const info = file_type(this.buf)
+				// No info? We can try again at the end of the stream.
+				if (info) {
+					this.file_info = fix_file_info(info)
+					this.emit('file_info', this.file_info)
+				}
+			}
+		}
+
+		callback(null)
+	}
+
+	_flush(callback) {
+		debug('Flushing temporary stream:', this.temp.path)
+		this.temp.end()
+
+		// Since we're finished, we can try to detect the file type again.
+		if (!this.file_info) {
+			const read = fs.createReadStream(this.temp.path)
+			file_type
+				.stream(read)
+				.then((stream) => {
+					this.file_info = fix_file_info_on_stream(stream).file_info
+					this.emit('file_info', this.file_info)
+				})
+				.catch((err) => {
+					debug('Error trying to detect file type at end-of-stream:', err)
+				})
+		}
+
+		callback(null)
+	}
+
+	/*
+	 * Commit this stream to the IPFS backend.
+	 */
+	commit() {
+		// Create a read stream from the temp file.
+		if (!this.temp) {
+			throw new Error('Cannot commit a temporary stream that does not exist. Did you call cleanup()?')
+		}
+
+		debug('Committing temporary stream: ', this.temp.path)
+		this.storage.ipfs
+			.addFromFs(this.temp.path)
+			.then(async (result) => {
+				const hash = result[0].hash
+				debug('Stream committed as', hash)
+				this.emit('committed', hash)
+				await this.storage.ipfs.pin.add(hash)
+			})
+			.catch((err) => {
+				debug('Error committing stream', err)
+				this.emit('error', err)
+			})
+	}
+
+	/*
+	 * Clean up temporary data.
+	 */
+	cleanup() {
+		debug('Cleaning up temporary file: ', this.temp.path)
+		fs.unlink(this.temp.path, () => {}) // Ignore errors
+		delete this.temp
+	}
 }
 
-
-
 /*
  * Manages the storage backend interaction. This provides a Promise-based API.
  *
@@ -191,219 +181,208 @@ class StorageWriteStream extends Transform
  *   const store = await Storage.create({ ... });
  *   store.open(...);
  */
-class Storage
-{
-  /*
-   * Create a Storage instance. Options include:
-   *
-   * - an `ipfs` property, which is itself a hash containing
-   *   - `connect_options` to be passed to the IPFS client library for
-   *     connecting to an IPFS node.
-   * - a `resolve_content_id` function, which translates Joystream
-   *   content IDs to IPFS content IDs or vice versa. The default is to
-   *   not perform any translation, which is not practical for a production
-   *   system, but serves its function during development and testing. The
-   *   function must be asynchronous.
-   * - a `timeout` parameter, defaulting to DEFAULT_TIMEOUT. After this time,
-   *   requests to the IPFS backend time out.
-   *
-   * Functions in this class accept an optional timeout parameter. If the
-   * timeout is given, it is used - otherwise, the `option.timeout` value
-   * above is used.
-   */
-  static create(options)
-  {
-    const storage = new Storage();
-    storage._init(options);
-    return storage;
-  }
-
-  _init(options)
-  {
-    this.options = _.clone(options || {});
-    this.options.ipfs = this.options.ipfs || {};
-
-    this._timeout = this.options.timeout || DEFAULT_TIMEOUT;
-    this._resolve_content_id = this.options.resolve_content_id || DEFAULT_RESOLVE_CONTENT_ID;
-
-    this.ipfs = ipfs_client(this.options.ipfs.connect_options);
-
-    this.pins = {};
-
-    this.ipfs.id((err, identity) => {
-      if (err) {
-        debug(`Warning IPFS daemon not running: ${err.message}`);
-      } else {
-        debug(`IPFS node is up with identity: ${identity.id}`);
-      }
-    });
-  }
-
-  /*
-   * Uses bluebird's timeout mechanism to return a Promise that times out after
-   * the given timeout interval, and tries to execute the given operation within
-   * that time.
-   */
-  async _with_specified_timeout(timeout, operation)
-  {
-    return new Promise(async (resolve, reject) => {
-      try {
-        resolve(await new Promise(operation));
-      } catch (err) {
-        reject(err);
-      }
-    }).timeout(timeout || this._timeout);
-  }
-
-  /*
-   * Resolve content ID with timeout.
-   */
-  async _resolve_content_id_with_timeout(timeout, content_id)
-  {
-    return await this._with_specified_timeout(timeout, async (resolve, reject) => {
-      try {
-        resolve(await this._resolve_content_id(content_id));
-      } catch (err) {
-        reject(err);
-      }
-    });
-  }
-
-  /*
-   * Stat a content ID.
-   */
-  async stat(content_id, timeout)
-  {
-    const resolved = await this._resolve_content_id_with_timeout(timeout, content_id);
-
-    return await this._with_specified_timeout(timeout, (resolve, reject) => {
-      this.ipfs.files.stat(`/ipfs/${resolved}`, { withLocal: true }, (err, res) => {
-        if (err) {
-          reject(err);
-          return;
-        }
-        resolve(res);
-      });
-    });
-  }
-
-  /*
-   * Return the size of a content ID.
-   */
-  async size(content_id, timeout)
-  {
-    const stat = await this.stat(content_id, timeout);
-    return stat.size;
-  }
-
-  /*
-   * Opens the specified content in read or write mode, and returns a Promise
-   * with the stream.
-   *
-   * Read streams will contain a file_info property, with:
-   *  - a `mime_type` field providing the file's MIME type, or a default.
-   *  - an `ext` property, providing a file extension suggestion, or a default.
-   *
-   * Write streams have a slightly different flow, in order to allow for MIME
-   * type detection and potential filtering. First off, they are written to a
-   * temporary location, and only committed to the backend once their
-   * `commit()` function is called.
-   *
-   * When the commit has finished, a `committed` event is emitted, which
-   * contains the IPFS backend's content ID.
-   *
-   * Write streams also emit a `file_info` event during writing. It is passed
-   * the `file_info` field as described above. Event listeners may now opt to
-   * abort the write or continue and eventually `commit()` the file. There is
-   * an explicit `cleanup()` function that removes temporary files as well,
-   * in case comitting is not desired.
-   */
-  async open(content_id, mode, timeout)
-  {
-    if (mode != 'r' && mode != 'w') {
-      throw Error('The only supported modes are "r", "w" and "a".');
-    }
-
-    // Write stream
-    if (mode === 'w') {
-      return await this._create_write_stream(content_id, timeout);
-    }
-
-    // Read stream - with file type detection
-    return await this._create_read_stream(content_id, timeout);
-  }
-
-  async _create_write_stream(content_id)
-  {
-    // IPFS wants us to just dump a stream into its storage, then returns a
-    // content ID (of its own).
-    // We need to instead return a stream immediately, that we eventually
-    // decorate with the content ID when that's available.
-    return new Promise((resolve, reject) => {
-      const stream = new StorageWriteStream(this);
-      resolve(stream);
-    });
-  }
-
-  async _create_read_stream(content_id, timeout)
-  {
-    const resolved = await this._resolve_content_id_with_timeout(timeout, content_id);
-
-    var found = false;
-    return await this._with_specified_timeout(timeout, (resolve, reject) => {
-      const ls = this.ipfs.getReadableStream(resolved);
-      ls.on('data', async (result) => {
-        if (result.path === resolved) {
-          found = true;
-
-          const ft_stream = await file_type.stream(result.content);
-          resolve(fix_file_info_on_stream(ft_stream));
-        }
-      });
-      ls.on('error', (err) => {
-        ls.end();
-        debug(err);
-        reject(err);
-      });
-      ls.on('end', () => {
-        if (!found) {
-          const err = new Error('No matching content found for', content_id);
-          debug(err);
-          reject(err);
-        }
-      });
-      ls.resume();
-    });
-  }
-
-  /*
-   * Synchronize the given content ID
-   */
-  async synchronize(content_id)
-  {
-    const resolved = await this._resolve_content_id_with_timeout(this._timeout, content_id);
-
-    // validate resolved id is proper ipfs_cid, not null or empty string
-
-    if (this.pins[resolved]) {
-      return;
-    }
-
-    debug(`Pinning ${resolved}`);
-
-    // This call blocks until file is retreived..
-    this.ipfs.pin.add(resolved, {quiet: true, pin: true}, (err, res) => {
-      if (err) {
-        debug(`Error Pinning: ${resolved}`)
-        delete this.pins[resolved];
-      } else {
-        debug(`Pinned ${resolved}`);
-        // why aren't we doing this.pins[resolved] = true
-      }
-    });
-  }
+class Storage {
+	/*
+	 * Create a Storage instance. Options include:
+	 *
+	 * - an `ipfs` property, which is itself a hash containing
+	 *   - `connect_options` to be passed to the IPFS client library for
+	 *     connecting to an IPFS node.
+	 * - a `resolve_content_id` function, which translates Joystream
+	 *   content IDs to IPFS content IDs or vice versa. The default is to
+	 *   not perform any translation, which is not practical for a production
+	 *   system, but serves its function during development and testing. The
+	 *   function must be asynchronous.
+	 * - a `timeout` parameter, defaulting to DEFAULT_TIMEOUT. After this time,
+	 *   requests to the IPFS backend time out.
+	 *
+	 * Functions in this class accept an optional timeout parameter. If the
+	 * timeout is given, it is used - otherwise, the `option.timeout` value
+	 * above is used.
+	 */
+	static create(options) {
+		const storage = new Storage()
+		storage._init(options)
+		return storage
+	}
+
+	_init(options) {
+		this.options = _.clone(options || {})
+		this.options.ipfs = this.options.ipfs || {}
+
+		this._timeout = this.options.timeout || DEFAULT_TIMEOUT
+		this._resolve_content_id = this.options.resolve_content_id || DEFAULT_RESOLVE_CONTENT_ID
+
+		this.ipfs = ipfs_client(this.options.ipfs.connect_options)
+
+		this.pins = {}
+
+		this.ipfs.id((err, identity) => {
+			if (err) {
+				debug(`Warning IPFS daemon not running: ${err.message}`)
+			} else {
+				debug(`IPFS node is up with identity: ${identity.id}`)
+			}
+		})
+	}
+
+	/*
+	 * Uses bluebird's timeout mechanism to return a Promise that times out after
+	 * the given timeout interval, and tries to execute the given operation within
+	 * that time.
+	 */
+	async _with_specified_timeout(timeout, operation) {
+		return new Promise(async (resolve, reject) => {
+			try {
+				resolve(await new Promise(operation))
+			} catch (err) {
+				reject(err)
+			}
+		}).timeout(timeout || this._timeout)
+	}
+
+	/*
+	 * Resolve content ID with timeout.
+	 */
+	async _resolve_content_id_with_timeout(timeout, content_id) {
+		return await this._with_specified_timeout(timeout, async (resolve, reject) => {
+			try {
+				resolve(await this._resolve_content_id(content_id))
+			} catch (err) {
+				reject(err)
+			}
+		})
+	}
+
+	/*
+	 * Stat a content ID.
+	 */
+	async stat(content_id, timeout) {
+		const resolved = await this._resolve_content_id_with_timeout(timeout, content_id)
+
+		return await this._with_specified_timeout(timeout, (resolve, reject) => {
+			this.ipfs.files.stat(`/ipfs/${resolved}`, { withLocal: true }, (err, res) => {
+				if (err) {
+					reject(err)
+					return
+				}
+				resolve(res)
+			})
+		})
+	}
+
+	/*
+	 * Return the size of a content ID.
+	 */
+	async size(content_id, timeout) {
+		const stat = await this.stat(content_id, timeout)
+		return stat.size
+	}
+
+	/*
+	 * Opens the specified content in read or write mode, and returns a Promise
+	 * with the stream.
+	 *
+	 * Read streams will contain a file_info property, with:
+	 *  - a `mime_type` field providing the file's MIME type, or a default.
+	 *  - an `ext` property, providing a file extension suggestion, or a default.
+	 *
+	 * Write streams have a slightly different flow, in order to allow for MIME
+	 * type detection and potential filtering. First off, they are written to a
+	 * temporary location, and only committed to the backend once their
+	 * `commit()` function is called.
+	 *
+	 * When the commit has finished, a `committed` event is emitted, which
+	 * contains the IPFS backend's content ID.
+	 *
+	 * Write streams also emit a `file_info` event during writing. It is passed
+	 * the `file_info` field as described above. Event listeners may now opt to
+	 * abort the write or continue and eventually `commit()` the file. There is
+	 * an explicit `cleanup()` function that removes temporary files as well,
+	 * in case committing is not desired.
+	 */
+	async open(content_id, mode, timeout) {
+		if (mode !== 'r' && mode !== 'w') {
+			throw Error('The only supported modes are "r" and "w".')
+		}
+
+		// Write stream
+		if (mode === 'w') {
+			return await this._create_write_stream(content_id, timeout)
+		}
+
+		// Read stream - with file type detection
+		return await this._create_read_stream(content_id, timeout)
+	}
+
+	async _create_write_stream(content_id) {
+		// IPFS wants us to just dump a stream into its storage, and then it
+		// returns a content ID (of its own). We instead need to return a stream
+		// immediately, which we eventually decorate with the content ID once
+		// that becomes available.
+		return new StorageWriteStream(this)
+	}
+
+	async _create_read_stream(content_id, timeout) {
+		const resolved = await this._resolve_content_id_with_timeout(timeout, content_id)
+
+		let found = false
+		return await this._with_specified_timeout(timeout, (resolve, reject) => {
+			const ls = this.ipfs.getReadableStream(resolved)
+			ls.on('data', async (result) => {
+				if (result.path === resolved) {
+					found = true
+
+					const ft_stream = await file_type.stream(result.content)
+					resolve(fix_file_info_on_stream(ft_stream))
+				}
+			})
+			ls.on('error', (err) => {
+				ls.end()
+				debug(err)
+				reject(err)
+			})
+			ls.on('end', () => {
+				if (!found) {
+					const err = new Error(`No matching content found for ${content_id}`)
+					debug(err)
+					reject(err)
+				}
+			})
+			ls.resume()
+		})
+	}
+
+	/*
+	 * Synchronize the given content ID
+	 */
+	async synchronize(content_id) {
+		const resolved = await this._resolve_content_id_with_timeout(this._timeout, content_id)
+
+		// validate resolved id is proper ipfs_cid, not null or empty string
+
+		if (this.pins[resolved]) {
+			return
+		}
+
+		debug(`Pinning ${resolved}`)
+
+		// Mark as pinned up-front so concurrent synchronize() calls don't
+		// re-pin; the error handler below clears the flag again on failure.
+		this.pins[resolved] = true
+
+		// This call blocks until the file is retrieved..
+		this.ipfs.pin.add(resolved, { quiet: true, pin: true }, (err) => {
+			if (err) {
+				debug(`Error Pinning: ${resolved}`)
+				delete this.pins[resolved]
+			} else {
+				debug(`Pinned ${resolved}`)
+			}
+		})
+	}
 }
 
 module.exports = {
-  Storage: Storage,
-};
+	Storage,
+}
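
To make the write/read flow above concrete, a small usage sketch (assuming a reachable local IPFS daemon; the content id string is arbitrary here):

const { Storage } = require('./storage')

async function roundTrip(buf) {
	const store = Storage.create({ timeout: 5000 })

	// Write path: stream into a temp file, then commit() pushes it to IPFS.
	const hash = await new Promise((resolve, reject) => {
		store.open('example-content-id', 'w').then((stream) => {
			stream.on('finish', () => stream.commit())
			stream.on('committed', resolve) // resolves with the IPFS hash
			stream.on('error', reject)
			stream.end(buf)
		}, reject)
	})

	// Read path: with a resolver that maps our id to the IPFS hash, the read
	// stream comes back decorated with file_info (MIME type and extension).
	const readStore = Storage.create({ resolve_content_id: async () => hash })
	const stream = await readStore.open('example-content-id', 'r')
	return { hash, mimeType: stream.file_info.mime_type }
}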

+ 196 - 196
storage-node/packages/storage/test/storage.js

@@ -16,208 +16,208 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
-
-const mocha = require('mocha');
-const chai = require('chai');
-const chai_as_promised = require('chai-as-promised');
-chai.use(chai_as_promised);
-const expect = chai.expect;
-
-const fs = require('fs');
-
-const { Storage } = require('@joystream/storage-node-backend');
-
-const IPFS_CID_REGEX = /^Qm[1-9A-HJ-NP-Za-km-z]{44}$/;
-
-function write(store, content_id, contents, callback)
-{
-  store.open(content_id, 'w')
-    .then((stream) => {
-
-      stream.on('finish', () => {
-        stream.commit();
-      });
-      stream.on('committed', callback);
-
-      if (!stream.write(contents)) {
-        stream.once('drain', () => stream.end())
-      } else {
-        process.nextTick(() => stream.end())
-      }
-    })
-    .catch((err) => {
-      expect.fail(err);
-    });
+'use strict'
+
+const mocha = require('mocha')
+const chai = require('chai')
+const chai_as_promised = require('chai-as-promised')
+
+chai.use(chai_as_promised)
+const expect = chai.expect
+
+const fs = require('fs')
+
+const { Storage } = require('@joystream/storage-node-backend')
+
+const IPFS_CID_REGEX = /^Qm[1-9A-HJ-NP-Za-km-z]{44}$/
+
+function write(store, content_id, contents, callback) {
+	store
+		.open(content_id, 'w')
+		.then((stream) => {
+			stream.on('finish', () => {
+				stream.commit()
+			})
+			stream.on('committed', callback)
+
+			if (!stream.write(contents)) {
+				stream.once('drain', () => stream.end())
+			} else {
+				process.nextTick(() => stream.end())
+			}
+		})
+		.catch((err) => {
+			expect.fail(err)
+		})
 }
 
-function read_all (stream) {
-  return new Promise((resolve, reject) => {
-    const chunks = []
-    stream.on('data', chunk => chunks.push(chunk))
-    stream.on('end', () => resolve(Buffer.concat(chunks)))
-    stream.on('error', err => reject(err))
-    stream.resume()
-  })
+function read_all(stream) {
+	return new Promise((resolve, reject) => {
+		const chunks = []
+		stream.on('data', (chunk) => chunks.push(chunk))
+		stream.on('end', () => resolve(Buffer.concat(chunks)))
+		stream.on('error', (err) => reject(err))
+		stream.resume()
+	})
 }
 
-function create_known_object(content_id, contents, callback)
-{
-  var hash;
-  const store = Storage.create({
-    resolve_content_id: () => {
-      return hash;
-    },
-  })
+function create_known_object(content_id, contents, callback) {
+	let hash
+	const store = Storage.create({
+		resolve_content_id: () => {
+			return hash
+		},
+	})
 
-  write(store, content_id, contents, (the_hash) => {
-    hash = the_hash;
-
-    callback(store, hash);
-  });
+	write(store, content_id, contents, (the_hash) => {
+		hash = the_hash
 
+		callback(store, hash)
+	})
 }
 
 describe('storage/storage', () => {
-  var storage;
-  before(async () => {
-    storage = await Storage.create({ timeout: 1900 });
-  });
-
-  describe('open()', () => {
-    it('can write a stream', (done) => {
-      write(storage, 'foobar', 'test-content', (hash) => {
-        expect(hash).to.not.be.undefined;
-        expect(hash).to.match(IPFS_CID_REGEX)
-        done();
-      });
-    });
-
-    it('detects the MIME type of a write stream', (done) => {
-      const contents = fs.readFileSync('../../storage-node_new.svg');
-      storage.open('mime-test', 'w')
-        .then((stream) => {
-          var file_info;
-          stream.on('file_info', (info) => {
-            // Could filter & abort here now, but we're just going to set this,
-            // and expect it to be set later...
-            file_info = info;
-          });
-
-          stream.on('finish', () => {
-            stream.commit();
-          });
-
-          stream.on('committed', (hash) => {
-            // ... if file_info is not set here, there's an issue.
-            expect(file_info).to.have.property('mime_type', 'application/xml');
-            expect(file_info).to.have.property('ext', 'xml');
-            done();
-          });
-
-          if (!stream.write(contents)) {
-            stream.once('drain', () => stream.end())
-          } else {
-            process.nextTick(() => stream.end())
-          }
-        })
-        .catch((err) => {
-          expect.fail(err);
-        });
-    });
-
-    it('can read a stream', (done) => {
-      const contents = 'test-for-reading';
-      create_known_object('foobar', contents, (store, hash) => {
-        store.open('foobar', 'r')
-          .then(async (stream) => {
-            const data = await read_all(stream);
-            expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0);
-            done();
-          })
-          .catch((err) => {
-            expect.fail(err);
-          });
-      });
-    });
-
-    it('detects the MIME type of a read stream', (done) => {
-      const contents = fs.readFileSync('../../storage-node_new.svg');
-      create_known_object('foobar', contents, (store, hash) => {
-        store.open('foobar', 'r')
-          .then(async (stream) => {
-            const data = await read_all(stream);
-            expect(contents.length).to.equal(data.length);
-            expect(Buffer.compare(data, contents)).to.equal(0);
-            expect(stream).to.have.property('file_info');
-
-            // application/xml+svg would be better, but this is good-ish.
-            expect(stream.file_info).to.have.property('mime_type', 'application/xml');
-            expect(stream.file_info).to.have.property('ext', 'xml');
-            done();
-          })
-          .catch((err) => {
-            expect.fail(err);
-          });
-      });
-    });
-
-    it('provides default MIME type for read streams', (done) => {
-      const contents = 'test-for-reading';
-      create_known_object('foobar', contents, (store, hash) => {
-        store.open('foobar', 'r')
-          .then(async (stream) => {
-            const data = await read_all(stream);
-            expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0);
-
-            expect(stream.file_info).to.have.property('mime_type', 'application/octet-stream');
-            expect(stream.file_info).to.have.property('ext', 'bin');
-            done();
-          })
-          .catch((err) => {
-            expect.fail(err);
-          });
-      });
-    });
-
-
-  });
-
-  describe('stat()', () => {
-    it('times out for unknown content', async () => {
-      const content = Buffer.from('this-should-not-exist');
-      const x = await storage.ipfs.add(content, { onlyHash: true });
-      const hash = x[0].hash;
-
-      // Try to stat this entry, it should timeout.
-      expect(storage.stat(hash)).to.eventually.be.rejectedWith('timed out');
-    });
-
-    it('returns stats for a known object', (done) => {
-      const content = 'stat-test';
-      const expected_size = content.length;
-      create_known_object('foobar', content, (store, hash) => {
-        expect(store.stat(hash)).to.eventually.have.property('size', expected_size);
-        done();
-      });
-    });
-  });
-
-  describe('size()', () => {
-    it('times out for unknown content', async () => {
-      const content = Buffer.from('this-should-not-exist');
-      const x = await storage.ipfs.add(content, { onlyHash: true });
-      const hash = x[0].hash;
-
-      // Try to stat this entry, it should timeout.
-      expect(storage.size(hash)).to.eventually.be.rejectedWith('timed out');
-    });
-
-    it('returns the size of a known object', (done) => {
-      create_known_object('foobar', 'stat-test', (store, hash) => {
-        expect(store.size(hash)).to.eventually.equal(15);
-        done();
-      });
-    });
-  });
-});
+	let storage
+	before(async () => {
+		storage = await Storage.create({ timeout: 1900 })
+	})
+
+	describe('open()', () => {
+		it('can write a stream', (done) => {
+			write(storage, 'foobar', 'test-content', (hash) => {
+				expect(hash).to.not.be.undefined
+				expect(hash).to.match(IPFS_CID_REGEX)
+				done()
+			})
+		})
+
+		it('detects the MIME type of a write stream', (done) => {
+			const contents = fs.readFileSync('../../storage-node_new.svg')
+			storage
+				.open('mime-test', 'w')
+				.then((stream) => {
+					let file_info
+					stream.on('file_info', (info) => {
+						// Could filter & abort here now, but we're just going to set this,
+						// and expect it to be set later...
+						file_info = info
+					})
+
+					stream.on('finish', () => {
+						stream.commit()
+					})
+
+					stream.on('committed', (hash) => {
+						// ... if file_info is not set here, there's an issue.
+						expect(file_info).to.have.property('mime_type', 'application/xml')
+						expect(file_info).to.have.property('ext', 'xml')
+						done()
+					})
+
+					if (!stream.write(contents)) {
+						stream.once('drain', () => stream.end())
+					} else {
+						process.nextTick(() => stream.end())
+					}
+				})
+				.catch((err) => {
+					expect.fail(err)
+				})
+		})
+
+		it('can read a stream', (done) => {
+			const contents = 'test-for-reading'
+			create_known_object('foobar', contents, (store, hash) => {
+				store
+					.open('foobar', 'r')
+					.then(async (stream) => {
+						const data = await read_all(stream)
+						expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
+						done()
+					})
+					.catch((err) => {
+						expect.fail(err)
+					})
+			})
+		})
+
+		it('detects the MIME type of a read stream', (done) => {
+			const contents = fs.readFileSync('../../storage-node_new.svg')
+			create_known_object('foobar', contents, (store, hash) => {
+				store
+					.open('foobar', 'r')
+					.then(async (stream) => {
+						const data = await read_all(stream)
+						expect(contents.length).to.equal(data.length)
+						expect(Buffer.compare(data, contents)).to.equal(0)
+						expect(stream).to.have.property('file_info')
+
+						// application/xml+svg would be better, but this is good-ish.
+						expect(stream.file_info).to.have.property('mime_type', 'application/xml')
+						expect(stream.file_info).to.have.property('ext', 'xml')
+						done()
+					})
+					.catch((err) => {
+						expect.fail(err)
+					})
+			})
+		})
+
+		it('provides default MIME type for read streams', (done) => {
+			const contents = 'test-for-reading'
+			create_known_object('foobar', contents, (store, hash) => {
+				store
+					.open('foobar', 'r')
+					.then(async (stream) => {
+						const data = await read_all(stream)
+						expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
+
+						expect(stream.file_info).to.have.property('mime_type', 'application/octet-stream')
+						expect(stream.file_info).to.have.property('ext', 'bin')
+						done()
+					})
+					.catch((err) => {
+						expect.fail(err)
+					})
+			})
+		})
+	})
+
+	describe('stat()', () => {
+		it('times out for unknown content', async () => {
+			const content = Buffer.from('this-should-not-exist')
+			const x = await storage.ipfs.add(content, { onlyHash: true })
+			const hash = x[0].hash
+
+			// Try to stat this entry; it should time out.
+			await expect(storage.stat(hash)).to.eventually.be.rejectedWith('timed out')
+		})
+
+		it('returns stats for a known object', (done) => {
+			const content = 'stat-test'
+			const expected_size = content.length
+			create_known_object('foobar', content, (store, hash) => {
+				expect(store.stat(hash)).to.eventually.have.property('size', expected_size)
+				done()
+			})
+		})
+	})
+
+	describe('size()', () => {
+		it('times out for unknown content', async () => {
+			const content = Buffer.from('this-should-not-exist')
+			const x = await storage.ipfs.add(content, { onlyHash: true })
+			const hash = x[0].hash
+
+			// Try to size this entry; it should time out.
+			await expect(storage.size(hash)).to.eventually.be.rejectedWith('timed out')
+		})
+
+		it('returns the size of a known object', (done) => {
+			create_known_object('foobar', 'stat-test', (store, hash) => {
+				expect(store.size(hash)).to.eventually.equal(15)
+				done()
+			})
+		})
+	})
+})

+ 8 - 8
storage-node/packages/util/externalPromise.js

@@ -3,17 +3,17 @@
  * so it can be fulfilled 'externally'. This is a bit of a hack, but most useful application is when
  * concurrent async operations are initiated that are all waiting on the same result value.
  */
-function newExternallyControlledPromise () {
-    let resolve, reject
+function newExternallyControlledPromise() {
+	let resolve, reject
 
-    const promise = new Promise((res, rej) => {
-      resolve = res
-      reject = rej
-    })
+	const promise = new Promise((res, rej) => {
+		resolve = res
+		reject = rej
+	})
 
-    return ({ resolve, reject, promise })
+	return { resolve, reject, promise }
 }
 
 module.exports = {
-    newExternallyControlledPromise
+	newExternallyControlledPromise,
 }
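
A brief illustration of the intended use: several concurrent consumers await the same promise, and a single producer settles it for all of them (the names here are made up for the example):

const { newExternallyControlledPromise } = require('./externalPromise')

const pending = newExternallyControlledPromise()

// Any number of consumers can start waiting before the value exists.
async function consumer(label) {
	const value = await pending.promise
	console.log(label, 'received', value)
}

consumer('first')
consumer('second')

// Later, whichever code path learns the result fulfills every waiter at once.
pending.resolve(42)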

+ 28 - 30
storage-node/packages/util/fs/resolve.js

@@ -16,11 +16,11 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const path = require('path');
+const path = require('path')
 
-const debug = require('debug')('joystream:util:fs:resolve');
+const debug = require('debug')('joystream:util:fs:resolve')
 
 /*
  * Resolves name relative to base, throwing an error if the given
@@ -31,37 +31,35 @@ const debug = require('debug')('joystream:util:fs:resolve');
  * useless for our case because it does not care about breaking out of
  * a base directory.
  */
-function resolve(base, name)
-{
-  debug('Resolving', name);
+function resolve(base, name) {
+	debug('Resolving', name)
 
-  // In a firs step, we strip leading slashes from the name, because they're
-  // just saying "relative to the base" in our use case.
-  var res = name.replace(/^\/+/, '');
-  debug('Stripped', res);
+	// In a first step, we strip leading slashes from the name, because they're
+	// just saying "relative to the base" in our use case.
+	let res = name.replace(/^\/+/, '')
+	debug('Stripped', res)
 
-  // At this point resolving the path should stay within the base we specify.
-  // We do specify a base other than the file system root, because the file
-  // everything is always relative to the file system root.
-  const test_base = path.join(path.sep, 'test-base');
-  debug('Test base is', test_base);
-  res = path.resolve(test_base, res);
-  debug('Resolved', res);
+	// At this point resolving the path should stay within the base we specify.
+	// We do specify a base other than the file system root, because otherwise
+	// everything would simply be relative to the file system root.
+	const test_base = path.join(path.sep, 'test-base')
+	debug('Test base is', test_base)
+	res = path.resolve(test_base, res)
+	debug('Resolved', res)
 
-  // Ok, we can check for violations now.
-  if (res.slice(0, test_base.length) != test_base) {
-    throw Error(`Name "${name}" cannot be resolved to a repo relative path, aborting!`);
-  }
+	// Ok, we can check for violations now.
+	if (res.slice(0, test_base.length) !== test_base) {
+		throw Error(`Name "${name}" cannot be resolved to a repo relative path, aborting!`)
+	}
 
-  // If we strip the base now, we have the relative name resolved.
-  res = res.slice(test_base.length + 1);
-  debug('Relative', res);
+	// If we strip the base now, we have the relative name resolved.
+	res = res.slice(test_base.length + 1)
+	debug('Relative', res)
 
-  // Finally we can join this relative name to the requested base.
-  var res = path.join(base, res);
-  debug('Result', res);
-  return res;
+	// Finally we can join this relative name to the requested base.
+	res = path.join(base, res)
+	debug('Result', res)
+	return res
 }
 
-
-module.exports = resolve;
+module.exports = resolve
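
Two illustrative calls showing the containment behaviour described above (the paths are examples):

const resolve = require('./resolve')

// Leading slashes and in-tree '..' segments are normalized against the base.
resolve('/var/repo', '/foo/../bar') // => '/var/repo/bar'

// Anything that would escape the base throws.
try {
	resolve('/var/repo', '../../etc/passwd')
} catch (err) {
	// Error: Name "../../etc/passwd" cannot be resolved to a repo relative path, aborting!
}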

+ 108 - 117
storage-node/packages/util/fs/walk.js

@@ -16,113 +16,105 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
-
-const fs = require('fs');
-const path = require('path');
-
-const debug = require('debug')('joystream:util:fs:walk');
-
-class Walker
-{
-  constructor(archive, base, cb)
-  {
-    this.archive = archive;
-    this.base = base;
-    this.slice_offset = this.base.length;
-    if (this.base[this.slice_offset - 1] != '/') {
-      this.slice_offset += 1;
-    }
-    this.cb = cb;
-    this.pending = 0;
-  }
-
-  /*
-   * Check pending
-   */
-  check_pending(name)
-  {
-    // Decrease pending count again.
-    this.pending -= 1;
-    debug('Finishing', name, 'decreases pending to', this.pending);
-    if (!this.pending) {
-      debug('No more pending.');
-      this.cb(null);
-    }
-  }
-
-  /*
-   * Helper function for walk; split out because it's used in two places.
-   */
-  report_and_recurse(relname, fname, lstat, linktarget)
-  {
-    // First report the value
-    this.cb(null, relname, lstat, linktarget);
-
-    // Recurse
-    if (lstat.isDirectory()) {
-      this.walk(fname);
-    }
-
-    this.check_pending(fname);
-  }
-
-
-  walk(dir)
-  {
-    // This is a little hacky - since readdir() may take a while, and we don't
-    // want the pending count to drop to zero before it's finished, we bump
-    // it up and down while readdir() does it's job.
-    // What this achieves is that when processing a parent directory finishes
-    // before walk() on a subdirectory could finish its readdir() call, the
-    // pending count still has a value.
-    // Note that in order not to hang on empty directories, we need to
-    // explicitly check the pending count in cases when there are no files.
-    this.pending += 1;
-    this.archive.readdir(dir, (err, files) => {
-      if (err) {
-        this.cb(err);
-        return;
-      }
-
-      // More pending data.
-      this.pending += files.length;
-      debug('Reading', dir, 'bumps pending to', this.pending);
-
-      files.forEach((name) => {
-        const fname = path.resolve(dir, name);
-        this.archive.lstat(fname, (err2, lstat) => {
-          if (err2) {
-            this.cb(err2);
-            return;
-          }
-
-          // The base is always prefixed, so a simple string slice should do.
-          const relname = fname.slice(this.slice_offset);
-
-          // We have a symbolic link? Resolve it.
-          if (lstat.isSymbolicLink()) {
-            this.archive.readlink(fname, (err3, linktarget) => {
-              if (err3) {
-                this.cb(err3);
-                return;
-              }
-
-              this.report_and_recurse(relname, fname, lstat, linktarget);
-            });
-          }
-          else {
-            this.report_and_recurse(relname, fname, lstat);
-          }
-        });
-      });
-
-      this.check_pending(dir);
-    });
-  }
+'use strict'
+
+const fs = require('fs')
+const path = require('path')
+
+const debug = require('debug')('joystream:util:fs:walk')
+
+class Walker {
+	constructor(archive, base, cb) {
+		this.archive = archive
+		this.base = base
+		this.slice_offset = this.base.length
+		if (this.base[this.slice_offset - 1] != '/') {
+			this.slice_offset += 1
+		}
+		this.cb = cb
+		this.pending = 0
+	}
+
+	/*
+	 * Check pending
+	 */
+	check_pending(name) {
+		// Decrease pending count again.
+		this.pending -= 1
+		debug('Finishing', name, 'decreases pending to', this.pending)
+		if (!this.pending) {
+			debug('No more pending.')
+			this.cb(null)
+		}
+	}
+
+	/*
+	 * Helper function for walk; split out because it's used in two places.
+	 */
+	report_and_recurse(relname, fname, lstat, linktarget) {
+		// First report the value
+		this.cb(null, relname, lstat, linktarget)
+
+		// Recurse
+		if (lstat.isDirectory()) {
+			this.walk(fname)
+		}
+
+		this.check_pending(fname)
+	}
+
+	walk(dir) {
+		// This is a little hacky - since readdir() may take a while, and we don't
+		// want the pending count to drop to zero before it's finished, we bump
+		// it up and down while readdir() does its job.
+		// What this achieves is that when processing a parent directory finishes
+		// before walk() on a subdirectory could finish its readdir() call, the
+		// pending count still has a value.
+		// Note that in order not to hang on empty directories, we need to
+		// explicitly check the pending count in cases when there are no files.
+		this.pending += 1
+		this.archive.readdir(dir, (err, files) => {
+			if (err) {
+				this.cb(err)
+				return
+			}
+
+			// More pending data.
+			this.pending += files.length
+			debug('Reading', dir, 'bumps pending to', this.pending)
+
+			files.forEach((name) => {
+				const fname = path.resolve(dir, name)
+				this.archive.lstat(fname, (err2, lstat) => {
+					if (err2) {
+						this.cb(err2)
+						return
+					}
+
+					// The base is always prefixed, so a simple string slice should do.
+					const relname = fname.slice(this.slice_offset)
+
+					// We have a symbolic link? Resolve it.
+					if (lstat.isSymbolicLink()) {
+						this.archive.readlink(fname, (err3, linktarget) => {
+							if (err3) {
+								this.cb(err3)
+								return
+							}
+
+							this.report_and_recurse(relname, fname, lstat, linktarget)
+						})
+					} else {
+						this.report_and_recurse(relname, fname, lstat)
+					}
+				})
+			})
+
+			this.check_pending(dir)
+		})
+	}
 }
 
-
 /*
  * Recursively walk a file system hierarchy (in undefined order), returning all
  * entries via the callback(err, relname, lstat, [linktarget]). The name relative
@@ -134,15 +126,14 @@ class Walker
  *
  * The callback is invoked one last time without data to signal the end of data.
  */
-module.exports = function(base, archive, cb)
-{
-  // Archive is optional and defaults to fs, but cb is not.
-  if (!cb) {
-    cb = archive;
-    archive = fs;
-  }
-
-  const resolved = path.resolve(base);
-  const w = new Walker(archive, resolved, cb);
-  w.walk(resolved);
-};
+module.exports = function (base, archive, cb) {
+	// Archive is optional and defaults to fs, but cb is not.
+	if (!cb) {
+		cb = archive
+		archive = fs
+	}
+
+	const resolved = path.resolve(base)
+	const w = new Walker(archive, resolved, cb)
+	w.walk(resolved)
+}
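
A minimal usage sketch of the walker (illustrative, not part of the commit; the
directory path is made up). The archive argument defaults to fs, and a final
callback invocation without a name signals the end of the traversal:

    const fswalk = require('@joystream/storage-utils/fs/walk')

    fswalk('/some/dir', (err, relname, lstat, linktarget) => {
      if (err) throw err
      if (!relname) return // final call without data: traversal complete
      if (lstat.isSymbolicLink()) {
        console.log(relname, '->', linktarget)
      } else {
        console.log(relname)
      }
    })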

+ 80 - 89
storage-node/packages/util/lru.js

@@ -16,111 +16,102 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const DEFAULT_CAPACITY = 100;
+const DEFAULT_CAPACITY = 100
 
-const debug = require('debug')('joystream:util:lru');
+const debug = require('debug')('joystream:util:lru')
 
 /*
  * Simple least recently used cache.
  */
-class LRUCache
-{
-  constructor(capacity = DEFAULT_CAPACITY)
-  {
-    this.capacity = capacity;
-    this.clear();
-  }
+class LRUCache {
+	constructor(capacity = DEFAULT_CAPACITY) {
+		this.capacity = capacity
+		this.clear()
+	}
 
-  /*
-   * Return the entry with the given key, and update it's usage.
-   */
-  get(key)
-  {
-    const val = this.store.get(key);
-    if (val) {
-      this.access.set(key, Date.now());
-    }
-    return val;
-  }
+	/*
+	 * Return the entry with the given key, and update its usage.
+	 */
+	get(key) {
+		const val = this.store.get(key)
+		if (val) {
+			this.access.set(key, Date.now())
+		}
+		return val
+	}
 
-  /*
-   * Return true if the key is the cache, false otherwise.
-   */
-  has(key)
-  {
-    return this.store.has(key);
-  }
+	/*
+	 * Return true if the key is in the cache, false otherwise.
+	 */
+	has(key) {
+		return this.store.has(key)
+	}
 
-  /*
-   * Put a value into the cache.
-   */
-  put(key, value)
-  {
-    this.store.set(key, value);
-    this.access.set(key, Date.now());
-    this._prune();
-  }
+	/*
+	 * Put a value into the cache.
+	 */
+	put(key, value) {
+		this.store.set(key, value)
+		this.access.set(key, Date.now())
+		this._prune()
+	}
 
-  /*
-   * Delete a value from the cache.
-   */
-  del(key)
-  {
-    this.store.delete(key);
-    this.access.delete(key);
-  }
+	/*
+	 * Delete a value from the cache.
+	 */
+	del(key) {
+		this.store.delete(key)
+		this.access.delete(key)
+	}
 
-  /*
-   * Current size of the cache
-   */
-  size()
-  {
-    return this.store.size;
-  }
+	/*
+	 * Current size of the cache
+	 */
+	size() {
+		return this.store.size
+	}
 
-  /*
-   * Clear the LRU cache entirely.
-   */
-  clear()
-  {
-    this.store = new Map();
-    this.access = new Map();
-  }
+	/*
+	 * Clear the LRU cache entirely.
+	 */
+	clear() {
+		this.store = new Map()
+		this.access = new Map()
+	}
 
-  /*
-   * Internal pruning function.
-   */
-  _prune()
-  {
-    debug('About to prune; have', this.store.size, 'and capacity is', this.capacity);
+	/*
+	 * Internal pruning function.
+	 */
+	_prune() {
+		debug('About to prune; have', this.store.size, 'and capacity is', this.capacity)
 
-    var sorted = Array.from(this.access.entries());
-    sorted.sort((first, second) => {
-      if (first[1] == second[1]) {
-        return 0;
-      }
-      return (first[1] < second[1] ? -1 : 1);
-    });
-    debug('Sorted keys are:', sorted);
+		const sorted = Array.from(this.access.entries())
+		sorted.sort((first, second) => {
+			if (first[1] == second[1]) {
+				return 0
+			}
+			return first[1] < second[1] ? -1 : 1
+		})
+		debug('Sorted keys are:', sorted)
 
-    debug('Have to prune', this.store.size - this.capacity, 'items.');
-    var idx = 0;
-    var to_prune = [];
-    while (idx < sorted.length && to_prune.length < (this.store.size - this.capacity)) {
-      to_prune.push(sorted[idx][0]);
-      ++idx;
-    }
+		debug('Have to prune', this.store.size - this.capacity, 'items.')
+		let idx = 0
+		const to_prune = []
+		while (idx < sorted.length && to_prune.length < this.store.size - this.capacity) {
+			to_prune.push(sorted[idx][0])
+			++idx
+		}
 
-    to_prune.forEach((key) => {
-      this.store.delete(key);
-      this.access.delete(key);
-    });
-    debug('Size after pruning', this.store.size);
-  }
+		to_prune.forEach((key) => {
+			this.store.delete(key)
+			this.access.delete(key)
+		})
+		debug('Size after pruning', this.store.size)
+	}
 }
 
 module.exports = {
-  LRUCache: LRUCache,
-};
+	LRUCache,
+}
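
A usage sketch of the cache (illustrative, not part of the commit). Recency is
tracked with Date.now() timestamps, which is why the tests further down sleep
between operations:

    const { LRUCache } = require('@joystream/storage-utils/lru')

    const cache = new LRUCache(2) // capacity of two entries
    cache.put('foo', 42)
    cache.put('bar', 43)
    cache.get('foo')     // refreshes 'foo', so 'bar' is now least recently used
    cache.put('baz', 44) // exceeds capacity; pruning evicts 'bar'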

+ 122 - 127
storage-node/packages/util/pagination.js

@@ -16,59 +16,59 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const debug = require('debug')('joystream:middleware:pagination');
+const debug = require('debug')('joystream:middleware:pagination')
 
 // Pagination definitions
 const _api_defs = {
-  parameters: {
-    paginationLimit: {
-      name: 'limit',
-      in: 'query',
-      description: 'Number of items per page.',
-      required: false,
-      schema: {
-        type: 'integer',
-        minimum: 1,
-        maximum: 50,
-        default: 20,
-      },
-    },
-    paginationOffset: {
-      name: 'offset',
-      in: 'query',
-      description: 'Page number (offset)',
-      schema: {
-        type: 'integer',
-        minimum: 0,
-      },
-    },
-  },
-  schemas: {
-    PaginationInfo: {
-      type: 'object',
-      required: ['self'],
-      properties: {
-        'self': {
-          type: 'string',
-        },
-        next: {
-          type: 'string',
-        },
-        prev: {
-          type: 'string',
-        },
-        first: {
-          type: 'string',
-        },
-        last: {
-          type: 'string',
-        },
-      },
-    },
-  },
-};
+	parameters: {
+		paginationLimit: {
+			name: 'limit',
+			in: 'query',
+			description: 'Number of items per page.',
+			required: false,
+			schema: {
+				type: 'integer',
+				minimum: 1,
+				maximum: 50,
+				default: 20,
+			},
+		},
+		paginationOffset: {
+			name: 'offset',
+			in: 'query',
+			description: 'Page number (offset)',
+			schema: {
+				type: 'integer',
+				minimum: 0,
+			},
+		},
+	},
+	schemas: {
+		PaginationInfo: {
+			type: 'object',
+			required: ['self'],
+			properties: {
+				self: {
+					type: 'string',
+				},
+				next: {
+					type: 'string',
+				},
+				prev: {
+					type: 'string',
+				},
+				first: {
+					type: 'string',
+				},
+				last: {
+					type: 'string',
+				},
+			},
+		},
+	},
+}
 
 /**
  * Silly pagination because it's faster than getting other modules to work.
@@ -83,81 +83,76 @@ const _api_defs = {
  *      If last_offset is given, create a last link with that offset
  **/
 module.exports = {
-
-  // Add pagination parameters and pagination info responses.
-  parameters: [
-    { '$ref': '#/components/parameters/paginationLimit' },
-    { '$ref': '#/components/parameters/paginationOffset' },
-
-  ],
-
-  response: {
-    '$ref': '#/components/schema/PaginationInfo'
-  },
-
-  // Update swagger/openapi specs with our own parameters and definitions
-  openapi: function(api)
-  {
-    api.components = api.components || {};
-    api.components.parameters = { ...api.components.parameters || {} , ..._api_defs.parameters };
-    api.components.schemas = { ...api.components.schemas || {}, ..._api_defs.schemas };
-    return api;
-  },
-
-  // Pagination function
-  paginate: function(req, res, last_offset)
-  {
-    // Skip if the response is not an object.
-    if (Object.prototype.toString.call(res) != "[object Object]") {
-      debug('Cannot paginate non-objects.');
-      return res;
-    }
-
-    // Defaults for parameters
-    var offset = req.query.offset || 0;
-    var limit = req.query.limit || 20;
-    debug('Create pagination links from offset=' + offset, 'limit=' + limit);
-
-    // Parse current url
-    const url = require('url');
-    var req_url = url.parse(req.protocol + '://' + req.get('host') + req.originalUrl);
-    var params = new url.URLSearchParams(req_url.query);
-
-    // Pagination object
-    var pagination = {
-      'self': req_url.href,
-    }
-
-    var prev = offset - limit;
-    if (prev >= 0) {
-      params.set('offset', prev);
-      req_url.search = params.toString();
-      pagination['prev'] = url.format(req_url);
-
-    }
-
-    var next = offset + limit;
-    if (next >= 0) {
-      params.set('offset', next);
-      req_url.search = params.toString();
-      pagination['next'] = url.format(req_url);
-    }
-
-    if (last_offset) {
-      params.set('offset', last_offset);
-      req_url.search = params.toString();
-      pagination['last'] = url.format(req_url);
-    }
-
-    // First
-    params.set('offset', 0);
-    req_url.search = params.toString();
-    pagination['first'] = url.format(req_url);
-
-    debug('pagination', pagination);
-
-    // Now set pagination values in response.
-    res.pagination = pagination;
-    return res;
-  },
-};
+	// Add pagination parameters and pagination info responses.
+	parameters: [
+		{ $ref: '#/components/parameters/paginationLimit' },
+		{ $ref: '#/components/parameters/paginationOffset' },
+	],
+
+	response: {
+		$ref: '#/components/schema/PaginationInfo',
+	},
+
+	// Update swagger/openapi specs with our own parameters and definitions
+	openapi(api) {
+		api.components = api.components || {}
+		api.components.parameters = { ...(api.components.parameters || {}), ..._api_defs.parameters }
+		api.components.schemas = { ...(api.components.schemas || {}), ..._api_defs.schemas }
+		return api
+	},
+
+	// Pagination function
+	paginate(req, res, last_offset) {
+		// Skip if the response is not an object.
+		if (Object.prototype.toString.call(res) != '[object Object]') {
+			debug('Cannot paginate non-objects.')
+			return res
+		}
+
+		// Defaults for parameters
+		const offset = req.query.offset || 0
+		const limit = req.query.limit || 20
+		debug('Create pagination links from offset=' + offset, 'limit=' + limit)
+
+		// Parse current url
+		const url = require('url')
+		const req_url = url.parse(req.protocol + '://' + req.get('host') + req.originalUrl)
+		const params = new url.URLSearchParams(req_url.query)
+
+		// Pagination object
+		const pagination = {
+			self: req_url.href,
+		}
+
+		const prev = offset - limit
+		if (prev >= 0) {
+			params.set('offset', prev)
+			req_url.search = params.toString()
+			pagination.prev = url.format(req_url)
+		}
+
+		const next = offset + limit
+		if (next >= 0) {
+			params.set('offset', next)
+			req_url.search = params.toString()
+			pagination.next = url.format(req_url)
+		}
+
+		if (last_offset) {
+			params.set('offset', last_offset)
+			req_url.search = params.toString()
+			pagination.last = url.format(req_url)
+		}
+
+		// First
+		params.set('offset', 0)
+		req_url.search = params.toString()
+		pagination.first = url.format(req_url)
+
+		debug('pagination', pagination)
+
+		// Now set pagination values in response.
+		res.pagination = pagination
+		return res
+	},
+}
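
A usage sketch of paginate() (illustrative, not part of the commit; the express
wiring is assumed). Note that the second argument is the response payload, not
the HTTP response object, and the optional third argument adds a 'last' link:

    const pagination = require('@joystream/storage-utils/pagination')

    // Handler for e.g. GET /foo?limit=10&offset=15
    function handler(req, res) {
      const body = pagination.paginate(req, { items: [] }, 35)
      // body.pagination now carries self/first/prev/next/last URLs.
      res.json(body)
    }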

+ 388 - 424
storage-node/packages/util/ranges.js

@@ -16,12 +16,12 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const uuid = require('uuid');
-const stream_buf = require('stream-buffers');
+const uuid = require('uuid')
+const stream_buf = require('stream-buffers')
 
-const debug = require('debug')('joystream:util:ranges');
+const debug = require('debug')('joystream:util:ranges')
 
 /*
  * Range parsing
@@ -31,24 +31,22 @@ const debug = require('debug')('joystream:util:ranges');
  * Parse a range string, e.g. '0-100' or '-100' or '0-'. Return the values
  * in an array of int or undefined (if not provided).
  */
-function _parse_range(range)
-{
-  var matches = range.match(/^(\d+-\d+|\d+-|-\d+|\*)$/u);
-  if (!matches) {
-    throw new Error(`Not a valid range: ${range}`);
-  }
-
-  var vals = matches[1].split('-').map((v) => {
-    return v === '*' || v === '' ? undefined : parseInt(v, 10);
-  });
-
-  if (vals[1] <= vals[0]) {
-    throw new Error(`Invalid range: start "${vals[0]}" must be before end "${vals[1]}".`);
-  }
-
-  return [vals[0], vals[1]];
-}
+function _parse_range(range) {
+	const matches = range.match(/^(\d+-\d+|\d+-|-\d+|\*)$/u)
+	if (!matches) {
+		throw new Error(`Not a valid range: ${range}`)
+	}
+
+	const vals = matches[1].split('-').map((v) => {
+		return v === '*' || v === '' ? undefined : parseInt(v, 10)
+	})
 
+	if (vals[1] <= vals[0]) {
+		throw new Error(`Invalid range: start "${vals[0]}" must be before end "${vals[1]}".`)
+	}
+
+	return [vals[0], vals[1]]
+}
 
 /*
  * Parse a range header value, e.g. unit=ranges, where ranges
@@ -56,84 +54,77 @@ function _parse_range(range)
  * custom unit string. If the unit (and equal sign) are not given, assume
  * 'bytes'.
  */
-function parse(range_str)
-{
-  var res = {};
-  debug('Parse range header value:', range_str);
-  var matches = range_str.match(/^(([^\s]+)=)?((?:(?:\d+-\d+|-\d+|\d+-),?)+)$/u)
-  if (!matches) {
-    throw new Error(`Not a valid range header: ${range_str}`);
-  }
-
-  res.unit = matches[2] || 'bytes';
-  res.range_str = matches[3];
-  res.ranges = [];
-
-  // Parse individual ranges
-  var ranges = []
-  res.range_str.split(',').forEach((range) => {
-    ranges.push(_parse_range(range));
-  });
-
-  // Merge ranges into result.
-  ranges.forEach((new_range) => {
-    debug('Found range:', new_range);
-
-    var is_merged = false;
-    for (var i in res.ranges) {
-      var old_range = res.ranges[i];
-
-      // Skip if the new range is fully separate from the old range.
-      if (old_range[1] + 1 < new_range[0] || new_range[1] + 1 < old_range[0]) {
-        debug('Range does not overlap with', old_range);
-        continue;
-      }
-
-      // If we know they're adjacent or overlapping, we construct the
-      // merged range from the lower start and the higher end of both
-      // ranges.
-      var merged = [
-        Math.min(old_range[0], new_range[0]),
-        Math.max(old_range[1], new_range[1])
-      ];
-      res.ranges[i] = merged;
-      is_merged = true;
-      debug('Merged', new_range, 'into', old_range, 'as', merged);
-    }
-
-    if (!is_merged) {
-      debug('Non-overlapping range!');
-      res.ranges.push(new_range);
-    }
-  });
-
-  // Finally, sort ranges
-  res.ranges.sort((first, second) => {
-    if (first[0] === second[0]) {
-      // Should not happen due to merging.
-      return 0;
-    }
-    return (first[0] < second[0]) ? -1 : 1;
-  });
-
-  debug('Result of parse is', res);
-  return res;
+function parse(range_str) {
+	const res = {}
+	debug('Parse range header value:', range_str)
+	const matches = range_str.match(/^(([^\s]+)=)?((?:(?:\d+-\d+|-\d+|\d+-),?)+)$/u)
+	if (!matches) {
+		throw new Error(`Not a valid range header: ${range_str}`)
+	}
+
+	res.unit = matches[2] || 'bytes'
+	res.range_str = matches[3]
+	res.ranges = []
+
+	// Parse individual ranges
+	const ranges = []
+	res.range_str.split(',').forEach((range) => {
+		ranges.push(_parse_range(range))
+	})
+
+	// Merge ranges into result.
+	ranges.forEach((new_range) => {
+		debug('Found range:', new_range)
+
+		let is_merged = false
+		for (const i in res.ranges) {
+			const old_range = res.ranges[i]
+
+			// Skip if the new range is fully separate from the old range.
+			if (old_range[1] + 1 < new_range[0] || new_range[1] + 1 < old_range[0]) {
+				debug('Range does not overlap with', old_range)
+				continue
+			}
+
+			// If we know they're adjacent or overlapping, we construct the
+			// merged range from the lower start and the higher end of both
+			// ranges.
+			const merged = [Math.min(old_range[0], new_range[0]), Math.max(old_range[1], new_range[1])]
+			res.ranges[i] = merged
+			is_merged = true
+			debug('Merged', new_range, 'into', old_range, 'as', merged)
+		}
+
+		if (!is_merged) {
+			debug('Non-overlapping range!')
+			res.ranges.push(new_range)
+		}
+	})
+
+	// Finally, sort ranges
+	res.ranges.sort((first, second) => {
+		if (first[0] === second[0]) {
+			// Should not happen due to merging.
+			return 0
+		}
+		return first[0] < second[0] ? -1 : 1
+	})
+
+	debug('Result of parse is', res)
+	return res
 }
 
-
 /*
  * Async version of parse().
  */
-function parseAsync(range_str, cb)
-{
-  try {
-    return cb(parse(range_str));
-  } catch (err) {
-    return cb(null, err);
-  }
+function parseAsync(range_str, cb) {
+	try {
+		return cb(parse(range_str))
+	} catch (err) {
+		return cb(null, err)
+	}
 }
 
-
 /*
  * Range streaming
  */
@@ -150,343 +141,316 @@ function parseAsync(range_str, cb)
  * with file system based streams. We'll see how likely that's going to be in
  * future.
  */
-class RangeSender
-{
-  constructor(response, stream, opts, end_callback)
-  {
-    // Options
-    this.name = opts.name || 'content.bin';
-    this.type = opts.type || 'application/octet-stream';
-    this.size = opts.size;
-    this.ranges = opts.ranges;
-    this.download = opts.download || false;
-
-    // Range handling related state.
-    this.read_offset = 0;             // Nothing read so far
-    this.range_index = -1;            // No range index yet.
-    this.range_boundary = undefined;  // Generate boundary when needed.
-
-    // Event handlers & state
-    this.handlers = {};
-    this.opened = false;
-
-    debug('RangeSender:', this);
-    if (opts.ranges) {
-      debug('Parsed ranges:', opts.ranges.ranges);
-    }
-
-    // Parameters
-    this.response = response;
-    this.stream = stream;
-    this.opts = opts;
-    this.end_callback = end_callback;
-  }
-
-  on_error(err)
-  {
-    // Assume hiding the actual error is best, and default to 404.
-    debug('Error:', err);
-    if (!this.response.headersSent) {
-      this.response.status(err.code || 404).send({
-        message: err.message || `File not found: ${this.name}`
-      });
-    }
-    if (this.end_callback) {
-      this.end_callback(err);
-    }
-  }
-
-  on_end()
-  {
-    debug('End of stream.');
-    this.response.end();
-    if (this.end_callback) {
-      this.end_callback();
-    }
-  }
-
-
-  // **** No ranges
-  on_open_no_range()
-  {
-    // File got opened, so we can set headers/status
-    debug('Open succeeded:', this.name, this.type);
-    this.opened = true;
-
-    this.response.status(200);
-    this.response.contentType(this.type);
-    this.response.header('Accept-Ranges', 'bytes');
-    this.response.header('Content-Transfer-Encoding', 'binary');
-
-    if (this.download) {
-      this.response.header('Content-Disposition', `attachment; filename="${this.name}"`);
-    }
-    else {
-      this.response.header('Content-Disposition', 'inline');
-    }
-
-    if (this.size) {
-      this.response.header('Content-Length', this.size);
-    }
-  }
-
-
-  on_data_no_range(chunk)
-  {
-    if (!this.opened) {
-      this.handlers['open']();
-    }
-
-    // As simple as it can be.
-    this.response.write(Buffer.from(chunk, 'binary'));
-  }
-
-  // *** With ranges
-  next_range_headers()
-  {
-    // Next range
-    this.range_index += 1;
-    if (this.range_index >= this.ranges.ranges.length) {
-      debug('Cannot advance range index; we are done.');
-      return undefined;
-    }
-
-    // Calculate this range's size.
-    var range = this.ranges.ranges[this.range_index];
-    var total_size;
-    if (this.size) {
-      total_size = this.size;
-    }
-    if (typeof range[0] === 'undefined') {
-      range[0] = 0;
-    }
-    if (typeof range[1] === 'undefined') {
-      if (this.size) {
-        range[1] = total_size - 1;
-      }
-    }
-
-    var send_size;
-    if (typeof range[0] !== 'undefined' && typeof range[1] !== 'undefined') {
-      send_size = range[1] - range[0] + 1;
-    }
-
-    // Write headers, but since we may be in a multipart situation, write them
-    // explicitly to the stream.
-    var start = (typeof range[0] === 'undefined') ? '' : `${range[0]}`;
-    var end = (typeof range[1] === 'undefined') ? '' : `${range[1]}`;
-
-    var size_str;
-    if (total_size) {
-      size_str = `${total_size}`;
-    }
-    else {
-      size_str = '*';
-    }
-
-    var ret = {
-      'Content-Range': `bytes ${start}-${end}/${size_str}`,
-      'Content-Type': `${this.type}`,
-    };
-    if (send_size) {
-      ret['Content-Length'] = `${send_size}`;
-    }
-    return ret;
-  }
-
-
-  next_range()
-  {
-    if (this.ranges.ranges.length == 1) {
-      debug('Cannot start new range; only one requested.');
-      this.stream.off('data', this.handlers['data']);
-      return false;
-    }
-
-    var headers = this.next_range_headers();
-
-    if (headers) {
-      var header_buf = new stream_buf.WritableStreamBuffer();
-      // We start a range with a boundary.
-      header_buf.write(`\r\n--${this.range_boundary}\r\n`);
-
-      // The we write the range headers.
-      for (var header in headers) {
-        header_buf.write(`${header}: ${headers[header]}\r\n`);
-      }
-      header_buf.write('\r\n');
-      this.response.write(header_buf.getContents());
-      debug('New range started.');
-      return true;
-    }
-
-    // No headers means we're finishing the last range.
-    this.response.write(`\r\n--${this.range_boundary}--\r\n`);
-    debug('End of ranges sent.');
-    this.stream.off('data', this.handlers['data']);
-    return false;
-  }
-
-
-  on_open_ranges()
-  {
-    // File got opened, so we can set headers/status
-    debug('Open succeeded:', this.name, this.type);
-    this.opened = true;
-
-    this.response.header('Accept-Ranges', 'bytes');
-    this.response.header('Content-Transfer-Encoding', 'binary');
-    this.response.header('Content-Disposition', 'inline');
-
-    // For single ranges, the content length should be the size of the
-    // range. For multiple ranges, we don't send a content length
-    // header.
-    //
-    // Similarly, the type is different whether or not there is more than
-    // one range.
-    if (this.ranges.ranges.length == 1) {
-      this.response.writeHead(206, 'Partial Content', this.next_range_headers());
-    }
-    else {
-      this.range_boundary = uuid.v4();
-      var headers = {
-        'Content-Type': `multipart/byteranges; boundary=${this.range_boundary}`,
-      };
-      this.response.writeHead(206, 'Partial Content', headers);
-      this.next_range();
-    }
-  }
-
-  on_data_ranges(chunk)
-  {
-    if (!this.opened) {
-      this.handlers['open']();
-    }
-    // Crap, node.js streams are stupid. No guarantee for seek support. Sure,
-    // that makes node.js easier to implement, but offloads everything onto the
-    // application developer.
-    //
-    // So, we skip chunks until our read position is within the range we want to
-    // send at the moment. We're relying on ranges being in-order, which this
-    // file's parser luckily (?) provides.
-    //
-    // The simplest optimization would be at ever range start to seek() to the
-    // start.
-    var chunk_range = [this.read_offset, this.read_offset + chunk.length - 1];
-    debug('= Got chunk with byte range', chunk_range);
-    while (true) {
-      var req_range = this.ranges.ranges[this.range_index];
-      if (!req_range) {
-        break;
-      }
-      debug('Current requested range is', req_range);
-      if (!req_range[1]) {
-        req_range = [req_range[0], Number.MAX_SAFE_INTEGER];
-        debug('Treating as', req_range);
-      }
-
-      // No overlap in the chunk and requested range; don't write.
-      if (chunk_range[1] < req_range[0] || chunk_range[0] > req_range[1]) {
-        debug('Ignoring chunk; it is out of range.');
-        break;
-      }
-
-      // Since there is overlap, find the segment that's entirely within the
-      // chunk.
-      var segment = [
-        Math.max(chunk_range[0], req_range[0]),
-        Math.min(chunk_range[1], req_range[1]),
-      ];
-      debug('Segment to send within chunk is', segment);
-
-      // Normalize the segment to a chunk offset
-      var start = segment[0] - this.read_offset;
-      var end = segment[1] - this.read_offset;
-      var len = end - start + 1;
-      debug('Offsets into buffer are', [start, end], 'with length', len);
-
-      // Write the slice that we want to write. We first create a buffer from the
-      // chunk. Then we slice a new buffer from the same underlying ArrayBuffer,
-      // starting at the original buffer's offset, further offset by the segment
-      // start. The segment length bounds the end of our slice.
-      var buf = Buffer.from(chunk, 'binary');
-      this.response.write(Buffer.from(buf.buffer, buf.byteOffset + start, len));
-
-      // If the requested range is finished, we should start the next one.
-      if (req_range[1] > chunk_range[1]) {
-        debug('Chunk is finished, but the requested range is missing bytes.');
-        break;
-      }
-
-      if (req_range[1] <= chunk_range[1]) {
-        debug('Range is finished.');
-        if (!this.next_range(segment)) {
-          break;
-        }
-      }
-    }
-
-    // Update read offset when chunk is finished.
-    this.read_offset += chunk.length;
-  }
-
-
-  start()
-  {
-    // Before we start streaming, let's ensure our ranges don't contain any
-    // without start - if they do, we nuke them all and treat this as a full
-    // request.
-    var nuke = false;
-    if (this.ranges) {
-      for (var i in this.ranges.ranges) {
-        if (typeof this.ranges.ranges[i][0] === 'undefined') {
-          nuke = true;
-          break;
-        }
-      }
-    }
-    if (nuke) {
-      this.ranges = undefined;
-    }
-
-    // Register callbacks. Store them in a handlers object so we can
-    // keep the bound version around for stopping to listen to events.
-    this.handlers['error'] = this.on_error.bind(this);
-    this.handlers['end'] = this.on_end.bind(this);
-
-    if (this.ranges) {
-      debug('Preparing to handle ranges.');
-      this.handlers['open'] = this.on_open_ranges.bind(this);
-      this.handlers['data'] = this.on_data_ranges.bind(this);
-    }
-    else {
-      debug('No ranges, just send the whole file.');
-      this.handlers['open'] = this.on_open_no_range.bind(this);
-      this.handlers['data'] = this.on_data_no_range.bind(this);
-    }
-
-    for (var handler in this.handlers) {
-      this.stream.on(handler, this.handlers[handler]);
-    }
-  }
+class RangeSender {
+	constructor(response, stream, opts, end_callback) {
+		// Options
+		this.name = opts.name || 'content.bin'
+		this.type = opts.type || 'application/octet-stream'
+		this.size = opts.size
+		this.ranges = opts.ranges
+		this.download = opts.download || false
+
+		// Range handling related state.
+		this.read_offset = 0 // Nothing read so far
+		this.range_index = -1 // No range index yet.
+		this.range_boundary = undefined // Generate boundary when needed.
+
+		// Event handlers & state
+		this.handlers = {}
+		this.opened = false
+
+		debug('RangeSender:', this)
+		if (opts.ranges) {
+			debug('Parsed ranges:', opts.ranges.ranges)
+		}
+
+		// Parameters
+		this.response = response
+		this.stream = stream
+		this.opts = opts
+		this.end_callback = end_callback
+	}
+
+	on_error(err) {
+		// Assume hiding the actual error is best, and default to 404.
+		debug('Error:', err)
+		if (!this.response.headersSent) {
+			this.response.status(err.code || 404).send({
+				message: err.message || `File not found: ${this.name}`,
+			})
+		}
+		if (this.end_callback) {
+			this.end_callback(err)
+		}
+	}
+
+	on_end() {
+		debug('End of stream.')
+		this.response.end()
+		if (this.end_callback) {
+			this.end_callback()
+		}
+	}
+
+	// **** No ranges
+	on_open_no_range() {
+		// File got opened, so we can set headers/status
+		debug('Open succeeded:', this.name, this.type)
+		this.opened = true
+
+		this.response.status(200)
+		this.response.contentType(this.type)
+		this.response.header('Accept-Ranges', 'bytes')
+		this.response.header('Content-Transfer-Encoding', 'binary')
+
+		if (this.download) {
+			this.response.header('Content-Disposition', `attachment; filename="${this.name}"`)
+		} else {
+			this.response.header('Content-Disposition', 'inline')
+		}
+
+		if (this.size) {
+			this.response.header('Content-Length', this.size)
+		}
+	}
+
+	on_data_no_range(chunk) {
+		if (!this.opened) {
+			this.handlers.open()
+		}
+
+		// As simple as it can be.
+		this.response.write(Buffer.from(chunk, 'binary'))
+	}
+
+	// *** With ranges
+	next_range_headers() {
+		// Next range
+		this.range_index += 1
+		if (this.range_index >= this.ranges.ranges.length) {
+			debug('Cannot advance range index; we are done.')
+			return undefined
+		}
+
+		// Calculate this range's size.
+		const range = this.ranges.ranges[this.range_index]
+		let total_size
+		if (this.size) {
+			total_size = this.size
+		}
+		if (typeof range[0] === 'undefined') {
+			range[0] = 0
+		}
+		if (typeof range[1] === 'undefined') {
+			if (this.size) {
+				range[1] = total_size - 1
+			}
+		}
+
+		let send_size
+		if (typeof range[0] !== 'undefined' && typeof range[1] !== 'undefined') {
+			send_size = range[1] - range[0] + 1
+		}
+
+		// Write headers, but since we may be in a multipart situation, write them
+		// explicitly to the stream.
+		const start = typeof range[0] === 'undefined' ? '' : `${range[0]}`
+		const end = typeof range[1] === 'undefined' ? '' : `${range[1]}`
+
+		let size_str
+		if (total_size) {
+			size_str = `${total_size}`
+		} else {
+			size_str = '*'
+		}
+
+		const ret = {
+			'Content-Range': `bytes ${start}-${end}/${size_str}`,
+			'Content-Type': `${this.type}`,
+		}
+		if (send_size) {
+			ret['Content-Length'] = `${send_size}`
+		}
+		return ret
+	}
+
+	next_range() {
+		if (this.ranges.ranges.length == 1) {
+			debug('Cannot start new range; only one requested.')
+			this.stream.off('data', this.handlers.data)
+			return false
+		}
+
+		const headers = this.next_range_headers()
+
+		if (headers) {
+			const header_buf = new stream_buf.WritableStreamBuffer()
+			// We start a range with a boundary.
+			header_buf.write(`\r\n--${this.range_boundary}\r\n`)
+
+			// Then we write the range headers.
+			for (const header in headers) {
+				header_buf.write(`${header}: ${headers[header]}\r\n`)
+			}
+			header_buf.write('\r\n')
+			this.response.write(header_buf.getContents())
+			debug('New range started.')
+			return true
+		}
+
+		// No headers means we're finishing the last range.
+		this.response.write(`\r\n--${this.range_boundary}--\r\n`)
+		debug('End of ranges sent.')
+		this.stream.off('data', this.handlers.data)
+		return false
+	}
+
+	on_open_ranges() {
+		// File got opened, so we can set headers/status
+		debug('Open succeeded:', this.name, this.type)
+		this.opened = true
+
+		this.response.header('Accept-Ranges', 'bytes')
+		this.response.header('Content-Transfer-Encoding', 'binary')
+		this.response.header('Content-Disposition', 'inline')
+
+		// For single ranges, the content length should be the size of the
+		// range. For multiple ranges, we don't send a content length
+		// header.
+		//
+		// Similarly, the type is different whether or not there is more than
+		// one range.
+		if (this.ranges.ranges.length == 1) {
+			this.response.writeHead(206, 'Partial Content', this.next_range_headers())
+		} else {
+			this.range_boundary = uuid.v4()
+			const headers = {
+				'Content-Type': `multipart/byteranges; boundary=${this.range_boundary}`,
+			}
+			this.response.writeHead(206, 'Partial Content', headers)
+			this.next_range()
+		}
+	}
+
+	on_data_ranges(chunk) {
+		if (!this.opened) {
+			this.handlers.open()
+		}
+		// Crap, node.js streams are stupid. No guarantee for seek support. Sure,
+		// that makes node.js easier to implement, but offloads everything onto the
+		// application developer.
+		//
+		// So, we skip chunks until our read position is within the range we want to
+		// send at the moment. We're relying on ranges being in-order, which this
+		// file's parser luckily (?) provides.
+		//
+		// The simplest optimization would be at every range start to seek() to the
+		// start.
+		const chunk_range = [this.read_offset, this.read_offset + chunk.length - 1]
+		debug('= Got chunk with byte range', chunk_range)
+		while (true) {
+			let req_range = this.ranges.ranges[this.range_index]
+			if (!req_range) {
+				break
+			}
+			debug('Current requested range is', req_range)
+			if (!req_range[1]) {
+				req_range = [req_range[0], Number.MAX_SAFE_INTEGER]
+				debug('Treating as', req_range)
+			}
+
+			// No overlap in the chunk and requested range; don't write.
+			if (chunk_range[1] < req_range[0] || chunk_range[0] > req_range[1]) {
+				debug('Ignoring chunk; it is out of range.')
+				break
+			}
+
+			// Since there is overlap, find the segment that's entirely within the
+			// chunk.
+			const segment = [Math.max(chunk_range[0], req_range[0]), Math.min(chunk_range[1], req_range[1])]
+			debug('Segment to send within chunk is', segment)
+
+			// Normalize the segment to a chunk offset
+			const start = segment[0] - this.read_offset
+			const end = segment[1] - this.read_offset
+			const len = end - start + 1
+			debug('Offsets into buffer are', [start, end], 'with length', len)
+
+			// Write the slice that we want to write. We first create a buffer from the
+			// chunk. Then we slice a new buffer from the same underlying ArrayBuffer,
+			// starting at the original buffer's offset, further offset by the segment
+			// start. The segment length bounds the end of our slice.
+			const buf = Buffer.from(chunk, 'binary')
+			this.response.write(Buffer.from(buf.buffer, buf.byteOffset + start, len))
+
+			// If this range is finished, start the next one; otherwise wait for more data.
+			if (req_range[1] > chunk_range[1]) {
+				debug('Chunk is finished, but the requested range is missing bytes.')
+				break
+			}
+
+			if (req_range[1] <= chunk_range[1]) {
+				debug('Range is finished.')
+				if (!this.next_range(segment)) {
+					break
+				}
+			}
+		}
+
+		// Update read offset when chunk is finished.
+		this.read_offset += chunk.length
+	}
+
+	start() {
+		// Before we start streaming, let's ensure our ranges don't contain any
+		// without start - if they do, we nuke them all and treat this as a full
+		// request.
+		let nuke = false
+		if (this.ranges) {
+			for (const i in this.ranges.ranges) {
+				if (typeof this.ranges.ranges[i][0] === 'undefined') {
+					nuke = true
+					break
+				}
+			}
+		}
+		if (nuke) {
+			this.ranges = undefined
+		}
+
+		// Register callbacks. Store them in a handlers object so we can
+		// keep the bound version around for stopping to listen to events.
+		this.handlers.error = this.on_error.bind(this)
+		this.handlers.end = this.on_end.bind(this)
+
+		if (this.ranges) {
+			debug('Preparing to handle ranges.')
+			this.handlers.open = this.on_open_ranges.bind(this)
+			this.handlers.data = this.on_data_ranges.bind(this)
+		} else {
+			debug('No ranges, just send the whole file.')
+			this.handlers.open = this.on_open_no_range.bind(this)
+			this.handlers.data = this.on_data_no_range.bind(this)
+		}
+
+		for (const handler in this.handlers) {
+			this.stream.on(handler, this.handlers[handler])
+		}
+	}
 }
 
-
-function send(response, stream, opts, end_callback)
-{
-  var sender = new RangeSender(response, stream, opts, end_callback);
-  sender.start();
+function send(response, stream, opts, end_callback) {
+	const sender = new RangeSender(response, stream, opts, end_callback)
+	sender.start()
 }
 
-
 /*
  * Exports
  */
 
-module.exports =
-{
-  parse: parse,
-  parseAsync: parseAsync,
-  RangeSender: RangeSender,
-  send: send,
-};
+module.exports = {
+	parse,
+	parseAsync,
+	RangeSender,
+	send,
+}
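
A usage sketch of range-aware file serving (illustrative, not part of the
commit; the express wiring, filename and size are made up, and the require path
is assumed to match the other util modules):

    const fs = require('fs')
    const ranges = require('@joystream/storage-utils/ranges')

    // parse() merges overlapping ranges, e.g.
    // 'bytes=0-10,5-20' yields { unit: 'bytes', ranges: [[0, 20]], ... }

    function serveFile(req, res, filename, size) {
      let parsed
      try {
        parsed = ranges.parse(req.headers.range)
      } catch (err) {
        // Absent or malformed Range header: send the whole file instead.
      }
      ranges.send(res, fs.createReadStream(filename), {
        name: filename,
        type: 'application/octet-stream',
        size,
        ranges: parsed,
      })
    }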

+ 6 - 6
storage-node/packages/util/stripEndingSlash.js

@@ -1,10 +1,10 @@
 // return url with last `/` removed
 function removeEndingForwardSlash(url) {
-    let st = new String(url)
-    if (st.endsWith('/')) {
-        return st.substring(0, st.length - 1);
-    }
-    return st.toString()
+	const st = new String(url)
+	if (st.endsWith('/')) {
+		return st.substring(0, st.length - 1)
+	}
+	return st.toString()
 }
 
-module.exports = removeEndingForwardSlash
+module.exports = removeEndingForwardSlash
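
Behaviour sketch (illustrative; the require path is assumed):

    const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')

    stripEndingSlash('http://localhost:3000/') // 'http://localhost:3000'
    stripEndingSlash('http://localhost:3000')  // unchanged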

+ 39 - 50
storage-node/packages/util/test/fs/resolve.js

@@ -16,65 +16,54 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const path = require('path');
+const mocha = require('mocha')
+const expect = require('chai').expect
+const path = require('path')
 
-const resolve = require('@joystream/storage-utils/fs/resolve');
+const resolve = require('@joystream/storage-utils/fs/resolve')
 
-function tests(base)
-{
-  it('resolves absolute paths relative to the base', function()
-  {
-    const resolved = resolve(base, '/foo');
-    const relative = path.relative(base, resolved);
-    expect(relative).to.equal('foo');
-  });
+function tests(base) {
+	it('resolves absolute paths relative to the base', function () {
+		const resolved = resolve(base, '/foo')
+		const relative = path.relative(base, resolved)
+		expect(relative).to.equal('foo')
+	})
 
-  it('allows for relative paths that stay in the base', function()
-  {
-    const resolved = resolve(base, 'foo/../bar');
-    const relative = path.relative(base, resolved);
-    expect(relative).to.equal('bar');
-  });
+	it('allows for relative paths that stay in the base', function () {
+		const resolved = resolve(base, 'foo/../bar')
+		const relative = path.relative(base, resolved)
+		expect(relative).to.equal('bar')
+	})
 
-  it('prevents relative paths from breaking out of the base', function()
-  {
-    expect(() => resolve(base, '../foo')).to.throw();
-  });
+	it('prevents relative paths from breaking out of the base', function () {
+		expect(() => resolve(base, '../foo')).to.throw()
+	})
 
-  it('prevents long relative paths from breaking out of the base', function()
-  {
-    expect(() => resolve(base, '../../../foo')).to.throw();
-  });
+	it('prevents long relative paths from breaking out of the base', function () {
+		expect(() => resolve(base, '../../../foo')).to.throw()
+	})
 
-  it('prevents sneaky relative paths from breaking out of the base', function()
-  {
-    expect(() => resolve(base, 'foo/../../../bar')).to.throw();
-  });
+	it('prevents sneaky relative paths from breaking out of the base', function () {
+		expect(() => resolve(base, 'foo/../../../bar')).to.throw()
+	})
 }
 
-describe('util/fs/resolve', function()
-{
-  describe('slash base', function()
-  {
-    tests('/');
-  });
+describe('util/fs/resolve', function () {
+	describe('slash base', function () {
+		tests('/')
+	})
 
-  describe('empty base', function()
-  {
-    tests('');
-  });
+	describe('empty base', function () {
+		tests('')
+	})
 
-  describe('short base', function()
-  {
-    tests('/base');
-  });
+	describe('short base', function () {
+		tests('/base')
+	})
 
-  describe('long base', function()
-  {
-    tests('/this/base/is/very/long/indeed');
-  });
-});
+	describe('long base', function () {
+		tests('/this/base/is/very/long/indeed')
+	})
+})

+ 35 - 38
storage-node/packages/util/test/fs/walk.js

@@ -16,54 +16,51 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const temp = require('temp').track();
+const mocha = require('mocha')
+const expect = require('chai').expect
+const temp = require('temp').track()
 
-const fs = require('fs');
-const path = require('path');
+const fs = require('fs')
+const path = require('path')
 
-const fswalk = require('@joystream/storage-utils/fs/walk');
+const fswalk = require('@joystream/storage-utils/fs/walk')
 
-function walktest(archive, base, done)
-{
-  var results = new Map();
+function walktest(archive, base, done) {
+	const results = new Map()
 
-  fswalk(base, archive, (err, relname, stat, linktarget) => {
-    expect(err).to.be.null;
+	fswalk(base, archive, (err, relname, stat, linktarget) => {
+		expect(err).to.be.null
 
-    if (relname) {
-      results.set(relname, [stat, linktarget]);
-      return;
-    }
+		if (relname) {
+			results.set(relname, [stat, linktarget])
+			return
+		}
 
-    // End of data, do testing
-    const entries = Array.from(results.keys());
-    expect(entries).to.include('foo');
-    expect(results.get('foo')[0].isDirectory()).to.be.true;
+		// End of data, do testing
+		const entries = Array.from(results.keys())
+		expect(entries).to.include('foo')
+		expect(results.get('foo')[0].isDirectory()).to.be.true
 
-    expect(entries).to.include('bar');
-    expect(results.get('bar')[0].isFile()).to.be.true;
+		expect(entries).to.include('bar')
+		expect(results.get('bar')[0].isFile()).to.be.true
 
-    if (archive === fs) {
-      expect(entries).to.include('quux');
-      expect(results.get('quux')[0].isSymbolicLink()).to.be.true;
-      expect(results.get('quux')[1]).to.equal('foo/baz');
-    }
+		if (archive === fs) {
+			expect(entries).to.include('quux')
+			expect(results.get('quux')[0].isSymbolicLink()).to.be.true
+			expect(results.get('quux')[1]).to.equal('foo/baz')
+		}
 
-    expect(entries).to.include('foo/baz');
-    expect(results.get('foo/baz')[0].isFile()).to.be.true;
+		expect(entries).to.include('foo/baz')
+		expect(results.get('foo/baz')[0].isFile()).to.be.true
 
-    done();
-  });
+		done()
+	})
 }
 
-describe('util/fs/walk', function()
-{
-  it('reports all files in a file system hierarchy', function(done)
-  {
-    walktest(fs, path.resolve(__dirname, '../data'), done)
-  });
-});
+describe('util/fs/walk', function () {
+	it('reports all files in a file system hierarchy', function (done) {
+		walktest(fs, path.resolve(__dirname, '../data'), done)
+	})
+})

+ 130 - 141
storage-node/packages/util/test/lru.js

@@ -16,149 +16,138 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
+'use strict'
 
-const mocha = require('mocha');
-const expect = require('chai').expect;
+const mocha = require('mocha')
+const expect = require('chai').expect
 
-const lru = require('@joystream/storage-utils/lru');
+const lru = require('@joystream/storage-utils/lru')
 
-const DEFAULT_SLEEP = 1;
-function sleep(ms = DEFAULT_SLEEP)
-{
-  return new Promise(resolve => {
-    setTimeout(resolve, ms)
-  })
+const DEFAULT_SLEEP = 1
+function sleep(ms = DEFAULT_SLEEP) {
+	return new Promise((resolve) => {
+		setTimeout(resolve, ms)
+	})
 }
 
-describe('util/lru', function()
-{
-  describe('simple usage', function()
-  {
-    it('does not contain keys that were not added', function()
-    {
-      var cache = new lru.LRUCache();
-      expect(cache.size()).to.equal(0);
-
-      var val = cache.get('something');
-      expect(val).to.be.undefined;
-
-      expect(cache.has('something')).to.be.false;
-    });
-
-    it('contains keys that were added', function()
-    {
-      var cache = new lru.LRUCache();
-      cache.put('something', 'yay!');
-      expect(cache.size()).to.equal(1);
-
-      var val = cache.get('something');
-      expect(val).to.be.equal('yay!');
-
-      expect(cache.has('something')).to.be.true;
-    });
-
-    it('does not contain keys that were deleted', function()
-    {
-      var cache = new lru.LRUCache();
-      cache.put('something', 'yay!');
-      expect(cache.size()).to.equal(1);
-      var val = cache.get('something');
-      expect(val).to.be.equal('yay!');
-      expect(cache.has('something')).to.be.true;
-
-      cache.del('something');
-      expect(cache.size()).to.equal(0);
-      val = cache.get('something');
-      expect(val).to.be.undefined;
-      expect(cache.has('something')).to.be.false;
-    });
-
-    it('can be cleared', function()
-    {
-      var cache = new lru.LRUCache();
-      cache.put('something', 'yay!');
-      expect(cache.size()).to.equal(1);
-
-      cache.clear();
-      expect(cache.size()).to.equal(0);
-    });
-  });
-
-  describe('capacity management', function()
-  {
-    it('does not grow beyond capacity', async function()
-    {
-      var cache = new lru.LRUCache(2); // Small capacity
-      expect(cache.size()).to.equal(0);
-
-      cache.put('foo', '42');
-      expect(cache.size()).to.equal(1);
-
-      await sleep();
-
-      cache.put('bar', '42');
-      expect(cache.size()).to.equal(2);
-
-      await sleep();
-
-      cache.put('baz', '42');
-      expect(cache.size()).to.equal(2); // Capacity exceeded
-    });
-
-    it('removes the oldest key when pruning', async function()
-    {
-      var cache = new lru.LRUCache(2); // Small capacity
-      expect(cache.size()).to.equal(0);
-
-      cache.put('foo', '42');
-      expect(cache.size()).to.equal(1);
-      expect(cache.has('foo')).to.be.true;
-
-      await sleep();
-
-      cache.put('bar', '42');
-      expect(cache.size()).to.equal(2);
-      expect(cache.has('foo')).to.be.true;
-      expect(cache.has('bar')).to.be.true;
-
-      await sleep();
-
-      cache.put('baz', '42');
-      expect(cache.size()).to.equal(2); // Capacity exceeded
-      expect(cache.has('bar')).to.be.true;
-      expect(cache.has('baz')).to.be.true;
-    });
-
-    it('updates LRU timestamp when reading', async function()
-    {
-      var cache = new lru.LRUCache(2); // Small capacity
-      expect(cache.size()).to.equal(0);
-
-      cache.put('foo', '42');
-      expect(cache.size()).to.equal(1);
-      expect(cache.has('foo')).to.be.true;
-
-      await sleep();
-
-      cache.put('bar', '42');
-      expect(cache.size()).to.equal(2);
-      expect(cache.has('foo')).to.be.true;
-      expect(cache.has('bar')).to.be.true;
-
-      await sleep();
-
-      // 'foo' is older than 'bar' right now, so should be pruned first. But
-      // if we get 'foo', it would be 'bar' that has to go.
-      var _ = cache.get('foo');
-
-      // Makes debugging a bit more obvious
-      await sleep();
-
-      cache.put('baz', '42');
-      expect(cache.size()).to.equal(2); // Capacity exceeded
-      expect(cache.has('foo')).to.be.true;
-      expect(cache.has('baz')).to.be.true;
-    });
-  });
-});
+describe('util/lru', function () {
+	describe('simple usage', function () {
+		it('does not contain keys that were not added', function () {
+			const cache = new lru.LRUCache()
+			expect(cache.size()).to.equal(0)
+
+			const val = cache.get('something')
+			expect(val).to.be.undefined
+
+			expect(cache.has('something')).to.be.false
+		})
+
+		it('contains keys that were added', function () {
+			const cache = new lru.LRUCache()
+			cache.put('something', 'yay!')
+			expect(cache.size()).to.equal(1)
+
+			const val = cache.get('something')
+			expect(val).to.be.equal('yay!')
+
+			expect(cache.has('something')).to.be.true
+		})
+
+		it('does not contain keys that were deleted', function () {
+			const cache = new lru.LRUCache()
+			cache.put('something', 'yay!')
+			expect(cache.size()).to.equal(1)
+			let val = cache.get('something')
+			expect(val).to.be.equal('yay!')
+			expect(cache.has('something')).to.be.true
+
+			cache.del('something')
+			expect(cache.size()).to.equal(0)
+			val = cache.get('something')
+			expect(val).to.be.undefined
+			expect(cache.has('something')).to.be.false
+		})
+
+		it('can be cleared', function () {
+			const cache = new lru.LRUCache()
+			cache.put('something', 'yay!')
+			expect(cache.size()).to.equal(1)
+
+			cache.clear()
+			expect(cache.size()).to.equal(0)
+		})
+	})
+
+	describe('capacity management', function () {
+		it('does not grow beyond capacity', async function () {
+			const cache = new lru.LRUCache(2) // Small capacity
+			expect(cache.size()).to.equal(0)
+
+			cache.put('foo', '42')
+			expect(cache.size()).to.equal(1)
+
+			await sleep()
+
+			cache.put('bar', '42')
+			expect(cache.size()).to.equal(2)
+
+			await sleep()
+
+			cache.put('baz', '42')
+			expect(cache.size()).to.equal(2) // Capacity exceeded
+		})
+
+		it('removes the oldest key when pruning', async function () {
+			const cache = new lru.LRUCache(2) // Small capacity
+			expect(cache.size()).to.equal(0)
+
+			cache.put('foo', '42')
+			expect(cache.size()).to.equal(1)
+			expect(cache.has('foo')).to.be.true
+
+			await sleep()
+
+			cache.put('bar', '42')
+			expect(cache.size()).to.equal(2)
+			expect(cache.has('foo')).to.be.true
+			expect(cache.has('bar')).to.be.true
+
+			await sleep()
+
+			cache.put('baz', '42')
+			expect(cache.size()).to.equal(2) // Capacity exceeded
+			expect(cache.has('bar')).to.be.true
+			expect(cache.has('baz')).to.be.true
+		})
+
+		it('updates LRU timestamp when reading', async function () {
+			const cache = new lru.LRUCache(2) // Small capacity
+			expect(cache.size()).to.equal(0)
+
+			cache.put('foo', '42')
+			expect(cache.size()).to.equal(1)
+			expect(cache.has('foo')).to.be.true
+
+			await sleep()
+
+			cache.put('bar', '42')
+			expect(cache.size()).to.equal(2)
+			expect(cache.has('foo')).to.be.true
+			expect(cache.has('bar')).to.be.true
+
+			await sleep()
+
+			// 'foo' is older than 'bar' right now, so should be pruned first. But
+			// if we get 'foo', it would be 'bar' that has to go.
+			const _ = cache.get('foo')
+
+			// Makes debugging a bit more obvious
+			await sleep()
+
+			cache.put('baz', '42')
+			expect(cache.size()).to.equal(2) // Capacity exceeded
+			expect(cache.has('foo')).to.be.true
+			expect(cache.has('baz')).to.be.true
+		})
+	})
+})

+ 96 - 106
storage-node/packages/util/test/pagination.js

@@ -16,109 +16,99 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
-
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const mock_http = require('node-mocks-http');
-
-const pagination = require('@joystream/storage-utils/pagination');
-
-describe('util/pagination', function()
-{
-  describe('openapi()', function()
-  {
-    it('should add parameters and definitions to an API spec', function()
-    {
-      var api = pagination.openapi({});
-
-      // Parameters
-      expect(api).to.have.property('components');
-
-      expect(api.components).to.have.property('parameters');
-      expect(api.components.parameters).to.have.property('paginationLimit');
-
-      expect(api.components.parameters.paginationLimit).to.have.property('name');
-      expect(api.components.parameters.paginationLimit.name).to.equal('limit');
-
-      expect(api.components.parameters.paginationLimit).to.have.property('schema');
-      expect(api.components.parameters.paginationLimit.schema).to.have.property('type');
-      expect(api.components.parameters.paginationLimit.schema.type).to.equal('integer');
-
-      expect(api.components.parameters.paginationOffset).to.have.property('name');
-      expect(api.components.parameters.paginationOffset.name).to.equal('offset');
-
-      expect(api.components.parameters.paginationOffset).to.have.property('schema');
-      expect(api.components.parameters.paginationOffset.schema).to.have.property('type');
-      expect(api.components.parameters.paginationOffset.schema.type).to.equal('integer');
-
-
-      // Defintiions
-      expect(api.components).to.have.property('schemas');
-      expect(api.components.schemas).to.have.property('PaginationInfo');
-
-      expect(api.components.schemas.PaginationInfo).to.have.property('type');
-      expect(api.components.schemas.PaginationInfo.type).to.equal('object');
-
-      expect(api.components.schemas.PaginationInfo).to.have.property('properties');
-      expect(api.components.schemas.PaginationInfo.properties)
-        .to.be.an('object')
-        .that.has.all.keys('self', 'next', 'prev', 'first', 'last');
-    });
-  });
-
-
-  describe('paginate()', function()
-  {
-    it('should add pagination links to a response object', function()
-    {
-      var req = mock_http.createRequest({
-        method: 'GET',
-        url: '/foo?limit=10',
-        query: {
-          limit: 10, // Mock is a little stupid, we have to explicitly set query
-        },
-        headers: {
-          host: 'localhost',
-        },
-        protocol: 'http',
-      });
-
-      var res = pagination.paginate(req, {});
-
-      expect(res).to.have.property('pagination')
-        .that.has.all.keys('self', 'first', 'next');
-
-      expect(res.pagination.self).to.equal('http://localhost/foo?limit=10');
-      expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0');
-      expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=10');
-    });
-
-    it('should add a last pagination link when requested', function()
-    {
-      var req = mock_http.createRequest({
-        method: 'GET',
-        url: '/foo?limit=10&offset=15',
-        query: {
-          limit: 10, // Mock is a little stupid, we have to explicitly set query
-          offset: 15,
-        },
-        headers: {
-          host: 'localhost',
-        },
-        protocol: 'http',
-      });
-
-      var res = pagination.paginate(req, {}, 35);
-
-      expect(res).to.have.property('pagination')
-        .that.has.all.keys('self', 'first', 'next', 'prev', 'last');
-
-      expect(res.pagination.self).to.equal('http://localhost/foo?limit=10&offset=15');
-      expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0');
-      expect(res.pagination.last).to.equal('http://localhost/foo?limit=10&offset=35');
-      expect(res.pagination.prev).to.equal('http://localhost/foo?limit=10&offset=5');
-      expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=25');
-    });
-  });
-});
+'use strict'
+
+const mocha = require('mocha')
+const expect = require('chai').expect
+const mock_http = require('node-mocks-http')
+
+const pagination = require('@joystream/storage-utils/pagination')
+
+describe('util/pagination', function () {
+	describe('openapi()', function () {
+		it('should add parameters and definitions to an API spec', function () {
+			const api = pagination.openapi({})
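+			// openapi() appears to decorate a (possibly empty) spec object with
+			// reusable pagination parameters and schemas under `components`,
+			// which the assertions below verify piece by piece.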
+
+			// Parameters
+			expect(api).to.have.property('components')
+
+			expect(api.components).to.have.property('parameters')
+			expect(api.components.parameters).to.have.property('paginationLimit')
+
+			expect(api.components.parameters.paginationLimit).to.have.property('name')
+			expect(api.components.parameters.paginationLimit.name).to.equal('limit')
+
+			expect(api.components.parameters.paginationLimit).to.have.property('schema')
+			expect(api.components.parameters.paginationLimit.schema).to.have.property('type')
+			expect(api.components.parameters.paginationLimit.schema.type).to.equal('integer')
+
+			expect(api.components.parameters.paginationOffset).to.have.property('name')
+			expect(api.components.parameters.paginationOffset.name).to.equal('offset')
+
+			expect(api.components.parameters.paginationOffset).to.have.property('schema')
+			expect(api.components.parameters.paginationOffset.schema).to.have.property('type')
+			expect(api.components.parameters.paginationOffset.schema.type).to.equal('integer')
+
+			// Definitions
+			expect(api.components).to.have.property('schemas')
+			expect(api.components.schemas).to.have.property('PaginationInfo')
+
+			expect(api.components.schemas.PaginationInfo).to.have.property('type')
+			expect(api.components.schemas.PaginationInfo.type).to.equal('object')
+
+			expect(api.components.schemas.PaginationInfo).to.have.property('properties')
+			expect(api.components.schemas.PaginationInfo.properties)
+				.to.be.an('object')
+				.that.has.all.keys('self', 'next', 'prev', 'first', 'last')
+		})
+	})
+
+	describe('paginate()', function () {
+		it('should add pagination links to a response object', function () {
+			const req = mock_http.createRequest({
+				method: 'GET',
+				url: '/foo?limit=10',
+				query: {
+					limit: 10, // The mock is a little limited; we have to set query explicitly
+				},
+				headers: {
+					host: 'localhost',
+				},
+				protocol: 'http',
+			})
+
+			const res = pagination.paginate(req, {})
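+			// With limit=10 and no offset in the query, the expectations below
+			// suggest paginate() treats the current offset as 0: `first` points
+			// at offset=0 and `next` at offset=0+limit. No item count was passed,
+			// so no `prev` or `last` links are expected.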
+
+			expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next')
+
+			expect(res.pagination.self).to.equal('http://localhost/foo?limit=10')
+			expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
+			expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=10')
+		})
+
+		it('should add a last pagination link when requested', function () {
+			const req = mock_http.createRequest({
+				method: 'GET',
+				url: '/foo?limit=10&offset=15',
+				query: {
+					limit: 10, // The mock is a little limited; we have to set query explicitly
+					offset: 15,
+				},
+				headers: {
+					host: 'localhost',
+				},
+				protocol: 'http',
+			})
+
+			const res = pagination.paginate(req, {}, 35)
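+			// The third argument is presumably the total item count. With
+			// offset=15 and limit=10, the links below work out to
+			// prev = 15 - 10 = 5 and next = 15 + 10 = 25; `last` simply reuses
+			// the count (35) as its offset.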
+
+			expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next', 'prev', 'last')
+
+			expect(res.pagination.self).to.equal('http://localhost/foo?limit=10&offset=15')
+			expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
+			expect(res.pagination.last).to.equal('http://localhost/foo?limit=10&offset=35')
+			expect(res.pagination.prev).to.equal('http://localhost/foo?limit=10&offset=5')
+			expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=25')
+		})
+	})
+})

+ 375 - 391
storage-node/packages/util/test/ranges.js

@@ -16,394 +16,378 @@
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-'use strict';
-
-const mocha = require('mocha');
-const expect = require('chai').expect;
-const mock_http = require('node-mocks-http');
-const stream_buffers = require('stream-buffers');
-
-const ranges = require('@joystream/storage-utils/ranges');
-
-describe('util/ranges', function()
-{
-  describe('parse()', function()
-  {
-    it('should parse a full range', function()
-    {
-      // Range with unit
-      var range = ranges.parse('bytes=0-100');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-100');
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(100);
-
-      // Range without unit
-      var range = ranges.parse('0-100');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-100');
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(100);
-
-      // Range with custom unit
-      //
-      var range = ranges.parse('foo=0-100');
-      expect(range.unit).to.equal('foo');
-      expect(range.range_str).to.equal('0-100');
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(100);
-    });
-
-    it('should error out on malformed strings', function()
-    {
-      expect(() => ranges.parse('foo')).to.throw();
-      expect(() => ranges.parse('foo=bar')).to.throw();
-      expect(() => ranges.parse('foo=100')).to.throw();
-      expect(() => ranges.parse('foo=100-0')).to.throw();
-    });
-
-    it('should parse a range without end', function()
-    {
-      var range = ranges.parse('0-');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-');
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.be.undefined;
-    });
-
-    it('should parse a range without start', function()
-    {
-      var range = ranges.parse('-100');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('-100');
-      expect(range.ranges[0][0]).to.be.undefined;
-      expect(range.ranges[0][1]).to.equal(100);
-    });
-
-    it('should parse multiple ranges', function()
-    {
-      var range = ranges.parse('0-10,30-40,60-80');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-10,30-40,60-80');
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(10);
-      expect(range.ranges[1][0]).to.equal(30);
-      expect(range.ranges[1][1]).to.equal(40);
-      expect(range.ranges[2][0]).to.equal(60);
-      expect(range.ranges[2][1]).to.equal(80);
-    });
-
-    it('should merge overlapping ranges', function()
-    {
-      // Two overlapping ranges
-      var range = ranges.parse('0-20,10-30');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-20,10-30');
-      expect(range.ranges).to.have.lengthOf(1);
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(30);
-
-      // Three overlapping ranges
-      var range = ranges.parse('0-15,10-25,20-30');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-15,10-25,20-30');
-      expect(range.ranges).to.have.lengthOf(1);
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(30);
-
-      // Three overlapping ranges, reverse order
-      var range = ranges.parse('20-30,10-25,0-15');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('20-30,10-25,0-15');
-      expect(range.ranges).to.have.lengthOf(1);
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(30);
-
-      // Adjacent ranges
-      var range = ranges.parse('0-10,11-20');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('0-10,11-20');
-      expect(range.ranges).to.have.lengthOf(1);
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(20);
-    });
-
-    it('should sort ranges', function()
-    {
-      var range = ranges.parse('10-30,0-5');
-      expect(range.unit).to.equal('bytes');
-      expect(range.range_str).to.equal('10-30,0-5');
-      expect(range.ranges).to.have.lengthOf(2);
-      expect(range.ranges[0][0]).to.equal(0);
-      expect(range.ranges[0][1]).to.equal(5);
-      expect(range.ranges[1][0]).to.equal(10);
-      expect(range.ranges[1][1]).to.equal(30);
-    });
-  });
-
-  describe('send()', function()
-  {
-    it('should send full files on request', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({});
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(200);
-        expect(res.getHeader('content-type')).to.equal('application/test');
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-        expect(res._getBuffer().toString()).to.equal('Hello, world!');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-    });
-
-    it('should send a range spanning the entire file on request', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({});
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[0, 12]],
-        }
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206);
-        expect(res.getHeader('content-type')).to.equal('application/test');
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-        expect(res.getHeader('content-range')).to.equal('bytes 0-12/*');
-        expect(res.getHeader('content-length')).to.equal('13');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-        expect(res._getBuffer().toString()).to.equal('Hello, world!');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-
-    });
-
-    it('should send a small range on request', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({});
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[1, 11]], // Cut off first and last letter
-        }
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206);
-        expect(res.getHeader('content-type')).to.equal('application/test');
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-        expect(res.getHeader('content-range')).to.equal('bytes 1-11/*');
-        expect(res.getHeader('content-length')).to.equal('11');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-        expect(res._getBuffer().toString()).to.equal('ello, world');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-    });
-
-    it('should send ranges crossing buffer boundaries', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({
-        chunkSize: 3, // Setting a chunk size smaller than the range should
-                      // not impact the test.
-      });
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[1, 11]], // Cut off first and last letter
-        }
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206);
-        expect(res.getHeader('content-type')).to.equal('application/test');
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-        expect(res.getHeader('content-range')).to.equal('bytes 1-11/*');
-        expect(res.getHeader('content-length')).to.equal('11');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-        expect(res._getBuffer().toString()).to.equal('ello, world');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-    });
-
-    it('should send multiple ranges', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({});
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[1, 3], [5, 7]], // Slice two ranges out
-        }
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206);
-        expect(res.getHeader('content-type')).to.satisfy((str) => str.startsWith('multipart/byteranges'));
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-
-        // The buffer should contain both ranges, but with all the That would be
-        // "ell" and ", w".
-        // It's pretty elaborate having to parse the entire multipart response
-        // body, so we'll restrict ourselves to finding lines within it.
-        var body = res._getBuffer().toString();
-        expect(body).to.contain('\r\nContent-Range: bytes 1-3/*\r\n');
-        expect(body).to.contain('\r\nell\r\n');
-        expect(body).to.contain('\r\nContent-Range: bytes 5-7/*\r\n');
-        expect(body).to.contain('\r\n, w');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-    });
-
-    it('should deal with ranges without end', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({});
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[5, undefined]], // Skip the first part, but read until end
-        }
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206);
-        expect(res.getHeader('content-type')).to.equal('application/test');
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-        expect(res.getHeader('content-range')).to.equal('bytes 5-/*');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-        expect(res._getBuffer().toString()).to.equal(', world!');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-    });
-
-    it('should ignore ranges without start', function(done)
-    {
-      var res = mock_http.createResponse({});
-      var in_stream = new stream_buffers.ReadableStreamBuffer({});
-
-      // End-of-stream callback
-      var opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[undefined, 5]], // Only last five
-        }
-      };
-      ranges.send(res, in_stream, opts, function(err) {
-        expect(err).to.not.exist;
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(200);
-        expect(res.getHeader('content-type')).to.equal('application/test');
-        expect(res.getHeader('content-disposition')).to.equal('inline');
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true;
-        expect(res._getBuffer().toString()).to.equal('Hello, world!');
-
-        // Notify mocha that we're done.
-        done();
-      });
-
-      // Simulate file stream
-      in_stream.emit('open');
-      in_stream.put('Hello, world!');
-      in_stream.stop();
-
-    });
-  });
-});
+'use strict'
+
+const mocha = require('mocha')
+const expect = require('chai').expect
+const mock_http = require('node-mocks-http')
+const stream_buffers = require('stream-buffers')
+
+const ranges = require('@joystream/storage-utils/ranges')
+
+describe('util/ranges', function () {
+	describe('parse()', function () {
+		it('should parse a full range', function () {
+			// Range with unit
+			let range = ranges.parse('bytes=0-100')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-100')
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(100)
+
+			// Range without unit
+			range = ranges.parse('0-100')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-100')
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(100)
+
+			// Range with custom unit
+			range = ranges.parse('foo=0-100')
+			expect(range.unit).to.equal('foo')
+			expect(range.range_str).to.equal('0-100')
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(100)
+		})
+
+		it('should error out on malformed strings', function () {
+			expect(() => ranges.parse('foo')).to.throw()
+			expect(() => ranges.parse('foo=bar')).to.throw()
+			expect(() => ranges.parse('foo=100')).to.throw()
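+			// 'foo=100-0' is syntactically a unit plus range, but start > end,
+			// which is presumably why it is also rejected as malformed.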
+			expect(() => ranges.parse('foo=100-0')).to.throw()
+		})
+
+		it('should parse a range without end', function () {
+			const range = ranges.parse('0-')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-')
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.be.undefined
+		})
+
+		it('should parse a range without start', function () {
+			const range = ranges.parse('-100')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('-100')
+			expect(range.ranges[0][0]).to.be.undefined
+			expect(range.ranges[0][1]).to.equal(100)
+		})
+
+		it('should parse multiple ranges', function () {
+			const range = ranges.parse('0-10,30-40,60-80')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-10,30-40,60-80')
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(10)
+			expect(range.ranges[1][0]).to.equal(30)
+			expect(range.ranges[1][1]).to.equal(40)
+			expect(range.ranges[2][0]).to.equal(60)
+			expect(range.ranges[2][1]).to.equal(80)
+		})
+
+		it('should merge overlapping ranges', function () {
+			// Two overlapping ranges
+			let range = ranges.parse('0-20,10-30')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-20,10-30')
+			expect(range.ranges).to.have.lengthOf(1)
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(30)
+
+			// Three overlapping ranges
+			range = ranges.parse('0-15,10-25,20-30')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-15,10-25,20-30')
+			expect(range.ranges).to.have.lengthOf(1)
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(30)
+
+			// Three overlapping ranges, reverse order
+			range = ranges.parse('20-30,10-25,0-15')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('20-30,10-25,0-15')
+			expect(range.ranges).to.have.lengthOf(1)
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(30)
+
+			// Adjacent ranges
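+			// Byte ranges are inclusive, so 0-10 and 11-20 are contiguous and
+			// are expected to merge into a single 0-20 range.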
+			range = ranges.parse('0-10,11-20')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('0-10,11-20')
+			expect(range.ranges).to.have.lengthOf(1)
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(20)
+		})
+
+		it('should sort ranges', function () {
+			const range = ranges.parse('10-30,0-5')
+			expect(range.unit).to.equal('bytes')
+			expect(range.range_str).to.equal('10-30,0-5')
+			expect(range.ranges).to.have.lengthOf(2)
+			expect(range.ranges[0][0]).to.equal(0)
+			expect(range.ranges[0][1]).to.equal(5)
+			expect(range.ranges[1][0]).to.equal(10)
+			expect(range.ranges[1][1]).to.equal(30)
+		})
+	})
+
+	describe('send()', function () {
+		it('should send full files on request', function (done) {
+			const res = mock_http.createResponse({})
+			const in_stream = new stream_buffers.ReadableStreamBuffer({})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(200)
+				expect(res.getHeader('content-type')).to.equal('application/test')
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+				expect(res._getBuffer().toString()).to.equal('Hello, world!')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+
+		it('should send a range spanning the entire file on request', function (done) {
+			const res = mock_http.createResponse({})
+			const in_stream = new stream_buffers.ReadableStreamBuffer({})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+				ranges: {
+					ranges: [[0, 12]],
+				},
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(206)
+				expect(res.getHeader('content-type')).to.equal('application/test')
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+				expect(res.getHeader('content-range')).to.equal('bytes 0-12/*')
+				expect(res.getHeader('content-length')).to.equal('13')
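+				// An inclusive 0-12 range covers 13 bytes, matching the length of
+				// 'Hello, world!'.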
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+				expect(res._getBuffer().toString()).to.equal('Hello, world!')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+
+		it('should send a small range on request', function (done) {
+			const res = mock_http.createResponse({})
+			const in_stream = new stream_buffers.ReadableStreamBuffer({})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+				ranges: {
+					ranges: [[1, 11]], // Cut off first and last letter
+				},
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(206)
+				expect(res.getHeader('content-type')).to.equal('application/test')
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+				expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
+				expect(res.getHeader('content-length')).to.equal('11')
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+				expect(res._getBuffer().toString()).to.equal('ello, world')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+
+		it('should send ranges crossing buffer boundaries', function (done) {
+			const res = mock_http.createResponse({})
+			// Setting a chunk size smaller than the range should not impact the test.
+			const in_stream = new stream_buffers.ReadableStreamBuffer({
+				chunkSize: 3,
+			})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+				ranges: {
+					ranges: [[1, 11]], // Cut off first and last letter
+				},
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(206)
+				expect(res.getHeader('content-type')).to.equal('application/test')
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+				expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
+				expect(res.getHeader('content-length')).to.equal('11')
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+				expect(res._getBuffer().toString()).to.equal('ello, world')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+
+		it('should send multiple ranges', function (done) {
+			const res = mock_http.createResponse({})
+			const in_stream = new stream_buffers.ReadableStreamBuffer({})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+				ranges: {
+					ranges: [
+						[1, 3],
+						[5, 7],
+					], // Slice two ranges out
+				},
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(206)
+				expect(res.getHeader('content-type')).to.satisfy((str) => str.startsWith('multipart/byteranges'))
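+				// Multiple ranges are served as multipart/byteranges; the full
+				// content-type carries a generated boundary parameter, so only
+				// the prefix is asserted here.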
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+
+				// The buffer should contain both ranges: that would be "ell" and ", w".
+				// Parsing the entire multipart response body would be fairly
+				// elaborate, so we restrict ourselves to finding lines within it.
+				const body = res._getBuffer().toString()
+				expect(body).to.contain('\r\nContent-Range: bytes 1-3/*\r\n')
+				expect(body).to.contain('\r\nell\r\n')
+				expect(body).to.contain('\r\nContent-Range: bytes 5-7/*\r\n')
+				expect(body).to.contain('\r\n, w')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+
+		it('should deal with ranges without end', function (done) {
+			const res = mock_http.createResponse({})
+			const in_stream = new stream_buffers.ReadableStreamBuffer({})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+				ranges: {
+					ranges: [[5, undefined]], // Skip the first part, but read until end
+				},
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(206)
+				expect(res.getHeader('content-type')).to.equal('application/test')
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+				expect(res.getHeader('content-range')).to.equal('bytes 5-/*')
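+				// The open end is preserved in the content-range header
+				// ('bytes 5-/*'); the '*' stands for an unknown total size.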
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+				expect(res._getBuffer().toString()).to.equal(', world!')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+
+		it('should ignore ranges without start', function (done) {
+			const res = mock_http.createResponse({})
+			const in_stream = new stream_buffers.ReadableStreamBuffer({})
+
+			// End-of-stream callback
+			const opts = {
+				name: 'test.file',
+				type: 'application/test',
+				ranges: {
+					ranges: [[undefined, 5]], // Only last five
+				},
+			}
+			ranges.send(res, in_stream, opts, function (err) {
+				expect(err).to.not.exist
+
+				// HTTP handling
+				expect(res.statusCode).to.equal(200)
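+				// A range without a start is ignored entirely: the response falls
+				// back to a plain 200 with the full body instead of a 206 partial.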
+				expect(res.getHeader('content-type')).to.equal('application/test')
+				expect(res.getHeader('content-disposition')).to.equal('inline')
+
+				// Data/stream handling
+				expect(res._isEndCalled()).to.be.true
+				expect(res._getBuffer().toString()).to.equal('Hello, world!')
+
+				// Notify mocha that we're done.
+				done()
+			})
+
+			// Simulate file stream
+			in_stream.emit('open')
+			in_stream.put('Hello, world!')
+			in_stream.stop()
+		})
+	})
+})