12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309 |
- //! # Storage module
- //! Storage module for the Joystream platform. Version 2.
- // Ensure we're `no_std` when compiling for Wasm.
- #![cfg_attr(not(feature = "std"), no_std)]
- #![warn(missing_docs)]
- // TODO: Remove old Storage pallet.
- // TODO: add module comment
- // TODO: make public methods as root extrinsics to enable storage-node dev mode.
- // TODO: make public methods "weight-ready".
- // TODO: review extrinsic, parameters and error names.
- #[cfg(test)]
- mod tests;
- #[cfg(feature = "runtime-benchmarks")]
- mod benchmarking;
- mod bag_manager;
- pub(crate) mod storage_bucket_picker;
- use codec::{Codec, Decode, Encode};
- use frame_support::dispatch::{DispatchError, DispatchResult};
- use frame_support::traits::{Currency, ExistenceRequirement, Get, Randomness};
- use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter};
- #[cfg(feature = "std")]
- use serde::{Deserialize, Serialize};
- use sp_arithmetic::traits::{BaseArithmetic, One, Zero};
- use sp_runtime::traits::{AccountIdConversion, MaybeSerialize, Member, Saturating};
- use sp_runtime::{ModuleId, SaturatedConversion};
- use sp_std::collections::btree_map::BTreeMap;
- use sp_std::collections::btree_set::BTreeSet;
- use sp_std::iter;
- use sp_std::marker::PhantomData;
- use sp_std::vec::Vec;
- use common::constraints::BoundedValueConstraint;
- use common::origin::ActorOriginValidator;
- use common::working_group::WorkingGroup;
- use bag_manager::BagManager;
- use storage_bucket_picker::StorageBucketPicker;
/// Public interface for the storage module.
///
/// Every state-mutating operation is paired with a `can_*` pre-check that
/// validates the same parameters and conditions without changing storage,
/// allowing callers (e.g. other pallets) to verify feasibility first.
pub trait DataObjectStorage<T: Trait> {
    /// Validates upload parameters and conditions (like global uploading block).
    /// Validates voucher usage for affected buckets.
    fn can_upload_data_objects(params: &UploadParameters<T>) -> DispatchResult;

    /// Upload new data objects.
    fn upload_data_objects(params: UploadParameters<T>) -> DispatchResult;

    /// Validates moving objects parameters.
    /// Validates voucher usage for affected buckets.
    fn can_move_data_objects(
        src_bag_id: &BagId<T>,
        dest_bag_id: &BagId<T>,
        objects: &BTreeSet<T::DataObjectId>,
    ) -> DispatchResult;

    /// Move data objects to a new bag.
    fn move_data_objects(
        src_bag_id: BagId<T>,
        dest_bag_id: BagId<T>,
        objects: BTreeSet<T::DataObjectId>,
    ) -> DispatchResult;

    /// Validates `delete_data_objects` parameters.
    /// Validates voucher usage for affected buckets.
    fn can_delete_data_objects(
        bag_id: &BagId<T>,
        objects: &BTreeSet<T::DataObjectId>,
    ) -> DispatchResult;

    /// Delete storage objects. Transfer deletion prize to the provided account.
    fn delete_data_objects(
        deletion_prize_account_id: T::AccountId,
        bag_id: BagId<T>,
        objects: BTreeSet<T::DataObjectId>,
    ) -> DispatchResult;

    /// Delete dynamic bag. Updates related storage bucket vouchers.
    fn delete_dynamic_bag(
        deletion_prize_account_id: T::AccountId,
        bag_id: DynamicBagId<T>,
    ) -> DispatchResult;

    /// Validates `delete_dynamic_bag` parameters and conditions.
    fn can_delete_dynamic_bag(bag_id: &DynamicBagId<T>) -> DispatchResult;

    /// Creates dynamic bag. The caller should provide the `BagId`.
    fn create_dynamic_bag(bag_id: DynamicBagId<T>) -> DispatchResult;

    /// Validates `create_dynamic_bag` parameters and conditions.
    fn can_create_dynamic_bag(bag_id: &DynamicBagId<T>) -> DispatchResult;
}
/// Storage pallet configuration trait.
///
/// Builds on `frame_system`, `balances` and `membership`, adding the ID types,
/// constants and origin-validation hooks this pallet needs.
pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
    /// Storage event type.
    type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;

    /// Data object ID type.
    type DataObjectId: Parameter
        + Member
        + BaseArithmetic
        + Codec
        + Default
        + Copy
        + MaybeSerialize
        + PartialEq;

    /// Storage bucket ID type.
    type StorageBucketId: Parameter
        + Member
        + BaseArithmetic
        + Codec
        + Default
        + Copy
        + MaybeSerialize
        + PartialEq;

    /// Distribution bucket ID type.
    type DistributionBucketId: Parameter
        + Member
        + BaseArithmetic
        + Codec
        + Default
        + Copy
        + MaybeSerialize
        + PartialEq;

    /// Channel ID type (part of the dynamic bag ID).
    type ChannelId: Parameter
        + Member
        + BaseArithmetic
        + Codec
        + Default
        + Copy
        + MaybeSerialize
        + PartialEq;

    /// Defines max allowed storage bucket number.
    type MaxStorageBucketNumber: Get<u64>;

    /// Defines max number of data objects per bag.
    type MaxNumberOfDataObjectsPerBag: Get<u64>;

    /// Defines a prize for a data object deletion.
    type DataObjectDeletionPrize: Get<BalanceOf<Self>>;

    /// Defines maximum size of the "hash blacklist" collection.
    type BlacklistSizeLimit: Get<u64>;

    /// The module id, used for deriving its sovereign account ID.
    type ModuleId: Get<ModuleId>;

    /// Validates member id and origin combination.
    type MemberOriginValidator: ActorOriginValidator<Self::Origin, MemberId<Self>, Self::AccountId>;

    /// "Storage buckets per bag" value constraint.
    type StorageBucketsPerBagValueConstraint: Get<StorageBucketsPerBagValueConstraint>;

    /// Defines the default dynamic bag creation policy for members.
    type DefaultMemberDynamicBagCreationPolicy: Get<DynamicBagCreationPolicy>;

    /// Defines the default dynamic bag creation policy for channels.
    type DefaultChannelDynamicBagCreationPolicy: Get<DynamicBagCreationPolicy>;

    /// Defines max random iteration number (eg.: when picking the storage buckets).
    type MaxRandomIterationNumber: Get<u64>;

    /// Something that provides randomness in the runtime.
    type Randomness: Randomness<Self::Hash>;

    /// Demand the working group leader authorization.
    /// TODO: Refactor after merging with the Olympia release.
    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;

    /// Validate origin for the worker.
    /// TODO: Refactor after merging with the Olympia release.
    fn ensure_worker_origin(origin: Self::Origin, worker_id: WorkerId<Self>) -> DispatchResult;

    /// Validate worker existence.
    /// TODO: Refactor after merging with the Olympia release.
    fn ensure_worker_exists(worker_id: &WorkerId<Self>) -> DispatchResult;
}
/// Operations with local pallet account.
///
/// Provides balance transfers between the pallet's sovereign account (derived
/// from `ModuleId`) and arbitrary accounts.
pub trait ModuleAccount<T: balances::Trait> {
    /// The module id, used for deriving its sovereign account ID.
    type ModuleId: Get<ModuleId>;

    /// The account ID of the module account (derived sub-account with an empty seed).
    fn module_account_id() -> T::AccountId {
        Self::ModuleId::get().into_sub_account(Vec::<u8>::new())
    }

    /// Transfer tokens from the module account to the destination account (spends from
    /// module account).
    /// Uses `AllowDeath`: the transfer may drop the module account below the
    /// existential deposit and reap it.
    fn withdraw(dest_account_id: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
        <Balances<T> as Currency<T::AccountId>>::transfer(
            &Self::module_account_id(),
            dest_account_id,
            amount,
            ExistenceRequirement::AllowDeath,
        )
    }

    /// Transfer tokens from the destination account to the module account (fills module account).
    /// Uses `AllowDeath`: the transfer may reap the source account.
    fn deposit(src_account_id: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
        <Balances<T> as Currency<T::AccountId>>::transfer(
            src_account_id,
            &Self::module_account_id(),
            amount,
            ExistenceRequirement::AllowDeath,
        )
    }

    /// Displays usable balance for the module account.
    fn usable_balance() -> BalanceOf<T> {
        <Balances<T>>::usable_balance(&Self::module_account_id())
    }
}
/// Implementation of the ModuleAccountHandler.
///
/// Zero-sized carrier type: it is never instantiated, only used as a type
/// parameter to get the `ModuleAccount` default methods for a given `ModuleId`.
pub struct ModuleAccountHandler<T: balances::Trait, ModId: Get<ModuleId>> {
    /// Phantom marker for the trait.
    trait_marker: PhantomData<T>,
    /// Phantom marker for the module id type.
    module_id_marker: PhantomData<ModId>,
}

impl<T: balances::Trait, ModId: Get<ModuleId>> ModuleAccount<T> for ModuleAccountHandler<T, ModId> {
    type ModuleId = ModId;
}
/// Holds parameter values impacting how exactly the creation of a new dynamic bag occurs,
/// and there is one such policy for each type of dynamic bag.
/// It describes how many storage buckets should store the bag.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct DynamicBagCreationPolicy {
    /// The number of storage buckets which should replicate the new bag.
    pub number_of_storage_buckets: u64,
}

impl DynamicBagCreationPolicy {
    // Returns true when the policy requires zero storage buckets,
    // i.e. no bucket assignment is needed for a newly created bag.
    pub(crate) fn no_storage_buckets_required(&self) -> bool {
        self.number_of_storage_buckets == 0
    }
}
/// "Storage buckets per bag" value constraint type.
pub type StorageBucketsPerBagValueConstraint = BoundedValueConstraint<u64>;

/// Local module account handler, bound to this pallet's `ModuleId`.
pub type StorageTreasury<T> = ModuleAccountHandler<T, <T as Trait>::ModuleId>;

/// IPFS hash type alias (raw bytes).
pub type ContentId = Vec<u8>;

// Alias for the Substrate balances pallet.
type Balances<T> = balances::Module<T>;

/// Alias for the member id.
pub type MemberId<T> = <T as membership::Trait>::MemberId;

/// Type identifier for worker role, which must be same as membership actor identifier
pub type WorkerId<T> = <T as membership::Trait>::ActorId;

/// Balance alias for `balances` module.
pub type BalanceOf<T> = <T as balances::Trait>::Balance;
/// The fundamental concept in the system, which represents single static binary object in the
/// system. The main goal of the system is to retain an index of all such objects, including who
/// owns them, and information about what actors are currently tasked with storing and distributing
/// them to end users. The system is unaware of the underlying content represented by such an
/// object, as it is used by different parts of the Joystream system.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct DataObject<Balance> {
    /// Defines whether the data object was accepted by a liaison.
    pub accepted: bool,
    /// A reward for the data object deletion (paid out when the object is removed).
    pub deletion_prize: Balance,
    /// Object size in bytes.
    pub size: u64,
}
/// Type alias for the StaticBagObject, instantiated with this pallet's ID and balance types.
pub type StaticBag<T> = StaticBagObject<
    <T as Trait>::DataObjectId,
    <T as Trait>::StorageBucketId,
    <T as Trait>::DistributionBucketId,
    BalanceOf<T>,
>;

/// Static bag container: data objects plus the storage/distribution buckets
/// responsible for them.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct StaticBagObject<
    DataObjectId: Ord,
    StorageBucketId: Ord,
    DistributionBucketId: Ord,
    Balance,
> {
    /// Associated data objects.
    pub objects: BTreeMap<DataObjectId, DataObject<Balance>>,
    /// Associated storage buckets.
    pub stored_by: BTreeSet<StorageBucketId>,
    /// Associated distribution buckets.
    pub distributed_by: BTreeSet<DistributionBucketId>,
}
- impl<DataObjectId: Ord, StorageBucketId: Ord, DistributionBucketId: Ord, Balance>
- StaticBagObject<DataObjectId, StorageBucketId, DistributionBucketId, Balance>
- {
- // Calculates total object size for static bag.
- pub(crate) fn objects_total_size(&self) -> u64 {
- self.objects.values().map(|obj| obj.size).sum()
- }
- // Calculates total objects number for static bag.
- pub(crate) fn objects_number(&self) -> u64 {
- self.objects.len().saturated_into()
- }
- }
/// Parameters for the data object creation.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)]
pub struct DataObjectCreationParameters {
    /// Object size in bytes.
    pub size: u64,
    /// Content identifier presented as IPFS hash (raw bytes).
    pub ipfs_content_id: Vec<u8>,
}
/// Type alias for the BagIdType.
pub type BagId<T> = BagIdType<MemberId<T>, <T as Trait>::ChannelId>;

/// Identifier for a bag: either one of the well-known static bags or a
/// dynamically created bag.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, PartialOrd, Ord)]
pub enum BagIdType<MemberId, ChannelId> {
    /// Static bag type.
    StaticBag(StaticBagId),
    /// Dynamic bag type.
    DynamicBag(DynamicBagIdType<MemberId, ChannelId>),
}

impl<MemberId, ChannelId> Default for BagIdType<MemberId, ChannelId> {
    /// Defaults to the default static bag.
    fn default() -> Self {
        Self::StaticBag(Default::default())
    }
}
/// Define dynamic bag types.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, PartialOrd, Ord, Copy)]
pub enum DynamicBagType {
    /// Member dynamic bag type.
    Member,
    /// Channel dynamic bag type.
    Channel,
}

impl Default for DynamicBagType {
    /// Defaults to the `Member` bag type.
    fn default() -> Self {
        Self::Member
    }
}
/// A type for static bags ID.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, PartialOrd, Ord)]
pub enum StaticBagId {
    /// Dedicated bag for a council.
    Council,
    /// Dedicated bag for some working group.
    WorkingGroup(WorkingGroup),
}

impl Default for StaticBagId {
    /// Defaults to the council bag.
    fn default() -> Self {
        Self::Council
    }
}
/// Type alias for the DynamicBagIdType.
pub type DynamicBagId<T> = DynamicBagIdType<MemberId<T>, <T as Trait>::ChannelId>;

/// A type for dynamic bags ID: each dynamic bag belongs to either a member or a channel.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, PartialOrd, Ord)]
pub enum DynamicBagIdType<MemberId, ChannelId> {
    /// Dynamic bag assigned to a member.
    Member(MemberId),
    /// Dynamic bag assigned to media channel.
    Channel(ChannelId),
}

impl<MemberId: Default, ChannelId> Default for DynamicBagIdType<MemberId, ChannelId> {
    /// Defaults to a member bag with the default member ID.
    fn default() -> Self {
        Self::Member(Default::default())
    }
}
- #[allow(clippy::from_over_into)] // Cannot implement From using these types.
- impl<MemberId: Default, ChannelId> Into<DynamicBagType> for DynamicBagIdType<MemberId, ChannelId> {
- fn into(self) -> DynamicBagType {
- match self {
- DynamicBagIdType::Member(_) => DynamicBagType::Member,
- DynamicBagIdType::Channel(_) => DynamicBagType::Channel,
- }
- }
- }
/// Alias for the UploadParametersObject, instantiated with this pallet's types.
pub type UploadParameters<T> = UploadParametersObject<
    MemberId<T>,
    <T as Trait>::ChannelId,
    <T as frame_system::Trait>::AccountId,
>;

/// Data wrapper structure. Helps passing the parameters to the `upload` extrinsic.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct UploadParametersObject<MemberId, ChannelId, AccountId> {
    /// Public key used authentication in upload to liaison.
    pub authentication_key: Vec<u8>,
    /// Static or dynamic bag to upload data.
    pub bag_id: BagIdType<MemberId, ChannelId>,
    /// Data object parameters.
    pub object_creation_list: Vec<DataObjectCreationParameters>,
    /// Account for the data object deletion prize (the prize is charged from this account).
    pub deletion_prize_source_account_id: AccountId,
}
/// Defines storage bucket parameters: capacity limits and current usage for
/// both total size (bytes) and object count.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct Voucher {
    /// Total size limit.
    pub size_limit: u64,
    /// Object number limit.
    pub objects_limit: u64,
    /// Current size.
    pub size_used: u64,
    /// Current object number.
    pub objects_used: u64,
}
// Defines whether we should increase or decrease parameters during some operation.
#[derive(Clone, PartialEq, Eq, Debug, Copy)]
enum OperationType {
    // Increase parameters.
    Increase,
    // Decrease parameters.
    Decrease,
}

// Helper-struct - defines voucher changes (a delta to be applied to a `Voucher`).
#[derive(Clone, PartialEq, Eq, Debug, Copy, Default)]
struct VoucherUpdate {
    /// Total number.
    pub objects_number: u64,
    /// Total objects size sum.
    pub objects_total_size: u64,
}
- impl VoucherUpdate {
- fn get_updated_voucher(&self, voucher: &Voucher, voucher_operation: OperationType) -> Voucher {
- let (objects_used, size_used) = match voucher_operation {
- OperationType::Increase => (
- voucher.objects_used.saturating_add(self.objects_number),
- voucher.size_used.saturating_add(self.objects_total_size),
- ),
- OperationType::Decrease => (
- voucher.objects_used.saturating_sub(self.objects_number),
- voucher.size_used.saturating_sub(self.objects_total_size),
- ),
- };
- Voucher {
- objects_used,
- size_used,
- ..voucher.clone()
- }
- }
- // Adds a single object data to the voucher update (updates objects size and number).
- fn add_object(&mut self, size: u64) {
- self.objects_number = self.objects_number.saturating_add(1);
- self.objects_total_size = self.objects_total_size.saturating_add(size);
- }
- }
/// Defines the storage bucket connection to the storage operator (storage WG worker).
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)]
pub enum StorageBucketOperatorStatus<WorkerId> {
    /// No connection.
    Missing,
    /// Storage operator was invited.
    InvitedStorageWorker(WorkerId),
    /// Storage operator accepted the invitation.
    StorageWorker(WorkerId),
}

impl<WorkerId> Default for StorageBucketOperatorStatus<WorkerId> {
    /// Defaults to no operator connection.
    fn default() -> Self {
        Self::Missing
    }
}
/// A commitment to hold some set of bags for long term storage. A bucket may have a bucket
/// operator, which is a single worker in the storage working group.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct StorageBucket<WorkerId> {
    /// Current storage operator status.
    pub operator_status: StorageBucketOperatorStatus<WorkerId>,
    /// Defines whether the bucket accepts new bags.
    pub accepting_new_bags: bool,
    /// Defines limits for a bucket.
    pub voucher: Voucher,
    /// Defines storage bucket metadata (like current storage provider URL).
    pub metadata: Vec<u8>,
}
// Helper-struct for the data object uploading: collects new objects with their
// assigned IDs before they are committed to storage.
#[derive(Default, Clone, Debug)]
struct DataObjectCandidates<T: Trait> {
    // Next data object ID to be saved in the storage.
    next_data_object_id: T::DataObjectId,
    // 'ID-data object' map.
    data_objects_map: BTreeMap<T::DataObjectId, DataObject<BalanceOf<T>>>,
}
/// Type alias for the DynamicBagObject, instantiated with this pallet's ID and balance types.
pub type DynamicBag<T> = DynamicBagObject<
    <T as Trait>::DataObjectId,
    <T as Trait>::StorageBucketId,
    <T as Trait>::DistributionBucketId,
    BalanceOf<T>,
>;

/// Dynamic bag container. Like a static bag, but additionally carries a
/// deletion prize returned when the bag is deleted.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
pub struct DynamicBagObject<
    DataObjectId: Ord,
    StorageBucketId: Ord,
    DistributionBucketId: Ord,
    Balance,
> {
    /// Associated data objects.
    pub objects: BTreeMap<DataObjectId, DataObject<Balance>>,
    /// Associated storage buckets.
    pub stored_by: BTreeSet<StorageBucketId>,
    /// Associated distribution buckets.
    pub distributed_by: BTreeSet<DistributionBucketId>,
    /// Dynamic bag deletion prize.
    pub deletion_prize: Balance,
}
- impl<DataObjectId: Ord, StorageBucketId: Ord, DistributionBucketId: Ord, Balance>
- DynamicBagObject<DataObjectId, StorageBucketId, DistributionBucketId, Balance>
- {
- // Calculates total object size for dynamic bag.
- pub(crate) fn objects_total_size(&self) -> u64 {
- self.objects.values().map(|obj| obj.size).sum()
- }
- // Calculates total objects number for dynamic bag.
- pub(crate) fn objects_number(&self) -> u64 {
- self.objects.len().saturated_into()
- }
- }
// Helper struct for the dynamic bag changing: accumulates the voucher delta
// and the total deletion prize across a set of data objects.
#[derive(Clone, PartialEq, Eq, Debug, Copy, Default)]
struct BagChangeInfo<Balance> {
    // Voucher update for data objects.
    voucher_update: VoucherUpdate,
    // Total deletion prize for data objects.
    total_deletion_prize: Balance,
}

impl<Balance: Saturating + Copy> BagChangeInfo<Balance> {
    // Adds a single object data to the voucher update (updates objects size, number)
    // and accumulates its deletion prize (saturating addition).
    fn add_object(&mut self, size: u64, deletion_prize: Balance) {
        self.voucher_update.add_object(size);
        self.total_deletion_prize = self.total_deletion_prize.saturating_add(deletion_prize);
    }
}
decl_storage! {
    trait Store for Module<T: Trait> as Storage {
        /// Defines whether all new uploads are blocked.
        pub UploadingBlocked get(fn uploading_blocked): bool;

        /// Working groups' and council's bags storage map.
        pub StaticBags get(fn static_bag): map hasher(blake2_128_concat)
            StaticBagId => StaticBag<T>;

        /// Dynamic bag storage map.
        pub DynamicBags get (fn dynamic_bag): map hasher(blake2_128_concat)
            DynamicBagId<T> => DynamicBag<T>;

        /// Storage bucket id counter. Starts at zero.
        pub NextStorageBucketId get(fn next_storage_bucket_id): T::StorageBucketId;

        /// Data object id counter. Starts at zero.
        pub NextDataObjectId get(fn next_data_object_id): T::DataObjectId;

        /// Total number of the storage buckets in the system.
        pub StorageBucketsNumber get(fn storage_buckets_number): u64;

        /// Storage buckets.
        pub StorageBucketById get (fn storage_bucket_by_id): map hasher(blake2_128_concat)
            T::StorageBucketId => StorageBucket<WorkerId<T>>;

        /// Blacklisted data object hashes (set represented as a map to unit).
        pub Blacklist get (fn blacklist): map hasher(blake2_128_concat) ContentId => ();

        /// Blacklist collection counter.
        pub CurrentBlacklistSize get (fn current_blacklist_size): u64;

        /// Size based pricing of new objects uploaded.
        pub DataObjectPerMegabyteFee get (fn data_object_per_mega_byte_fee): BalanceOf<T>;

        /// "Storage buckets per bag" number limit.
        pub StorageBucketsPerBagLimit get (fn storage_buckets_per_bag_limit): u64;

        /// "Max objects size for a storage bucket voucher" number limit.
        pub VoucherMaxObjectsSizeLimit get (fn voucher_max_objects_size_limit): u64;

        /// "Max objects number for a storage bucket voucher" number limit.
        pub VoucherMaxObjectsNumberLimit get (fn voucher_max_objects_number_limit): u64;

        /// DynamicBagCreationPolicy by bag type storage map.
        pub DynamicBagCreationPolicies get (fn dynamic_bag_creation_policy):
            map hasher(blake2_128_concat) DynamicBagType => DynamicBagCreationPolicy;
    }
}
decl_event! {
    /// Storage events
    pub enum Event<T>
    where
        <T as Trait>::StorageBucketId,
        WorkerId = WorkerId<T>,
        <T as Trait>::DataObjectId,
        UploadParameters = UploadParameters<T>,
        BagId = BagId<T>,
        DynamicBagId = DynamicBagId<T>,
        <T as frame_system::Trait>::AccountId,
        Balance = BalanceOf<T>,
    {
        /// Emits on creating the storage bucket.
        /// Params
        /// - storage bucket ID
        /// - invited worker
        /// - flag "accepting_new_bags"
        /// - size limit for voucher,
        /// - objects limit for voucher,
        StorageBucketCreated(StorageBucketId, Option<WorkerId>, bool, u64, u64),

        /// Emits on accepting the storage bucket invitation.
        /// Params
        /// - storage bucket ID
        /// - invited worker ID
        StorageBucketInvitationAccepted(StorageBucketId, WorkerId),

        /// Emits on updating storage buckets for bag.
        /// Params
        /// - bag ID
        /// - storage buckets to add ID collection
        /// - storage buckets to remove ID collection
        StorageBucketsUpdatedForBag(BagId, BTreeSet<StorageBucketId>, BTreeSet<StorageBucketId>),

        /// Emits on uploading data objects.
        /// Params
        /// - data objects IDs
        /// - initial uploading parameters
        // NOTE(review): "DataObjectdUploaded" misspells "DataObjectsUploaded".
        // The variant name is part of the event API decoded by clients, so
        // renaming it here would be a breaking change; kept as-is.
        DataObjectdUploaded(Vec<DataObjectId>, UploadParameters),

        /// Emits on setting the storage operator metadata.
        /// Params
        /// - storage bucket ID
        /// - invited worker ID
        /// - metadata
        StorageOperatorMetadataSet(StorageBucketId, WorkerId, Vec<u8>),

        /// Emits on setting the storage bucket voucher limits.
        /// Params
        /// - storage bucket ID
        /// - invited worker ID
        /// - new total objects size limit
        /// - new total objects number limit
        StorageBucketVoucherLimitsSet(StorageBucketId, WorkerId, u64, u64),

        /// Emits on accepting pending data objects.
        /// Params
        /// - storage bucket ID
        /// - worker ID (storage provider ID)
        /// - bag ID
        /// - pending data objects
        PendingDataObjectsAccepted(StorageBucketId, WorkerId, BagId, BTreeSet<DataObjectId>),

        /// Emits on cancelling the storage bucket invitation.
        /// Params
        /// - storage bucket ID
        StorageBucketInvitationCancelled(StorageBucketId),

        /// Emits on the storage bucket operator invitation.
        /// Params
        /// - storage bucket ID
        /// - operator worker ID (storage provider ID)
        StorageBucketOperatorInvited(StorageBucketId, WorkerId),

        /// Emits on the storage bucket operator removal.
        /// Params
        /// - storage bucket ID
        StorageBucketOperatorRemoved(StorageBucketId),

        /// Emits on changing the global uploading block status (blocking/unblocking
        /// uploads of new data objects).
        /// Params
        /// - new status
        UploadingBlockStatusUpdated(bool),

        /// Emits on changing the size-based pricing of new objects uploaded.
        /// Params
        /// - new data size fee
        DataObjectPerMegabyteFeeUpdated(Balance),

        /// Emits on changing the "Storage buckets per bag" number limit.
        /// Params
        /// - new limit
        StorageBucketsPerBagLimitUpdated(u64),

        /// Emits on changing the "Storage buckets voucher max limits".
        /// Params
        /// - new objects size limit
        /// - new objects number limit
        StorageBucketsVoucherMaxLimitsUpdated(u64, u64),

        /// Emits on moving data objects between bags.
        /// Params
        /// - source bag ID
        /// - destination bag ID
        /// - data object IDs
        DataObjectsMoved(BagId, BagId, BTreeSet<DataObjectId>),

        /// Emits on data objects deletion from bags.
        /// Params
        /// - account ID for the deletion prize
        /// - bag ID
        /// - data object IDs
        DataObjectsDeleted(AccountId, BagId, BTreeSet<DataObjectId>),

        /// Emits on storage bucket status update.
        /// Params
        /// - storage bucket ID
        /// - worker ID (storage provider ID)
        /// - new status
        StorageBucketStatusUpdated(StorageBucketId, WorkerId, bool),

        /// Emits on updating the blacklist with data hashes.
        /// Params
        /// - hashes to remove from the blacklist
        /// - hashes to add to the blacklist
        UpdateBlacklist(BTreeSet<ContentId>, BTreeSet<ContentId>),

        /// Emits on deleting a dynamic bag.
        /// Params
        /// - account ID for the deletion prize
        /// - dynamic bag ID
        DynamicBagDeleted(AccountId, DynamicBagId),

        /// Emits on creating a dynamic bag.
        /// Params
        /// - dynamic bag ID
        DynamicBagCreated(DynamicBagId),

        /// Emits on changing the deletion prize for a dynamic bag.
        /// Params
        /// - dynamic bag ID
        /// - new deletion prize
        DeletionPrizeChanged(DynamicBagId, Balance),

        /// Emits on changing the voucher for a storage bucket.
        /// Params
        /// - storage bucket ID
        /// - new voucher
        VoucherChanged(StorageBucketId, Voucher),

        /// Emits on storage bucket deleting.
        /// Params
        /// - storage bucket ID
        StorageBucketDeleted(StorageBucketId),

        /// Emits on updating the number of storage buckets in dynamic bag creation policy.
        /// Params
        /// - dynamic bag type
        /// - new number of storage buckets
        NumberOfStorageBucketsInDynamicBagCreationPolicyUpdated(DynamicBagType, u64),
    }
}
decl_error! {
    /// Storage module predefined errors
    // NOTE: `decl_error!` encodes each variant by its index, so the variant
    // order below is part of the runtime API — append new variants at the end.
    pub enum Error for Module<T: Trait>{
        /// Max storage bucket number limit exceeded.
        MaxStorageBucketNumberLimitExceeded,

        /// Empty "data object creation" collection.
        NoObjectsOnUpload,

        /// The requested storage bucket doesn't exist.
        StorageBucketDoesntExist,

        /// The requested storage bucket is not bound to a bag.
        StorageBucketIsNotBoundToBag,

        /// The requested storage bucket is already bound to a bag.
        StorageBucketIsBoundToBag,

        /// Invalid operation with invites: there is no storage bucket invitation.
        NoStorageBucketInvitation,

        /// Invalid operation with invites: storage provider was already set.
        StorageProviderAlreadySet,

        /// Storage provider must be set.
        StorageProviderMustBeSet,

        /// Invalid operation with invites: another storage provider was invited.
        DifferentStorageProviderInvited,

        /// Invalid operation with invites: storage provider was already invited.
        InvitedStorageProvider,

        /// Storage bucket id collections are empty.
        StorageBucketIdCollectionsAreEmpty,

        /// Upload data error: empty content ID provided.
        EmptyContentId,

        /// Upload data error: zero object size.
        ZeroObjectSize,

        /// Upload data error: invalid deletion prize source account.
        InvalidDeletionPrizeSourceAccount,

        /// Upload data error: data objects per bag limit exceeded.
        DataObjectsPerBagLimitExceeded,

        /// Invalid storage provider for bucket.
        InvalidStorageProvider,

        /// Insufficient balance for an operation.
        InsufficientBalance,

        /// Data object doesn't exist.
        DataObjectDoesntExist,

        /// Uploading of the new object is blocked.
        UploadingBlocked,

        /// Data object id collection is empty.
        DataObjectIdCollectionIsEmpty,

        /// Cannot move objects within the same bag.
        SourceAndDestinationBagsAreEqual,

        /// Data object hash is part of the blacklist.
        DataObjectBlacklisted,

        /// Blacklist size limit exceeded.
        BlacklistSizeLimitExceeded,

        /// Max object size limit exceeded for voucher.
        VoucherMaxObjectSizeLimitExceeded,

        /// Max object number limit exceeded for voucher.
        VoucherMaxObjectNumberLimitExceeded,

        /// Object number limit for the storage bucket reached.
        StorageBucketObjectNumberLimitReached,

        /// Objects total size limit for the storage bucket reached.
        StorageBucketObjectSizeLimitReached,

        /// Insufficient module treasury balance for an operation.
        InsufficientTreasuryBalance,

        /// Cannot delete a non-empty storage bucket.
        CannotDeleteNonEmptyStorageBucket,

        /// The `data_object_ids` extrinsic parameter collection is empty.
        DataObjectIdParamsAreEmpty,

        /// The new `StorageBucketsPerBagLimit` number is too low.
        StorageBucketsPerBagLimitTooLow,

        /// The new `StorageBucketsPerBagLimit` number is too high.
        StorageBucketsPerBagLimitTooHigh,

        /// `StorageBucketsPerBagLimit` was exceeded for a bag.
        StorageBucketPerBagLimitExceeded,

        /// The storage bucket doesn't accept new bags.
        StorageBucketDoesntAcceptNewBags,

        /// Cannot create the dynamic bag: dynamic bag exists.
        DynamicBagExists,

        /// Dynamic bag doesn't exist.
        DynamicBagDoesntExist,

        /// Storage provider operator doesn't exist.
        StorageProviderOperatorDoesntExist,
    }
}
decl_module! {
    /// _Storage_ substrate module.
    // NOTE: dispatchable order defines call indices encoded in extrinsics —
    // append new dispatchables rather than reordering existing ones.
    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
        /// Default deposit_event() handler
        fn deposit_event() = default;

        /// Predefined errors.
        type Error = Error<T>;

        /// Exports const - max allowed storage bucket number.
        const MaxStorageBucketNumber: u64 = T::MaxStorageBucketNumber::get();

        /// Exports const - max number of data objects per bag.
        const MaxNumberOfDataObjectsPerBag: u64 = T::MaxNumberOfDataObjectsPerBag::get();

        /// Exports const - a prize for a data object deletion.
        const DataObjectDeletionPrize: BalanceOf<T> = T::DataObjectDeletionPrize::get();

        /// Exports const - maximum size of the "hash blacklist" collection.
        const BlacklistSizeLimit: u64 = T::BlacklistSizeLimit::get();

        /// Exports const - "Storage buckets per bag" value constraint.
        const StorageBucketsPerBagValueConstraint: StorageBucketsPerBagValueConstraint =
            T::StorageBucketsPerBagValueConstraint::get();

        /// Exports const - the default dynamic bag creation policy for members.
        const DefaultMemberDynamicBagCreationPolicy: DynamicBagCreationPolicy =
            T::DefaultMemberDynamicBagCreationPolicy::get();

        /// Exports const - the default dynamic bag creation policy for channels.
        const DefaultChannelDynamicBagCreationPolicy: DynamicBagCreationPolicy =
            T::DefaultChannelDynamicBagCreationPolicy::get();

        // ===== Storage Lead actions =====

        /// Delete storage bucket. Must be empty. Storage operator must be missing.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn delete_storage_bucket(
            origin,
            storage_bucket_id: T::StorageBucketId,
        ){
            T::ensure_working_group_leader_origin(origin)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_missing_invitation_status(&bucket)?;

            // A bucket with stored objects cannot be removed: the objects would
            // become unreachable.
            ensure!(
                bucket.voucher.objects_used == 0,
                Error::<T>::CannotDeleteNonEmptyStorageBucket
            );

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::remove(storage_bucket_id);

            Self::deposit_event(
                RawEvent::StorageBucketDeleted(storage_bucket_id)
            );
        }

        /// Update whether uploading is globally blocked.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_uploading_blocked_status(origin, new_status: bool) {
            T::ensure_working_group_leader_origin(origin)?;

            //
            // == MUTATION SAFE ==
            //

            UploadingBlocked::put(new_status);

            Self::deposit_event(RawEvent::UploadingBlockStatusUpdated(new_status));
        }

        /// Updates size-based pricing of new objects uploaded.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_data_size_fee(origin, new_data_size_fee: BalanceOf<T>) {
            T::ensure_working_group_leader_origin(origin)?;

            //
            // == MUTATION SAFE ==
            //

            DataObjectPerMegabyteFee::<T>::put(new_data_size_fee);

            Self::deposit_event(RawEvent::DataObjectPerMegabyteFeeUpdated(new_data_size_fee));
        }

        /// Updates "Storage buckets per bag" number limit.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_storage_buckets_per_bag_limit(origin, new_limit: u64) {
            T::ensure_working_group_leader_origin(origin)?;

            // The new limit must fall within the configured min/max constraint.
            T::StorageBucketsPerBagValueConstraint::get().ensure_valid(
                new_limit,
                Error::<T>::StorageBucketsPerBagLimitTooLow,
                Error::<T>::StorageBucketsPerBagLimitTooHigh,
            )?;

            //
            // == MUTATION SAFE ==
            //

            StorageBucketsPerBagLimit::put(new_limit);

            Self::deposit_event(RawEvent::StorageBucketsPerBagLimitUpdated(new_limit));
        }

        /// Updates "Storage buckets voucher max limits".
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_storage_buckets_voucher_max_limits(
            origin,
            new_objects_size: u64,
            new_objects_number: u64,
        ) {
            T::ensure_working_group_leader_origin(origin)?;

            //
            // == MUTATION SAFE ==
            //

            VoucherMaxObjectsSizeLimit::put(new_objects_size);
            VoucherMaxObjectsNumberLimit::put(new_objects_number);

            Self::deposit_event(
                RawEvent::StorageBucketsVoucherMaxLimitsUpdated(new_objects_size, new_objects_number)
            );
        }

        /// Update number of storage buckets used in given dynamic bag creation policy.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_number_of_storage_buckets_in_dynamic_bag_creation_policy(
            origin,
            dynamic_bag_type: DynamicBagType,
            number_of_storage_buckets: u64,
        ) {
            T::ensure_working_group_leader_origin(origin)?;

            //
            // == MUTATION SAFE ==
            //

            let mut creation_policy = Self::get_dynamic_bag_creation_policy(dynamic_bag_type);

            creation_policy.number_of_storage_buckets = number_of_storage_buckets;

            DynamicBagCreationPolicies::insert(dynamic_bag_type, creation_policy);

            Self::deposit_event(
                RawEvent::NumberOfStorageBucketsInDynamicBagCreationPolicyUpdated(
                    dynamic_bag_type,
                    number_of_storage_buckets
                )
            );
        }

        /// Add and remove hashes to the current blacklist.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_blacklist(
            origin,
            remove_hashes: BTreeSet<ContentId>,
            add_hashes: BTreeSet<ContentId>
        ){
            T::ensure_working_group_leader_origin(origin)?;

            // Get only hashes that exist in the blacklist.
            let verified_remove_hashes = Self::get_existing_hashes(&remove_hashes);

            // Get only hashes that doesn't exist in the blacklist.
            let verified_add_hashes = Self::get_nonexisting_hashes(&add_hashes);

            // Project the new size before mutating so the limit check can
            // reject the whole call atomically.
            let updated_blacklist_size: u64 = Self::current_blacklist_size()
                .saturating_add(verified_add_hashes.len().saturated_into::<u64>())
                .saturating_sub(verified_remove_hashes.len().saturated_into::<u64>());

            ensure!(
                updated_blacklist_size <= T::BlacklistSizeLimit::get(),
                Error::<T>::BlacklistSizeLimitExceeded
            );

            //
            // == MUTATION SAFE ==
            //

            for cid in verified_remove_hashes.iter() {
                Blacklist::remove(cid);
            }

            for cid in verified_add_hashes.iter() {
                Blacklist::insert(cid, ());
            }

            CurrentBlacklistSize::put(updated_blacklist_size);

            // NOTE(review): the event echoes the *requested* collections, not the
            // verified subsets that were actually applied — confirm this is intended.
            Self::deposit_event(RawEvent::UpdateBlacklist(remove_hashes, add_hashes));
        }

        /// Create storage bucket.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn create_storage_bucket(
            origin,
            invite_worker: Option<WorkerId<T>>,
            accepting_new_bags: bool,
            size_limit: u64,
            objects_limit: u64,
        ) {
            T::ensure_working_group_leader_origin(origin)?;

            let voucher = Voucher {
                size_limit,
                objects_limit,
                ..Default::default()
            };

            Self::can_create_storage_bucket(&voucher, &invite_worker)?;

            //
            // == MUTATION SAFE ==
            //

            let operator_status = invite_worker
                .map(StorageBucketOperatorStatus::InvitedStorageWorker)
                .unwrap_or(StorageBucketOperatorStatus::Missing);

            let storage_bucket = StorageBucket {
                operator_status,
                accepting_new_bags,
                voucher,
                metadata: Vec::new(),
            };

            let storage_bucket_id = Self::next_storage_bucket_id();

            // NOTE(review): unchecked `+ 1` — presumably bounded by the
            // `can_create_storage_bucket` check against `MaxStorageBucketNumber`;
            // confirm against that helper (not visible in this chunk).
            StorageBucketsNumber::put(Self::storage_buckets_number() + 1);

            <NextStorageBucketId<T>>::put(storage_bucket_id + One::one());

            <StorageBucketById<T>>::insert(storage_bucket_id, storage_bucket);

            Self::deposit_event(
                RawEvent::StorageBucketCreated(
                    storage_bucket_id,
                    invite_worker,
                    accepting_new_bags,
                    size_limit,
                    objects_limit,
                )
            );
        }

        /// Updates storage buckets for a bag..
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_storage_buckets_for_bag(
            origin,
            bag_id: BagId<T>,
            add_buckets: BTreeSet<T::StorageBucketId>,
            remove_buckets: BTreeSet<T::StorageBucketId>,
        ) {
            T::ensure_working_group_leader_origin(origin)?;

            let voucher_update = Self::validate_update_storage_buckets_for_bag_params(
                &bag_id,
                &add_buckets,
                &remove_buckets,
            )?;

            //
            // == MUTATION SAFE ==
            //

            // Update vouchers.
            if !add_buckets.is_empty() {
                BagManager::<T>::add_storage_buckets(&bag_id, add_buckets.clone());

                Self::change_storage_buckets_vouchers(
                    &add_buckets,
                    &voucher_update,
                    OperationType::Increase
                );
            }

            if !remove_buckets.is_empty() {
                BagManager::<T>::remove_storage_buckets(&bag_id, remove_buckets.clone());

                Self::change_storage_buckets_vouchers(
                    &remove_buckets,
                    &voucher_update,
                    OperationType::Decrease
                );
            }

            Self::deposit_event(
                RawEvent::StorageBucketsUpdatedForBag(bag_id, add_buckets, remove_buckets)
            );
        }

        /// Cancel pending storage bucket invite. An invitation must be pending.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn cancel_storage_bucket_operator_invite(origin, storage_bucket_id: T::StorageBucketId){
            T::ensure_working_group_leader_origin(origin)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_pending_invitation_status(&bucket)?;

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.operator_status = StorageBucketOperatorStatus::Missing;
            });

            Self::deposit_event(
                RawEvent::StorageBucketInvitationCancelled(storage_bucket_id)
            );
        }

        /// Invite storage bucket operator. Must be missing.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn invite_storage_bucket_operator(
            origin,
            storage_bucket_id: T::StorageBucketId,
            operator_id: WorkerId<T>,
        ){
            T::ensure_working_group_leader_origin(origin)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_missing_invitation_status(&bucket)?;

            Self::ensure_storage_provider_operator_exists(&operator_id)?;

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.operator_status =
                    StorageBucketOperatorStatus::InvitedStorageWorker(operator_id);
            });

            Self::deposit_event(
                RawEvent::StorageBucketOperatorInvited(storage_bucket_id, operator_id)
            );
        }

        /// Removes storage bucket operator. Must be invited.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn remove_storage_bucket_operator(
            origin,
            storage_bucket_id: T::StorageBucketId,
        ){
            T::ensure_working_group_leader_origin(origin)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_storage_provider_invitation_status_for_removal(&bucket)?;

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.operator_status =
                    StorageBucketOperatorStatus::Missing;
            });

            Self::deposit_event(
                RawEvent::StorageBucketOperatorRemoved(storage_bucket_id)
            );
        }

        // ===== Storage Operator actions =====

        /// Accept the storage bucket invitation. An invitation must match the worker_id parameter.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn accept_storage_bucket_invitation(
            origin,
            worker_id: WorkerId<T>,
            storage_bucket_id: T::StorageBucketId
        ) {
            T::ensure_worker_origin(origin, worker_id)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_storage_provider_invitation_status(&bucket, worker_id)?;

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.operator_status = StorageBucketOperatorStatus::StorageWorker(worker_id);
            });

            Self::deposit_event(
                RawEvent::StorageBucketInvitationAccepted(storage_bucket_id, worker_id)
            );
        }

        /// Sets storage operator metadata (eg.: storage node URL).
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn set_storage_operator_metadata(
            origin,
            worker_id: WorkerId<T>,
            storage_bucket_id: T::StorageBucketId,
            metadata: Vec<u8>
        ) {
            T::ensure_worker_origin(origin, worker_id)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_invitation_accepted(&bucket, worker_id)?;

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.metadata = metadata.clone();
            });

            Self::deposit_event(
                RawEvent::StorageOperatorMetadataSet(storage_bucket_id, worker_id, metadata)
            );
        }

        /// Sets storage bucket voucher limits.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn set_storage_bucket_voucher_limits(
            origin,
            worker_id: WorkerId<T>,
            storage_bucket_id: T::StorageBucketId,
            new_objects_size_limit: u64,
            new_objects_number_limit: u64,
        ) {
            T::ensure_worker_origin(origin, worker_id)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_invitation_accepted(&bucket, worker_id)?;

            // New limits are capped by the global voucher maximums.
            ensure!(
                new_objects_size_limit <= Self::voucher_max_objects_size_limit(),
                Error::<T>::VoucherMaxObjectSizeLimitExceeded
            );

            ensure!(
                new_objects_number_limit <= Self::voucher_max_objects_number_limit(),
                Error::<T>::VoucherMaxObjectNumberLimitExceeded
            );

            //
            // == MUTATION SAFE ==
            //

            // Only the limits change; `objects_used`/`size_used` are preserved
            // via the struct-update syntax.
            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.voucher = Voucher{
                    size_limit: new_objects_size_limit,
                    objects_limit: new_objects_number_limit,
                    ..bucket.voucher
                };
            });

            Self::deposit_event(
                RawEvent::StorageBucketVoucherLimitsSet(
                    storage_bucket_id,
                    worker_id,
                    new_objects_size_limit,
                    new_objects_number_limit
                )
            );
        }

        /// A storage provider signals that the data object was successfully uploaded to its storage.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn accept_pending_data_objects(
            origin,
            worker_id: WorkerId<T>,
            storage_bucket_id: T::StorageBucketId,
            bag_id: BagId<T>,
            data_objects: BTreeSet<T::DataObjectId>,
        ) {
            T::ensure_worker_origin(origin, worker_id)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_invitation_accepted(&bucket, worker_id)?;

            Self::validate_accept_pending_data_objects_params(
                &bag_id,
                &data_objects,
                &storage_bucket_id
            )?;

            //
            // == MUTATION SAFE ==
            //

            for data_object_id in data_objects.iter() {
                BagManager::<T>::accept_data_objects(&bag_id, &data_object_id);
            }

            Self::deposit_event(
                RawEvent::PendingDataObjectsAccepted(storage_bucket_id, worker_id, bag_id, data_objects)
            );
        }

        /// Update whether new bags are being accepted for storage.
        #[weight = 10_000_000] // TODO: adjust weight
        pub fn update_storage_bucket_status(
            origin,
            worker_id: WorkerId<T>,
            storage_bucket_id: T::StorageBucketId,
            accepting_new_bags: bool
        ) {
            T::ensure_worker_origin(origin, worker_id)?;

            let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;

            Self::ensure_bucket_invitation_accepted(&bucket, worker_id)?;

            //
            // == MUTATION SAFE ==
            //

            <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
                bucket.accepting_new_bags = accepting_new_bags;
            });

            Self::deposit_event(
                RawEvent::StorageBucketStatusUpdated(
                    storage_bucket_id,
                    worker_id,
                    accepting_new_bags
                )
            );
        }
    }
}
- // Public methods
impl<T: Trait> DataObjectStorage<T> for Module<T> {
    /// Dry-run validation for `upload_data_objects` (no state changes).
    fn can_upload_data_objects(params: &UploadParameters<T>) -> DispatchResult {
        Self::validate_upload_data_objects_parameters(params).map(|_| ())
    }

    /// Uploads data objects: collects the deletion prize into the module
    /// treasury, slashes the size-based fee, registers the objects in the bag
    /// and increases the bag's bucket vouchers.
    fn upload_data_objects(params: UploadParameters<T>) -> DispatchResult {
        let bag_change = Self::validate_upload_data_objects_parameters(&params)?;

        //
        // == MUTATION SAFE ==
        //

        let data = Self::create_data_objects(params.object_creation_list.clone());

        // NOTE(review): this deposit can still fail after the "MUTATION SAFE"
        // marker; presumably extrinsic-level transactionality reverts prior
        // writes — confirm.
        <StorageTreasury<T>>::deposit(
            &params.deletion_prize_source_account_id,
            bag_change.total_deletion_prize,
        )?;

        Self::slash_data_size_fee(
            &params.deletion_prize_source_account_id,
            bag_change.voucher_update.objects_total_size,
        );

        <NextDataObjectId<T>>::put(data.next_data_object_id);

        BagManager::<T>::append_data_objects(&params.bag_id, &data.data_objects_map);

        let operation_type = OperationType::Increase;

        // Add a deletion prize for the dynamic bag only.
        Self::change_deletion_prize_for_bag(
            &params.bag_id,
            bag_change.total_deletion_prize,
            operation_type,
        );

        Self::change_storage_bucket_vouchers_for_bag(
            &params.bag_id,
            &bag_change.voucher_update,
            operation_type,
        );

        Self::deposit_event(RawEvent::DataObjectdUploaded(
            data.data_objects_map.keys().cloned().collect(),
            params,
        ));

        Ok(())
    }

    /// Dry-run validation for `move_data_objects` (no state changes).
    fn can_move_data_objects(
        src_bag_id: &BagId<T>,
        dest_bag_id: &BagId<T>,
        objects: &BTreeSet<<T as Trait>::DataObjectId>,
    ) -> DispatchResult {
        Self::validate_data_objects_on_moving(src_bag_id, dest_bag_id, objects).map(|_| ())
    }

    /// Moves data objects between two distinct bags, transferring both the
    /// voucher usage and the deletion prize from source to destination.
    fn move_data_objects(
        src_bag_id: BagId<T>,
        dest_bag_id: BagId<T>,
        objects: BTreeSet<T::DataObjectId>,
    ) -> DispatchResult {
        let bag_change =
            Self::validate_data_objects_on_moving(&src_bag_id, &dest_bag_id, &objects)?;

        //
        // == MUTATION SAFE ==
        //

        BagManager::<T>::move_data_objects(&src_bag_id, &dest_bag_id, &objects);

        // Change source bag.
        let src_operation_type = OperationType::Decrease;
        Self::change_storage_bucket_vouchers_for_bag(
            &src_bag_id,
            &bag_change.voucher_update,
            src_operation_type,
        );
        Self::change_deletion_prize_for_bag(
            &src_bag_id,
            bag_change.total_deletion_prize,
            src_operation_type,
        );

        // Change destination bag.
        let dest_operation_type = OperationType::Increase;
        Self::change_storage_bucket_vouchers_for_bag(
            &dest_bag_id,
            &bag_change.voucher_update,
            dest_operation_type,
        );
        Self::change_deletion_prize_for_bag(
            &dest_bag_id,
            bag_change.total_deletion_prize,
            dest_operation_type,
        );

        Self::deposit_event(RawEvent::DataObjectsMoved(src_bag_id, dest_bag_id, objects));

        Ok(())
    }

    /// Dry-run validation for `delete_data_objects` (no state changes).
    fn can_delete_data_objects(
        bag_id: &BagId<T>,
        objects: &BTreeSet<T::DataObjectId>,
    ) -> DispatchResult {
        Self::validate_delete_data_objects_params(bag_id, objects).map(|_| ())
    }

    /// Deletes data objects from a bag, paying the deletion prize back from the
    /// module treasury and decreasing the bag's bucket vouchers.
    fn delete_data_objects(
        deletion_prize_account_id: T::AccountId,
        bag_id: BagId<T>,
        objects: BTreeSet<T::DataObjectId>,
    ) -> DispatchResult {
        let bag_change = Self::validate_delete_data_objects_params(&bag_id, &objects)?;

        //
        // == MUTATION SAFE ==
        //

        <StorageTreasury<T>>::withdraw(
            &deletion_prize_account_id,
            bag_change.total_deletion_prize,
        )?;

        for data_object_id in objects.iter() {
            BagManager::<T>::delete_data_object(&bag_id, &data_object_id);
        }

        let operation_type = OperationType::Decrease;

        Self::change_storage_bucket_vouchers_for_bag(
            &bag_id,
            &bag_change.voucher_update,
            operation_type,
        );

        // Subtract deletion prize for dynamic bags only.
        Self::change_deletion_prize_for_bag(
            &bag_id,
            bag_change.total_deletion_prize,
            operation_type,
        );

        Self::deposit_event(RawEvent::DataObjectsDeleted(
            deletion_prize_account_id,
            bag_id,
            objects,
        ));

        Ok(())
    }

    /// Dry-run validation for `delete_dynamic_bag` (no state changes).
    fn can_delete_dynamic_bag(bag_id: &DynamicBagId<T>) -> DispatchResult {
        Self::validate_delete_dynamic_bag_params(bag_id).map(|_| ())
    }

    /// Deletes a dynamic bag: refunds the accumulated deletion prize from the
    /// treasury, releases bucket vouchers and removes the bag from storage.
    fn delete_dynamic_bag(
        deletion_prize_account_id: T::AccountId,
        bag_id: DynamicBagId<T>,
    ) -> DispatchResult {
        let bag_change = Self::validate_delete_dynamic_bag_params(&bag_id)?;

        //
        // == MUTATION SAFE ==
        //

        <StorageTreasury<T>>::withdraw(
            &deletion_prize_account_id,
            bag_change.total_deletion_prize,
        )?;

        let dynamic_bag = Self::dynamic_bag(&bag_id);

        Self::change_storage_buckets_vouchers(
            &dynamic_bag.stored_by,
            &bag_change.voucher_update,
            OperationType::Decrease,
        );

        <DynamicBags<T>>::remove(&bag_id);

        Self::deposit_event(RawEvent::DynamicBagDeleted(
            deletion_prize_account_id,
            bag_id,
        ));

        Ok(())
    }

    /// Creates a dynamic bag and assigns it to storage buckets picked per the
    /// dynamic bag creation policy.
    fn create_dynamic_bag(bag_id: DynamicBagId<T>) -> DispatchResult {
        Self::validate_create_dynamic_bag_params(&bag_id)?;

        //
        // == MUTATION SAFE ==
        //

        let storage_buckets = Self::pick_storage_buckets_for_dynamic_bag(bag_id.clone().into());

        let bag = DynamicBag::<T> {
            stored_by: storage_buckets,
            ..Default::default()
        };

        <DynamicBags<T>>::insert(&bag_id, bag);

        Self::deposit_event(RawEvent::DynamicBagCreated(bag_id));

        Ok(())
    }

    /// Dry-run validation for `create_dynamic_bag` (no state changes).
    fn can_create_dynamic_bag(bag_id: &DynamicBagId<T>) -> DispatchResult {
        Self::validate_create_dynamic_bag_params(bag_id)
    }
}
- impl<T: Trait> Module<T> {
- // Validates dynamic bag creation params and conditions.
- fn validate_create_dynamic_bag_params(bag_id: &DynamicBagId<T>) -> DispatchResult {
- ensure!(
- !<DynamicBags<T>>::contains_key(&bag_id),
- Error::<T>::DynamicBagExists
- );
- Ok(())
- }
- // Validates dynamic bag deletion params and conditions.
- fn validate_delete_dynamic_bag_params(
- bag_id: &DynamicBagId<T>,
- ) -> Result<BagChangeInfo<BalanceOf<T>>, DispatchError> {
- BagManager::<T>::ensure_bag_exists(&BagId::<T>::DynamicBag(bag_id.clone()))?;
- let dynamic_bag = Self::dynamic_bag(bag_id);
- let voucher_update = VoucherUpdate {
- objects_number: dynamic_bag.objects_number(),
- objects_total_size: dynamic_bag.objects_total_size(),
- };
- ensure!(
- <StorageTreasury<T>>::usable_balance() >= dynamic_bag.deletion_prize,
- Error::<T>::InsufficientTreasuryBalance
- );
- let bag_change = BagChangeInfo {
- voucher_update,
- total_deletion_prize: dynamic_bag.deletion_prize,
- };
- Ok(bag_change)
- }
- // Ensures the existence of the storage bucket.
- // Returns the StorageBucket object or error.
- fn ensure_storage_bucket_exists(
- storage_bucket_id: &T::StorageBucketId,
- ) -> Result<StorageBucket<WorkerId<T>>, Error<T>> {
- ensure!(
- <StorageBucketById<T>>::contains_key(storage_bucket_id),
- Error::<T>::StorageBucketDoesntExist
- );
- Ok(Self::storage_bucket_by_id(storage_bucket_id))
- }
- // Ensures the correct invitation for the storage bucket and storage provider. Storage provider
- // must be invited.
- fn ensure_bucket_storage_provider_invitation_status(
- bucket: &StorageBucket<WorkerId<T>>,
- worker_id: WorkerId<T>,
- ) -> DispatchResult {
- match bucket.operator_status {
- StorageBucketOperatorStatus::Missing => {
- Err(Error::<T>::NoStorageBucketInvitation.into())
- }
- StorageBucketOperatorStatus::StorageWorker(_) => {
- Err(Error::<T>::StorageProviderAlreadySet.into())
- }
- StorageBucketOperatorStatus::InvitedStorageWorker(invited_worker_id) => {
- ensure!(
- worker_id == invited_worker_id,
- Error::<T>::DifferentStorageProviderInvited
- );
- Ok(())
- }
- }
- }
- // Ensures the correct invitation for the storage bucket and storage provider for removal.
- // Must be invited storage provider.
- fn ensure_bucket_storage_provider_invitation_status_for_removal(
- bucket: &StorageBucket<WorkerId<T>>,
- ) -> DispatchResult {
- if let StorageBucketOperatorStatus::StorageWorker(_) = bucket.operator_status {
- Ok(())
- } else {
- Err(Error::<T>::StorageProviderMustBeSet.into())
- }
- }
- // Ensures the correct invitation for the storage bucket and storage provider. Must be pending.
- fn ensure_bucket_pending_invitation_status(
- bucket: &StorageBucket<WorkerId<T>>,
- ) -> DispatchResult {
- match bucket.operator_status {
- StorageBucketOperatorStatus::Missing => {
- Err(Error::<T>::NoStorageBucketInvitation.into())
- }
- StorageBucketOperatorStatus::StorageWorker(_) => {
- Err(Error::<T>::StorageProviderAlreadySet.into())
- }
- StorageBucketOperatorStatus::InvitedStorageWorker(_) => Ok(()),
- }
- }
- // Ensures the missing invitation for the storage bucket and storage provider.
- fn ensure_bucket_missing_invitation_status(
- bucket: &StorageBucket<WorkerId<T>>,
- ) -> DispatchResult {
- match bucket.operator_status {
- StorageBucketOperatorStatus::Missing => Ok(()),
- StorageBucketOperatorStatus::StorageWorker(_) => {
- Err(Error::<T>::StorageProviderAlreadySet.into())
- }
- StorageBucketOperatorStatus::InvitedStorageWorker(_) => {
- Err(Error::<T>::InvitedStorageProvider.into())
- }
- }
- }
- // Ensures correct storage provider for the storage bucket.
- fn ensure_bucket_invitation_accepted(
- bucket: &StorageBucket<WorkerId<T>>,
- worker_id: WorkerId<T>,
- ) -> DispatchResult {
- match bucket.operator_status {
- StorageBucketOperatorStatus::Missing => {
- Err(Error::<T>::StorageProviderMustBeSet.into())
- }
- StorageBucketOperatorStatus::InvitedStorageWorker(_) => {
- Err(Error::<T>::InvalidStorageProvider.into())
- }
- StorageBucketOperatorStatus::StorageWorker(invited_worker_id) => {
- ensure!(
- worker_id == invited_worker_id,
- Error::<T>::InvalidStorageProvider
- );
- Ok(())
- }
- }
- }
- // Create data objects from the creation data.
- fn create_data_objects(
- object_creation_list: Vec<DataObjectCreationParameters>,
- ) -> DataObjectCandidates<T> {
- let deletion_prize = T::DataObjectDeletionPrize::get();
- let data_objects = object_creation_list.iter().cloned().map(|obj| DataObject {
- accepted: false,
- deletion_prize,
- size: obj.size,
- });
- let mut next_data_object_id = Self::next_data_object_id();
- let ids = iter::repeat_with(|| {
- let id = next_data_object_id;
- next_data_object_id += One::one();
- id
- })
- .take(data_objects.len());
- let data_objects_map = ids.zip(data_objects).collect::<BTreeMap<_, _>>();
- DataObjectCandidates {
- next_data_object_id,
- data_objects_map,
- }
- }
- // Ensures validity of the `accept_pending_data_objects` extrinsic parameters
- fn validate_accept_pending_data_objects_params(
- bag_id: &BagId<T>,
- data_objects: &BTreeSet<T::DataObjectId>,
- storage_bucket_id: &T::StorageBucketId,
- ) -> DispatchResult {
- ensure!(
- !data_objects.is_empty(),
- Error::<T>::DataObjectIdParamsAreEmpty
- );
- BagManager::<T>::ensure_bag_exists(bag_id)?;
- BagManager::<T>::ensure_storage_bucket_bound(bag_id, storage_bucket_id)?;
- for data_object_id in data_objects.iter() {
- BagManager::<T>::ensure_data_object_existence(bag_id, data_object_id)?;
- }
- Ok(())
- }
// Ensures validity of the `update_storage_buckets_for_bag` extrinsic parameters.
// Checks, in order: non-empty change set, bag existence, per-bag bucket limit,
// removal candidates exist and are bound, addition candidates exist, accept new
// bags and are not already bound, and that additions won't overflow any voucher.
// Returns the voucher update (current bag totals) to apply to changed buckets.
fn validate_update_storage_buckets_for_bag_params(
    bag_id: &BagId<T>,
    add_buckets: &BTreeSet<T::StorageBucketId>,
    remove_buckets: &BTreeSet<T::StorageBucketId>,
) -> Result<VoucherUpdate, DispatchError> {
    ensure!(
        !add_buckets.is_empty() || !remove_buckets.is_empty(),
        Error::<T>::StorageBucketIdCollectionsAreEmpty
    );

    BagManager::<T>::ensure_bag_exists(&bag_id)?;

    let storage_bucket_ids = BagManager::<T>::get_storage_bucket_ids(bag_id);

    // NOTE(review): this compares the bag's *current* bucket count against the
    // limit; the count after applying `add_buckets`/`remove_buckets` is not
    // checked, so an update may leave the bag above the limit — confirm intent.
    ensure!(
        storage_bucket_ids.len().saturated_into::<u64>()
            <= Self::storage_buckets_per_bag_limit(),
        Error::<T>::StorageBucketPerBagLimitExceeded
    );

    // Buckets slated for removal must exist and be bound to this bag.
    for bucket_id in remove_buckets.iter() {
        ensure!(
            <StorageBucketById<T>>::contains_key(&bucket_id),
            Error::<T>::StorageBucketDoesntExist
        );

        ensure!(
            storage_bucket_ids.contains(&bucket_id),
            Error::<T>::StorageBucketIsNotBoundToBag
        );
    }

    // Buckets slated for addition must exist, accept new bags, and not
    // already be bound to this bag.
    for bucket_id in add_buckets.iter() {
        let bucket = Self::ensure_storage_bucket_exists(bucket_id)?;

        ensure!(
            bucket.accepting_new_bags,
            Error::<T>::StorageBucketDoesntAcceptNewBags
        );

        ensure!(
            !storage_bucket_ids.contains(&bucket_id),
            Error::<T>::StorageBucketIsBoundToBag
        );
    }

    // The bag's current totals become the voucher delta for each added/removed bucket.
    let objects_total_size = BagManager::<T>::get_data_objects_total_size(bag_id);
    let objects_number = BagManager::<T>::get_data_objects_number(bag_id);

    let voucher_update = VoucherUpdate {
        objects_number,
        objects_total_size,
    };

    Self::check_buckets_for_overflow(&add_buckets, &voucher_update)?;

    Ok(voucher_update)
}
- // Validate the "Move data objects between bags" operation data.
- fn validate_data_objects_on_moving(
- src_bag_id: &BagId<T>,
- dest_bag_id: &BagId<T>,
- object_ids: &BTreeSet<T::DataObjectId>,
- ) -> Result<BagChangeInfo<BalanceOf<T>>, DispatchError> {
- ensure!(
- *src_bag_id != *dest_bag_id,
- Error::<T>::SourceAndDestinationBagsAreEqual
- );
- ensure!(
- !object_ids.is_empty(),
- Error::<T>::DataObjectIdCollectionIsEmpty
- );
- BagManager::<T>::ensure_bag_exists(src_bag_id)?;
- BagManager::<T>::ensure_bag_exists(dest_bag_id)?;
- let mut bag_change = BagChangeInfo::<BalanceOf<T>>::default();
- for object_id in object_ids.iter() {
- let data_object = BagManager::<T>::ensure_data_object_existence(src_bag_id, object_id)?;
- bag_change.add_object(data_object.size, data_object.deletion_prize);
- }
- Self::check_bag_for_buckets_overflow(dest_bag_id, &bag_change.voucher_update)?;
- Ok(bag_change)
- }
- // Returns only existing hashes in the blacklist from the original collection.
- #[allow(clippy::redundant_closure)] // doesn't work with Substrate storage functions.
- fn get_existing_hashes(hashes: &BTreeSet<ContentId>) -> BTreeSet<ContentId> {
- Self::get_hashes_by_predicate(hashes, |cid| Blacklist::contains_key(cid))
- }
- // Returns only nonexisting hashes in the blacklist from the original collection.
- fn get_nonexisting_hashes(hashes: &BTreeSet<ContentId>) -> BTreeSet<ContentId> {
- Self::get_hashes_by_predicate(hashes, |cid| !Blacklist::contains_key(cid))
- }
- // Returns hashes from the original collection selected by predicate.
- fn get_hashes_by_predicate<P: FnMut(&&ContentId) -> bool>(
- hashes: &BTreeSet<ContentId>,
- predicate: P,
- ) -> BTreeSet<ContentId> {
- hashes
- .iter()
- .filter(predicate)
- .cloned()
- .collect::<BTreeSet<_>>()
- }
- // Ensure the new bucket could be created. It also validates some parameters.
- fn can_create_storage_bucket(
- voucher: &Voucher,
- invited_worker: &Option<WorkerId<T>>,
- ) -> DispatchResult {
- ensure!(
- Self::storage_buckets_number() < T::MaxStorageBucketNumber::get(),
- Error::<T>::MaxStorageBucketNumberLimitExceeded
- );
- ensure!(
- voucher.size_limit <= Self::voucher_max_objects_size_limit(),
- Error::<T>::VoucherMaxObjectSizeLimitExceeded
- );
- ensure!(
- voucher.objects_limit <= Self::voucher_max_objects_number_limit(),
- Error::<T>::VoucherMaxObjectNumberLimitExceeded
- );
- if let Some(operator_id) = invited_worker {
- Self::ensure_storage_provider_operator_exists(operator_id)?;
- }
- Ok(())
- }
- // Update total objects size and number for all storage buckets assigned to a bag.
- fn change_storage_bucket_vouchers_for_bag(
- bag_id: &BagId<T>,
- voucher_update: &VoucherUpdate,
- voucher_operation: OperationType,
- ) {
- let bucket_ids = BagManager::<T>::get_storage_bucket_ids(bag_id);
- Self::change_storage_buckets_vouchers(&bucket_ids, voucher_update, voucher_operation);
- }
- // Update total objects size and number for provided storage buckets.
- fn change_storage_buckets_vouchers(
- bucket_ids: &BTreeSet<T::StorageBucketId>,
- voucher_update: &VoucherUpdate,
- voucher_operation: OperationType,
- ) {
- for bucket_id in bucket_ids.iter() {
- <StorageBucketById<T>>::mutate(bucket_id, |bucket| {
- bucket.voucher =
- voucher_update.get_updated_voucher(&bucket.voucher, voucher_operation);
- Self::deposit_event(RawEvent::VoucherChanged(*bucket_id, bucket.voucher.clone()));
- });
- }
- }
- // Validates `delete_data_objects` parameters.
- // Returns voucher update for an affected bag.
- fn validate_delete_data_objects_params(
- bag_id: &BagId<T>,
- data_object_ids: &BTreeSet<T::DataObjectId>,
- ) -> Result<BagChangeInfo<BalanceOf<T>>, DispatchError> {
- ensure!(
- !data_object_ids.is_empty(),
- Error::<T>::DataObjectIdParamsAreEmpty
- );
- BagManager::<T>::ensure_bag_exists(bag_id)?;
- let mut bag_change = BagChangeInfo::default();
- for data_object_id in data_object_ids.iter() {
- let data_object =
- BagManager::<T>::ensure_data_object_existence(bag_id, data_object_id)?;
- bag_change.add_object(data_object.size, data_object.deletion_prize);
- }
- ensure!(
- <StorageTreasury<T>>::usable_balance() >= bag_change.total_deletion_prize,
- Error::<T>::InsufficientTreasuryBalance
- );
- Ok(bag_change)
- }
- // Validates upload parameters and conditions (like global uploading block).
- // Returns voucher update parameters for the storage buckets.
- fn validate_upload_data_objects_parameters(
- params: &UploadParameters<T>,
- ) -> Result<BagChangeInfo<BalanceOf<T>>, DispatchError> {
- // TODO: consider refactoring and splitting the method.
- // Check global uploading block.
- ensure!(!Self::uploading_blocked(), Error::<T>::UploadingBlocked);
- // Check object creation list validity.
- ensure!(
- !params.object_creation_list.is_empty(),
- Error::<T>::NoObjectsOnUpload
- );
- BagManager::<T>::ensure_bag_exists(¶ms.bag_id)?;
- let bag_objects_number = BagManager::<T>::get_data_objects_number(¶ms.bag_id.clone());
- let new_objects_number: u64 = params.object_creation_list.len().saturated_into();
- let total_possible_data_objects_number: u64 = new_objects_number + bag_objects_number;
- // Check bag capacity.
- ensure!(
- total_possible_data_objects_number <= T::MaxNumberOfDataObjectsPerBag::get(),
- Error::<T>::DataObjectsPerBagLimitExceeded
- );
- let mut bag_change = BagChangeInfo::default();
- // Check data objects.
- for object_params in params.object_creation_list.iter() {
- // Should be non-empty hash.
- ensure!(
- !object_params.ipfs_content_id.is_empty(),
- Error::<T>::EmptyContentId
- );
- // Should be non-zero size.
- ensure!(object_params.size != 0, Error::<T>::ZeroObjectSize);
- // Should not be blacklisted.
- ensure!(
- !Blacklist::contains_key(&object_params.ipfs_content_id),
- Error::<T>::DataObjectBlacklisted,
- );
- bag_change.add_object(object_params.size, T::DataObjectDeletionPrize::get());
- }
- let size_fee =
- Self::calculate_data_storage_fee(bag_change.voucher_update.objects_total_size);
- let usable_balance =
- Balances::<T>::usable_balance(¶ms.deletion_prize_source_account_id);
- // Check account balance to satisfy deletion prize and storage fee.
- let total_fee = bag_change.total_deletion_prize + size_fee;
- ensure!(usable_balance >= total_fee, Error::<T>::InsufficientBalance);
- // Check buckets.
- Self::check_bag_for_buckets_overflow(¶ms.bag_id, &bag_change.voucher_update)?;
- Ok(bag_change)
- }
- // Iterates through buckets in the bag. Verifies voucher parameters to fit the new limits:
- // objects number and total objects size.
- fn check_bag_for_buckets_overflow(
- bag_id: &BagId<T>,
- voucher_update: &VoucherUpdate,
- ) -> DispatchResult {
- let bucket_ids = BagManager::<T>::get_storage_bucket_ids(bag_id);
- Self::check_buckets_for_overflow(&bucket_ids, voucher_update)
- }
- // Iterates through buckets. Verifies voucher parameters to fit the new limits:
- // objects number and total objects size.
- fn check_buckets_for_overflow(
- bucket_ids: &BTreeSet<T::StorageBucketId>,
- voucher_update: &VoucherUpdate,
- ) -> DispatchResult {
- for bucket_id in bucket_ids.iter() {
- let bucket = Self::storage_bucket_by_id(bucket_id);
- // Total object number limit is not exceeded.
- ensure!(
- voucher_update.objects_number + bucket.voucher.objects_used
- <= bucket.voucher.objects_limit,
- Error::<T>::StorageBucketObjectNumberLimitReached
- );
- // Total object size limit is not exceeded.
- ensure!(
- voucher_update.objects_total_size + bucket.voucher.size_used
- <= bucket.voucher.size_limit,
- Error::<T>::StorageBucketObjectSizeLimitReached
- );
- }
- Ok(())
- }
- // Increase or decrease a deletion prize for a dynamic bag.
- fn change_deletion_prize_for_dynamic_bag(
- dynamic_bag_id: &DynamicBagId<T>,
- deletion_prize: BalanceOf<T>,
- operation: OperationType,
- ) {
- <DynamicBags<T>>::mutate(dynamic_bag_id, |bag| {
- bag.deletion_prize = match operation {
- OperationType::Increase => bag.deletion_prize.saturating_add(deletion_prize),
- OperationType::Decrease => bag.deletion_prize.saturating_sub(deletion_prize),
- };
- Self::deposit_event(RawEvent::DeletionPrizeChanged(
- dynamic_bag_id.clone(),
- bag.deletion_prize,
- ));
- });
- }
- // Increase or decrease a deletion prize for a dynamic bag.
- // Affect dynamic bags only. Skips static bags.
- fn change_deletion_prize_for_bag(
- bag_id: &BagId<T>,
- deletion_prize: BalanceOf<T>,
- operation: OperationType,
- ) {
- if let BagId::<T>::DynamicBag(ref dynamic_bag_id) = bag_id {
- Self::change_deletion_prize_for_dynamic_bag(dynamic_bag_id, deletion_prize, operation);
- }
- }
- // Calculate data storage fee based on size. Fee-value uses megabytes as measure value.
- // Data size will be rounded to nearest greater MB integer.
- pub(crate) fn calculate_data_storage_fee(bytes: u64) -> BalanceOf<T> {
- let mb_fee = Self::data_object_per_mega_byte_fee();
- const ONE_MB: u64 = 1_048_576;
- let mut megabytes = bytes / ONE_MB;
- if bytes % ONE_MB > 0 {
- megabytes += 1; // rounding to the nearest greater integer
- }
- mb_fee.saturating_mul(megabytes.saturated_into())
- }
- // Slash data size fee if fee value is set to non-zero.
- fn slash_data_size_fee(account_id: &T::AccountId, bytes: u64) {
- let fee = Self::calculate_data_storage_fee(bytes);
- if fee != Zero::zero() {
- let _ = Balances::<T>::slash(account_id, fee);
- }
- }
- // Selects storage bucket ID sets to assign to the storage bucket.
- pub(crate) fn pick_storage_buckets_for_dynamic_bag(
- bag_type: DynamicBagType,
- ) -> BTreeSet<T::StorageBucketId> {
- StorageBucketPicker::<T>::pick_storage_buckets(bag_type)
- }
- // Get default dynamic bag policy by bag type.
- fn get_default_dynamic_bag_creation_policy(
- bag_type: DynamicBagType,
- ) -> DynamicBagCreationPolicy {
- match bag_type {
- DynamicBagType::Member => T::DefaultMemberDynamicBagCreationPolicy::get(),
- DynamicBagType::Channel => T::DefaultChannelDynamicBagCreationPolicy::get(),
- }
- }
- // Loads dynamic bag creation policy or use default values.
- pub(crate) fn get_dynamic_bag_creation_policy(
- bag_type: DynamicBagType,
- ) -> DynamicBagCreationPolicy {
- if DynamicBagCreationPolicies::contains_key(bag_type) {
- return Self::dynamic_bag_creation_policy(bag_type);
- }
- Self::get_default_dynamic_bag_creation_policy(bag_type)
- }
- // Verifies storage provider operator existence.
- fn ensure_storage_provider_operator_exists(operator_id: &WorkerId<T>) -> DispatchResult {
- ensure!(
- T::ensure_worker_exists(operator_id).is_ok(),
- Error::<T>::StorageProviderOperatorDoesntExist
- );
- Ok(())
- }
- }
|