handle max tasks reached overall and per account
v9n committed Oct 10, 2023
1 parent 55820f9 commit fbd7242
Showing 4 changed files with 260 additions and 19 deletions.
76 changes: 60 additions & 16 deletions pallets/automation-price/src/lib.rs
@@ -15,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! # Automation time pallet
//! # Automation price pallet
//!
//! DISCLAIMER: This pallet is still in its early stages. At this point
//! we only support scheduling two tasks per hour, and sending an on-chain
@@ -24,8 +24,6 @@
//! This pallet allows a user to schedule tasks. Tasks can be scheduled for any whole hour in the future.
//! In order to run tasks this pallet consumes up to a certain amount of weight during `on_initialize`.
//!
//! The pallet supports the following tasks:
//! * On-chain events with custom text
//!
#![cfg_attr(not(feature = "std"), no_std)]
@@ -101,7 +99,6 @@ pub mod pallet {
pub type TaskAddress<T> = (AccountOf<T>, TaskId);
pub type TaskIdList<T> = Vec<TaskAddress<T>>;

// TODO: Cleanup before merge
type ChainName = Vec<u8>;
type Exchange = Vec<u8>;

@@ -298,14 +295,21 @@ pub mod pallet {
pub type Tasks<T: Config> =
StorageDoubleMap<_, Twox64Concat, AccountOf<T>, Twox64Concat, TaskId, Task<T>>;

// A map lookup from task id -> owner id
// Track various metrics on our chain regarding tasks, such as the total task count
//
// We shard tasks by account id, so a task id alone won't allow us to look up the
// task. We need the pair of (TaskId, AccountId)
#[pallet::storage]
#[pallet::getter(fn get_task_stat)]
pub type TaskStats<T: Config> = StorageMap<_, Twox64Concat, StatType, u64>;

// Track various metrics per account regarding tasks
// To count tasks per account, relying on the Tasks storage alone means we have to iterate
// over values that share the first key (owner_id) to count.
//
// Store the task count
#[pallet::storage]
#[pallet::getter(fn taskid_to_owner)]
pub type TaskIdToOwner<T: Config> = StorageMap<_, Twox64Concat, TaskId, AccountOf<T>>;
#[pallet::getter(fn get_account_stat)]
pub type AccountStats<T: Config> =
StorageDoubleMap<_, Twox64Concat, AccountOf<T>, Twox64Concat, StatType, u64>;
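The StatType enum used to key both maps is defined outside the visible hunks. A minimal sketch of what it plausibly looks like, inferred from the two variants used in this commit (the derives are assumptions, following common Substrate conventions):

// Hypothetical sketch; the actual StatType definition is not shown in this diff.
#[derive(Clone, Copy, Encode, Decode, PartialEq, Eq, Debug, TypeInfo, MaxEncodedLen)]
pub enum StatType {
    TotalTasksOverall,
    TotalTasksPerAccount,
}

Because both maps store Option<u64> values, reads throughout this commit default missing counters to zero via .map_or(0, |v| v).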

// TaskQueue stores the tasks to be executed. To run any task, it must first be moved into this
// queue; from there our task execution picks it up and runs it
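The TaskQueue declaration itself is elided from this hunk. As a minimal sketch, assuming it simply holds the TaskIdList alias declared earlier in this file, it could look like:

// Hypothetical sketch; the real TaskQueue declaration is outside the visible diff.
#[pallet::storage]
#[pallet::getter(fn get_task_queue)]
pub type TaskQueue<T: Config> = StorageValue<_, TaskIdList<T>, ValueQuery>;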
@@ -346,6 +350,8 @@
InvalidAssetExpirationWindow,
/// Maximum tasks reached for the slot
MaxTasksReached,
/// Maximum tasks reached for a given account
MaxTasksPerAccountReached,
/// Failed to insert task
TaskInsertionFailure,
/// Failed to remove task
@@ -755,9 +761,9 @@

if let Some(task) = Self::get_task(&who, &task_id) {
Tasks::<T>::remove(&who, &task.task_id);
Self::remove_task(&task);
let key = (&task.chain, &task.exchange, &task.asset_pair, &task.trigger_function);
SortedTasksIndex::<T>::remove(&key);
Self::remove_task_from_account(&task);
Self::deposit_event(Event::TaskCancelled {
who: task.owner_id.clone(),
task_id: task.task_id.clone(),
@@ -1034,6 +1040,14 @@
task_id: task.task_id.clone(),
});

let total_task =
Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);
let total_task_per_account = Self::get_account_stat(
&task.owner_id,
StatType::TotalTasksPerAccount,
)
.map_or(0, |v| v);

let (task_action_weight, task_dispatch_error) =
match task.action.clone() {
// TODO: Run actual task later to return weight
@@ -1058,7 +1072,14 @@
),
};

Self::remove_task(&task);
Tasks::<T>::remove(task.owner_id.clone(), task_id.clone());
TaskStats::<T>::insert(StatType::TotalTasksOverall, total_task - 1);
AccountStats::<T>::insert(
task.owner_id.clone(),
StatType::TotalTasksPerAccount,
total_task_per_account - 1,
);
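One caveat with the decrement above: if a counter were ever missing or zero, total_task - 1 would underflow the u64 and panic in a debug build. A defensive variant, sketched here rather than taken from the commit, would use saturating arithmetic:

// Sketch: decrement defensively so a zero or missing counter cannot underflow.
TaskStats::<T>::insert(StatType::TotalTasksOverall, total_task.saturating_sub(1));
AccountStats::<T>::insert(
    task.owner_id.clone(),
    StatType::TotalTasksPerAccount,
    total_task_per_account.saturating_sub(1),
);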

if let Some(err) = task_dispatch_error {
Self::deposit_event(Event::<T>::TaskExecutionFailed {
@@ -1073,9 +1094,6 @@
});
}

// TODO: add this weight
Self::remove_task_from_account(&task);

Self::deposit_event(Event::<T>::TaskCompleted {
who: task.owner_id.clone(),
task_id: task.task_id.clone(),
@@ -1105,7 +1123,7 @@
}
}

fn remove_task_from_account(task: &Task<T>) {
fn remove_task(task: &Task<T>) {
//AccountTasks::<T>::remove(task.owner_id.clone(), task.task_id.clone());
}

@@ -1119,7 +1137,29 @@
Err(Error::<T>::InvalidTaskId)?
}

<Tasks<T>>::insert(task.owner_id.clone(), task.task_id.clone(), &task);
let total_task = Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);
let total_task_per_account =
Self::get_account_stat(&task.owner_id, StatType::TotalTasksPerAccount)
.map_or(0, |v| v);

// Check the overall task limit across all accounts
if total_task >= T::MaxTasksOverall::get().into() {
Err(Error::<T>::MaxTasksReached)?
}

// Check the per-account task limit
if total_task_per_account >= T::MaxTasksPerAccount::get().into() {
Err(Error::<T>::MaxTasksPerAccountReached)?
}

Tasks::<T>::insert(task.owner_id.clone(), task.task_id.clone(), &task);
// After inserting the task, increase the relevant metrics
TaskStats::<T>::insert(StatType::TotalTasksOverall, total_task + 1);
AccountStats::<T>::insert(
task.owner_id.clone(),
StatType::TotalTasksPerAccount,
total_task_per_account + 1,
);

let key = (&task.chain, &task.exchange, &task.asset_pair, &task.trigger_function);

@@ -1146,7 +1186,11 @@
SortedTasksIndex::<T>::insert(key, sorted_task_index);
}

Self::deposit_event(Event::TaskScheduled { who: task.owner_id, task_id: task.task_id });
Self::deposit_event(Event::TaskScheduled {
who: task.owner_id.clone(),
task_id: task.task_id,
});

Ok(())
}
}
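The limit checks in validate_and_schedule_task read two Config constants, MaxTasksOverall and MaxTasksPerAccount, whose declarations fall outside the visible hunks. A minimal sketch of how such bounds are typically declared and wired into a mock runtime; the exact values are assumptions chosen to be consistent with the tests below:

// Hypothetical sketch; the real declarations live in the pallet's Config trait and in mock.rs.
#[pallet::config]
pub trait Config: frame_system::Config {
    /// Upper bound on the number of tasks across all accounts.
    #[pallet::constant]
    type MaxTasksOverall: Get<u32>;
    /// Upper bound on the number of tasks a single account may schedule.
    #[pallet::constant]
    type MaxTasksPerAccount: Get<u32>;
}

// In mock.rs, plausible test values:
parameter_types! {
    pub const MaxTasksOverall: u32 = 1_000_000;
    pub const MaxTasksPerAccount: u32 = 1_000;
}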
1 change: 1 addition & 0 deletions pallets/automation-price/src/mock.rs
@@ -49,6 +49,7 @@ pub type CurrencyId = u32;
pub const DEFAULT_SCHEDULE_FEE_LOCATION: MultiLocation = MOONBASE_ASSET_LOCATION;

pub const ALICE: [u8; 32] = [1u8; 32];
pub const BOB: [u8; 32] = [2u8; 32];
pub const DELEGATOR_ACCOUNT: [u8; 32] = [3u8; 32];
pub const PROXY_ACCOUNT: [u8; 32] = [4u8; 32];

194 changes: 191 additions & 3 deletions pallets/automation-price/src/tests.rs
@@ -15,7 +15,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{mock::*, Action, AssetPayment, Config, Error, Task, TaskIdList};
use crate::{
mock::*, AccountStats, Action, AssetPayment, Config, Error, StatType, Task, TaskIdList,
TaskStats,
};
use pallet_xcmp_handler::InstructionSequence;

use frame_support::{
@@ -384,9 +387,96 @@ fn test_schedule_xcmp_task_ok() {
task_ids,
vec!(vec!(
(creator.clone(), vec!(49, 45, 48, 45, 49)),
(creator, vec!(49, 45, 48, 45, 50))
(creator.clone(), vec!(49, 45, 48, 45, 50))
))
);

// We have scheduled 2 tasks so far, both belonging to the same account
assert_eq!(
2,
AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v),
"total task count is incorrect"
);
assert_eq!(
2,
AutomationPrice::get_account_stat(creator, StatType::TotalTasksPerAccount)
.map_or(0, |v| v),
"total task count is incorrect"
);
})
}

#[test]
fn test_schedule_return_error_when_reaching_max_tasks_overall_limit() {
new_test_ext(START_BLOCK_TIME).execute_with(|| {
let para_id: u32 = 1000;
let creator = AccountId32::new(ALICE);
let call: Vec<u8> = vec![2, 4, 5];
let destination = MultiLocation::new(1, X1(Parachain(para_id)));

setup_asset(&creator, chain1.to_vec());

TaskStats::<Test>::insert(StatType::TotalTasksOverall, 1_000_000_000);

assert_noop!(
AutomationPrice::schedule_xcmp_task(
RuntimeOrigin::signed(creator.clone()),
chain1.to_vec(),
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
Box::new(NATIVE_LOCATION.into()),
Box::new(AssetPayment {
asset_location: MultiLocation::new(0, Here).into(),
amount: 10000000000000
}),
call.clone(),
Weight::from_ref_time(100_000),
Weight::from_ref_time(200_000)
),
Error::<Test>::MaxTasksReached,
);
})
}

#[test]
fn test_schedule_return_error_when_reaching_max_account_tasks_limit() {
new_test_ext(START_BLOCK_TIME).execute_with(|| {
let para_id: u32 = 1000;
let creator = AccountId32::new(ALICE);
let call: Vec<u8> = vec![2, 4, 5];
let destination = MultiLocation::new(1, X1(Parachain(para_id)));

setup_asset(&creator, chain1.to_vec());

AccountStats::<Test>::insert(creator.clone(), StatType::TotalTasksPerAccount, 1_000);

assert_noop!(
AutomationPrice::schedule_xcmp_task(
RuntimeOrigin::signed(creator),
chain1.to_vec(),
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
Box::new(NATIVE_LOCATION.into()),
Box::new(AssetPayment {
asset_location: MultiLocation::new(0, Here).into(),
amount: 10000000000000
}),
call,
Weight::from_ref_time(100_000),
Weight::from_ref_time(200_000)
),
Error::<Test>::MaxTasksPerAccountReached,
);
})
}

@@ -621,7 +711,7 @@ fn test_emit_event_when_execute_tasks() {
let overall_weight = Weight::from_ref_time(200_000);

let task = Task::<Test> {
owner_id: creator.into(),
owner_id: creator.clone().into(),
task_id: "123-0-1".as_bytes().to_vec(),
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
@@ -660,6 +750,104 @@ fn test_emit_event_when_execute_tasks() {
})
}

#[test]
fn test_decrease_task_count_when_execute_tasks() {
new_test_ext(START_BLOCK_TIME).execute_with(|| {
let creator1 = AccountId32::new(ALICE);
let creator2 = AccountId32::new(BOB);
let para_id: u32 = 1000;

let destination = MultiLocation::new(1, X1(Parachain(para_id)));
let schedule_fee = MultiLocation::default();
let execution_fee = AssetPayment {
asset_location: MultiLocation::new(1, X1(Parachain(para_id))).into(),
amount: 0,
};
let encoded_call_weight = Weight::from_ref_time(100_000);
let overall_weight = Weight::from_ref_time(200_000);

let task1 = Task::<Test> {
owner_id: creator1.clone().into(),
task_id: "123-0-1".as_bytes().to_vec(),
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
asset_pair: (asset1.to_vec(), asset2.to_vec()),
expired_at: (START_BLOCK_TIME + 10000) as u128,
trigger_function: "gt".as_bytes().to_vec(),
trigger_params: vec![123],
action: Action::XCMP {
destination,
schedule_fee,
execution_fee: execution_fee.clone(),
encoded_call: vec![1, 2, 3],
encoded_call_weight,
overall_weight,
schedule_as: None,
instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
},
};

let task2 = Task::<Test> {
owner_id: creator2.clone().into(),
task_id: "123-1-1".as_bytes().to_vec(),
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
asset_pair: (asset1.to_vec(), asset2.to_vec()),
expired_at: (START_BLOCK_TIME + 10000) as u128,
trigger_function: "gt".as_bytes().to_vec(),
trigger_params: vec![123],
action: Action::XCMP {
destination,
schedule_fee,
execution_fee,
encoded_call: vec![1, 2, 3],
encoded_call_weight,
overall_weight,
schedule_as: None,
instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
},
};

AutomationPrice::validate_and_schedule_task(task1.clone());
AutomationPrice::validate_and_schedule_task(task2.clone());

assert_eq!(
2,
AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v),
"total task count is wrong"
);
assert_eq!(
1,
AutomationPrice::get_account_stat(creator1.clone(), StatType::TotalTasksPerAccount)
.map_or(0, |v| v),
"total task count is wrong"
);
assert_eq!(
1,
AutomationPrice::get_account_stat(creator2.clone(), StatType::TotalTasksPerAccount)
.map_or(0, |v| v),
"total task count is wrong"
);

AutomationPrice::run_tasks(
vec![(task1.owner_id.clone(), task1.task_id.clone())],
100_000_000_000.into(),
);

assert_eq!(
1,
AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v),
"total task count is wrong"
);
assert_eq!(
0,
AutomationPrice::get_account_stat(creator1, StatType::TotalTasksPerAccount)
.map_or(0, |v| v),
"total task count of creator1 is wrong"
);
})
}

// When running a task, if the task has already expired, the execution engine won't run it;
// instead a TaskExpired event is emitted
#[test]