Skip to content

Commit

Permalink
chore: improve permission
Browse files Browse the repository at this point in the history
  • Loading branch information
zensh committed Jul 12, 2024
1 parent 79bfcd9 commit 65e4eb6
Show file tree
Hide file tree
Showing 7 changed files with 116 additions and 116 deletions.
4 changes: 2 additions & 2 deletions src/ic_oss/src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ pub struct UploadFileChunksResult {
impl Client {
pub fn new(agent: Arc<Agent>, bucket: Principal) -> Client {
Client {
chunk_size: MAX_CHUNK_SIZE,
chunk_size: CHUNK_SIZE,
concurrency: 20,
agent,
bucket,
Expand All @@ -40,7 +40,7 @@ impl Client {
}

pub fn set_chunk_size(&mut self, chunk_size: u32) {
if chunk_size > 1024 && chunk_size <= MAX_CHUNK_SIZE {
if chunk_size > 1024 && chunk_size <= CHUNK_SIZE {
self.chunk_size = chunk_size;
}
}
Expand Down
12 changes: 6 additions & 6 deletions src/ic_oss_bucket/src/api_http.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use hyperx::header::{Charset, ContentDisposition, DispositionParam, DispositionT
use hyperx::header::{ContentRangeSpec, Header, IfRange, Range, Raw};
use ic_http_certification::{HeaderField, HttpRequest};
use ic_oss_types::{
file::{UrlFileParam, MAX_CHUNK_SIZE, MAX_FILE_SIZE_PER_CALL},
file::{UrlFileParam, CHUNK_SIZE, MAX_FILE_SIZE_PER_CALL},
to_cbor_bytes,
};
use ic_stable_structures::Storable;
Expand Down Expand Up @@ -343,10 +343,10 @@ fn range_response(
metadata: store::FileMetadata,
(start, end): (u64, u64),
) -> HttpStreamingResponse {
let chunk_index = start / MAX_CHUNK_SIZE as u64;
let chunk_offset = (start % MAX_CHUNK_SIZE as u64) as usize;
let chunk_end = end / MAX_CHUNK_SIZE as u64;
let end_offset = (end % MAX_CHUNK_SIZE as u64) as usize;
let chunk_index = start / CHUNK_SIZE as u64;
let chunk_offset = (start % CHUNK_SIZE as u64) as usize;
let chunk_end = end / CHUNK_SIZE as u64;
let end_offset = (end % CHUNK_SIZE as u64) as usize;

let mut body = ByteBuf::with_capacity((end + 1 - start) as usize);
for i in chunk_index..=chunk_end {
Expand All @@ -357,7 +357,7 @@ fn range_response(
let end = if i == chunk_end {
end_offset
} else {
MAX_CHUNK_SIZE as usize - 1
CHUNK_SIZE as usize - 1
};

if end >= chunk.len() {
Expand Down
2 changes: 1 addition & 1 deletion src/ic_oss_bucket/src/api_update.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ fn create_file(
Err("content size mismatch".to_string())?;
}

for (i, chunk) in content.chunks(MAX_CHUNK_SIZE as usize).enumerate() {
for (i, chunk) in content.chunks(CHUNK_SIZE as usize).enumerate() {
store::fs::update_chunk(id, i as u32, now_ms, chunk.to_vec(), |_| Ok(()))?;
}

Expand Down
108 changes: 39 additions & 69 deletions src/ic_oss_bucket/src/permission.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use candid::Principal;
use ic_oss_types::permission::{Operation, Permission, PermissionChecker, Policies, Resource};
use ic_oss_types::permission::{
Operation, Permission, PermissionChecker, PermissionCheckerAny, Policies, Resource,
};

use crate::store::fs;

Expand All @@ -10,7 +12,7 @@ pub fn check_bucket_read(ps: &Policies, bucket: &Principal) -> bool {
operation: Operation::Read,
constraint: Some(Resource::Other("Info".to_string())),
},
bucket.to_string().as_str(),
bucket.to_string(),
)
}

Expand All @@ -21,20 +23,16 @@ pub fn check_folder_list(ps: &Policies, bucket: &Principal, parent: u32) -> bool
operation: Operation::List,
constraint: Some(Resource::Folder),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::List,
constraint: None,
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -49,20 +47,16 @@ pub fn check_folder_read(ps: &Policies, bucket: &Principal, id: u32) -> bool {
operation: Operation::Read,
constraint: Some(Resource::Folder),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(id)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(id);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::Read,
constraint: Some(Resource::Folder),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -77,20 +71,16 @@ pub fn check_file_list(ps: &Policies, bucket: &Principal, parent: u32) -> bool {
operation: Operation::List,
constraint: Some(Resource::File),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::List,
constraint: Some(Resource::File),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -105,27 +95,23 @@ pub fn check_file_read(ps: &Policies, bucket: &Principal, id: u32, parent: u32)
operation: Operation::Read,
constraint: None,
},
id.to_string().as_str(),
id.to_string(),
) && !ps.has_permission(
&Permission {
resource: Resource::Bucket,
operation: Operation::Read,
constraint: Some(Resource::File),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::Read,
constraint: Some(Resource::File),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -140,20 +126,16 @@ pub fn check_file_create(ps: &Policies, bucket: &Principal, parent: u32) -> bool
operation: Operation::Write,
constraint: Some(Resource::File),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::Write,
constraint: Some(Resource::File),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -168,20 +150,16 @@ pub fn check_file_delete(ps: &Policies, bucket: &Principal, parent: u32) -> bool
operation: Operation::Delete,
constraint: Some(Resource::File),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::Delete,
constraint: Some(Resource::File),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -196,7 +174,7 @@ pub fn check_file_update(ps: &Policies, bucket: &Principal, id: u32, parent: u32
operation: Operation::Write,
constraint: None,
},
id.to_string().as_str(),
id.to_string(),
) {
return check_file_create(ps, bucket, parent);
}
Expand All @@ -210,20 +188,16 @@ pub fn check_folder_create(ps: &Policies, bucket: &Principal, parent: u32) -> bo
operation: Operation::Write,
constraint: Some(Resource::Folder),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::Write,
constraint: Some(Resource::Folder),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -238,20 +212,16 @@ pub fn check_folder_delete(ps: &Policies, bucket: &Principal, parent: u32) -> bo
operation: Operation::Delete,
constraint: Some(Resource::Folder),
},
bucket.to_string().as_str(),
bucket.to_string(),
) {
let ancestors: Vec<String> = fs::get_ancestors(parent)
.into_iter()
.map(|f| f.id.to_string())
.collect();
let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect();
if !ps.has_permission(
let ancestors = fs::get_ancestors(parent);
if !ps.has_permission_any(
&Permission {
resource: Resource::Folder,
operation: Operation::Delete,
constraint: Some(Resource::Folder),
},
rs.as_slice(),
&ancestors,
) {
return false;
}
Expand All @@ -266,7 +236,7 @@ pub fn check_folder_update(ps: &Policies, bucket: &Principal, id: u32, parent: u
operation: Operation::Write,
constraint: None,
},
id.to_string().as_str(),
id.to_string(),
) {
return check_folder_create(ps, bucket, parent);
}
Expand Down
29 changes: 23 additions & 6 deletions src/ic_oss_bucket/src/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use ic_http_certification::{
use ic_oss_types::{
cwt::{Token, BUCKET_TOKEN_AAD},
file::{
FileChunk, FileInfo, UpdateFileInput, MAX_CHUNK_SIZE, MAX_FILE_SIZE, MAX_FILE_SIZE_PER_CALL,
FileChunk, FileInfo, UpdateFileInput, CHUNK_SIZE, MAX_FILE_SIZE, MAX_FILE_SIZE_PER_CALL,
},
folder::{FolderInfo, FolderName, UpdateFolderInput},
permission::Policies,
Expand Down Expand Up @@ -215,7 +215,7 @@ pub struct Chunk(pub Vec<u8>);

impl Storable for Chunk {
const BOUND: Bound = Bound::Bounded {
max_size: MAX_CHUNK_SIZE,
max_size: CHUNK_SIZE,
is_fixed_size: false,
};

Expand Down Expand Up @@ -353,6 +353,23 @@ impl FoldersTree {
res
}

/// Walks the folder tree from `parent` upward toward the root, applying `f`
/// to each ancestor folder that exists and collecting the results.
///
/// The walk stops when it reaches id 0 (treated as the root sentinel, per the
/// `parent != 0`-style guard) or when an id has no entry in the tree.
/// Results are ordered from `parent` itself up toward the root.
fn ancestors_map<F, U>(&self, mut parent: u32, f: F) -> Vec<U>
where
    F: Fn(u32, &FolderMetadata) -> U,
{
    // Capacity 10 is a heuristic for typical tree depth, not a limit.
    let mut out = Vec::with_capacity(10);
    loop {
        if parent == 0 {
            break;
        }
        let folder = match self.get(&parent) {
            Some(folder) => folder,
            // Missing id: stop walking rather than error out.
            None => break,
        };
        out.push(f(parent, folder));
        parent = folder.parent;
    }
    out
}

fn list_folders(&self, parent: u32) -> Vec<FolderInfo> {
match self.0.get(&parent) {
None => Vec::new(),
Expand Down Expand Up @@ -760,10 +777,10 @@ pub mod fs {
FS_METADATA.with(|r| r.borrow().get(&id))
}

pub fn get_ancestors(start: u32) -> Vec<FolderName> {
pub fn get_ancestors(start: u32) -> Vec<String> {
FOLDERS.with(|r| {
let m = r.borrow();
m.ancestors(start)
m.ancestors_map(start, |id, _| id.to_string())
})
}

Expand Down Expand Up @@ -1084,10 +1101,10 @@ pub mod fs {
Err("empty chunk".to_string())?;
}

if chunk.len() > MAX_CHUNK_SIZE as usize {
if chunk.len() > CHUNK_SIZE as usize {
Err(format!(
"chunk size too large, max size is {} bytes",
MAX_CHUNK_SIZE
CHUNK_SIZE
))?;
}

Expand Down
2 changes: 1 addition & 1 deletion src/ic_oss_types/src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use url::Url;

use crate::{ByteN, MapValue};

pub const MAX_CHUNK_SIZE: u32 = 256 * 1024;
pub const CHUNK_SIZE: u32 = 256 * 1024;
pub const MAX_FILE_SIZE: u64 = 384 * 1024 * 1024 * 1024; // 384G
pub const MAX_FILE_SIZE_PER_CALL: u64 = 1024 * 2000; // should less than 2M

Expand Down
Loading

0 comments on commit 65e4eb6

Please sign in to comment.