@@ -2,14 +2,17 @@ use std::collections::{HashMap, HashSet};
 
 use async_trait::async_trait;
 use entities::enums::SpecificationVersions;
+use serde_json::json;
 use solana_sdk::pubkey::Pubkey;
 
-use crate::asset::{AssetsUpdateIdx, SlotAssetIdx};
+use crate::asset::{AssetCollection, AssetLeaf, AssetsUpdateIdx, SlotAssetIdx};
+use crate::cl_items::{ClItem, ClLeaf};
 use crate::column::TypedColumn;
+use crate::errors::StorageError;
 use crate::key_encoders::{decode_u64x2_pubkey, encode_u64x2_pubkey};
 use crate::storage_traits::{AssetIndexReader, AssetSlotStorage, AssetUpdateIndexStorage};
-use crate::{Result, Storage};
-use entities::models::{AssetIndex, UrlWithStatus};
+use crate::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Result, Storage};
+use entities::models::{AssetIndex, CompleteAssetDetails, Updated, UrlWithStatus};
 
 impl AssetUpdateIndexStorage for Storage {
     fn last_known_asset_updated_key(&self) -> Result<Option<(u64, u64, Pubkey)>> {
@@ -245,3 +248,135 @@ impl AssetSlotStorage for Storage {
         Ok(None)
     }
 }
+
+impl Storage {
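+    /// Writes a complete snapshot of an asset's details into RocksDB as a
+    /// single atomic write batch, e.g. when backfilling gaps in indexed data.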
+    pub async fn insert_gaped_data(&self, data: CompleteAssetDetails) -> Result<()> {
+        let mut batch = rocksdb::WriteBatch::default();
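+        // Static details: asset class, royalty target type, and creation slot.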
+        self.asset_static_data.merge_with_batch(
+            &mut batch,
+            data.pubkey,
+            &AssetStaticDetails {
+                pubkey: data.pubkey,
+                specification_asset_class: data.specification_asset_class,
+                royalty_target_type: data.royalty_target_type,
+                created_at: data.slot_created as i64,
+            },
+        )?;
+
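+        // Dynamic details: compression flags, supply, burn state, creators,
+        // royalty amount, URL, and on-chain data serialized to a JSON string.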
+        self.asset_dynamic_data.merge_with_batch(
+            &mut batch,
+            data.pubkey,
+            &AssetDynamicDetails {
+                pubkey: data.pubkey,
+                is_compressible: data.is_compressible,
+                is_compressed: data.is_compressed,
+                is_frozen: data.is_frozen,
+                supply: data.supply,
+                seq: data.seq,
+                is_burnt: data.is_burnt,
+                was_decompressed: data.was_decompressed,
+                onchain_data: data.onchain_data.map(|chain_data| {
+                    Updated::new(
+                        chain_data.slot_updated,
+                        chain_data.seq,
+                        json!(chain_data.value).to_string(),
+                    )
+                }),
+                creators: data.creators,
+                royalty_amount: data.royalty_amount,
+                url: data.url,
+            },
+        )?;
+
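+        // Update authority, tagged with the slot it was last changed in.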
+        self.asset_authority_data.merge_with_batch(
+            &mut batch,
+            data.pubkey,
+            &AssetAuthority {
+                pubkey: data.pubkey,
+                authority: data.authority.value,
+                slot_updated: data.authority.slot_updated,
+            },
+        )?;
+
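+        // Collection membership is optional; merge it only when present.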
+        if let Some(collection) = data.collection {
+            self.asset_collection_data.merge_with_batch(
+                &mut batch,
+                data.pubkey,
+                &AssetCollection {
+                    pubkey: data.pubkey,
+                    collection: collection.value.collection,
+                    is_collection_verified: collection.value.is_collection_verified,
+                    collection_seq: collection.value.collection_seq,
+                    slot_updated: collection.slot_updated,
+                },
+            )?;
+        }
+
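+        // For compressed assets, store the Merkle tree leaf the asset lives in.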
+        if let Some(leaf) = data.asset_leaf {
+            self.asset_leaf_data.merge_with_batch(
+                &mut batch,
+                data.pubkey,
+                &AssetLeaf {
+                    pubkey: data.pubkey,
+                    tree_id: leaf.value.tree_id,
+                    leaf: leaf.value.leaf.clone(),
+                    nonce: leaf.value.nonce,
+                    data_hash: leaf.value.data_hash,
+                    creator_hash: leaf.value.creator_hash,
+                    leaf_seq: leaf.value.leaf_seq,
+                    slot_updated: leaf.slot_updated,
+                },
+            )?
+        }
+
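+        // Ownership: owner, delegate, owner type, and their sequence number.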
+        self.asset_owner_data.merge_with_batch(
+            &mut batch,
+            data.pubkey,
+            &AssetOwner {
+                pubkey: data.pubkey,
+                owner: data.owner,
+                delegate: data.delegate,
+                owner_type: data.owner_type,
+                owner_delegate_seq: data.owner_delegate_seq,
+            },
+        )?;
+
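+        // The change log leaf is keyed by (leaf index, tree) and written with
+        // a plain put rather than a merge.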
+        if let Some(leaf) = data.cl_leaf {
+            self.cl_leafs.put_with_batch(
+                &mut batch,
+                (leaf.cli_leaf_idx, leaf.cli_tree_key),
+                &ClLeaf {
+                    cli_leaf_idx: leaf.cli_leaf_idx,
+                    cli_tree_key: leaf.cli_tree_key,
+                    cli_node_idx: leaf.cli_node_idx,
+                },
+            )?
+        }
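+        // Merge every change log node (hash and level) recorded for the asset.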
+        for item in data.cl_items {
+            self.cl_items.merge_with_batch(
+                &mut batch,
+                (item.cli_node_idx, item.cli_tree_key),
+                &ClItem {
+                    cli_node_idx: item.cli_node_idx,
+                    cli_tree_key: item.cli_tree_key,
+                    cli_leaf_idx: item.cli_leaf_idx,
+                    cli_seq: item.cli_seq,
+                    cli_level: item.cli_level,
+                    cli_hash: item.cli_hash.clone(),
+                    slot_updated: item.slot_updated,
+                },
+            )?;
+        }
+        self.write_batch(batch).await?;
+        Ok(())
+    }
+
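+    /// Runs the RocksDB write on a blocking thread so it does not stall the
+    /// async executor; both the join error and the write error are mapped to
+    /// StorageError::Common.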
+    pub(crate) async fn write_batch(&self, batch: rocksdb::WriteBatch) -> Result<()> {
+        let backend = self.db.clone();
+        tokio::task::spawn_blocking(move || backend.write(batch))
+            .await
+            .map_err(|e| StorageError::Common(e.to_string()))?
+            .map_err(|e| StorageError::Common(e.to_string()))?;
+        Ok(())
+    }
+}