1
1
using System ;
2
+ using System . Collections . Concurrent ;
2
3
using System . Collections . Generic ;
3
4
using System . Diagnostics ;
4
5
using System . Diagnostics . CodeAnalysis ;
7
8
using System . Net ;
8
9
using System . Threading . Tasks ;
9
10
using CloudinaryDotNet . Actions ;
11
+ using CloudinaryDotNet . Core ;
10
12
using Newtonsoft . Json ;
11
13
using Newtonsoft . Json . Linq ;
12
14
using NUnit . Framework ;
@@ -260,7 +262,7 @@ public void TestModerationManual()
260
262
Assert . AreEqual ( MODERATION_MANUAL , uploadResult . Moderation [ 0 ] . Kind ) ;
261
263
Assert . AreEqual ( ModerationStatus . Pending , uploadResult . Moderation [ 0 ] . Status ) ;
262
264
263
- var getResult = m_cloudinary . GetResource ( uploadResult . PublicId ) ;
265
+ var getResult = m_cloudinary . GetResourceByAssetId ( uploadResult . AssetId ) ;
264
266
265
267
Assert . NotNull ( getResult ) ;
266
268
Assert . NotNull ( getResult . Moderation , getResult . Error ? . Message ) ;
@@ -561,7 +563,7 @@ public NonSeekableStream(byte[] buffer) : base(buffer) { }
561
563
[ Test , RetryWithDelay ]
562
564
public void TestUploadLargeNonSeekableStream ( )
563
565
{
564
- byte [ ] bytes = File . ReadAllBytes ( m_testLargeImagePath ) ;
566
+ var bytes = File . ReadAllBytes ( m_testLargeImagePath ) ;
565
567
const string streamed = "stream_non_seekable" ;
566
568
567
569
using ( var memoryStream = new NonSeekableStream ( bytes ) )
@@ -593,15 +595,15 @@ public void TestUploadLargeRawFiles()
593
595
}
594
596
595
597
[Test, RetryWithDelay]
public async Task TestUploadLargeRawFilesAsyncInParallel()
{
    // Large raw files should be uploadable asynchronously with concurrent chunk requests.
    var largeFilePath = m_testLargeImagePath;
    var largeFileLength = (int)new FileInfo(largeFilePath).Length;

    var uploadParams = GetUploadLargeRawParams(largeFilePath);

    // Two chunks may be in flight at the same time.
    var result = await m_cloudinary.UploadLargeAsync<RawUploadResult>(uploadParams, TEST_CHUNK_SIZE, 2);

    AssertUploadLarge(result, largeFileLength);
}
@@ -617,6 +619,7 @@ private RawUploadParams GetUploadLargeRawParams(string path)
617
619
618
620
/// <summary>
/// Asserts that a large-file upload produced a result whose byte count
/// matches the size of the source file.
/// </summary>
/// <param name="result">Upload result to inspect.</param>
/// <param name="fileLength">Expected uploaded size in bytes.</param>
private void AssertUploadLarge(RawUploadResult result, int fileLength)
{
    Assert.NotNull(result);

    // Surface the server-side error message (if any) when the size check fails.
    var failureMessage = result.Error?.Message;
    Assert.AreEqual(fileLength, result.Bytes, failureMessage);
}
622
625
@@ -655,6 +658,172 @@ public async Task TestUploadLargeAutoFilesAsync()
655
658
656
659
Assert . AreEqual ( "image" , result . ResourceType ) ;
657
660
}
661
+
662
[Test, RetryWithDelay]
public void TestUploadChunkSingleStream()
{
    // Uploads a large image through one reusable stream, appending and sending
    // one chunk per iteration.
    var largeFilePath = m_testLargeImagePath;
    var largeFileLength = (int)new FileInfo(largeFilePath).Length;

    ImageUploadResult result = null;

    using (var currChunk = new MemoryStream())
    {
        var uploadParams = new ImageUploadParams()
        {
            File = new FileDescription($"ImageFromChunks_{GetTaggedRandomValue()}", currChunk),
            Tags = m_apiTag
        };

        var buffer = new byte[TEST_CHUNK_SIZE];

        // Read-only access: the test must never modify the shared fixture file.
        using (var source = File.Open(largeFilePath, FileMode.Open, FileAccess.Read))
        {
            int read;
            while ((read = source.Read(buffer, 0, buffer.Length)) > 0)
            {
                currChunk.Seek(0, SeekOrigin.End);
                currChunk.Write(buffer, 0, read);

                // Need to specify whether the chunk is the last one in order to finish
                // the upload. Compare stream position against length rather than the
                // read size: with `read != TEST_CHUNK_SIZE`, a file whose length is an
                // exact multiple of the chunk size would never flag its last chunk and
                // the upload would never be finalized.
                uploadParams.File.LastChunk = source.Position == source.Length;

                result = m_cloudinary.UploadChunk(uploadParams);
            }
        }
    }

    AssertUploadLarge(result, largeFileLength);
    Assert.AreEqual("image", result?.ResourceType);
}
699
+
700
[Test, RetryWithDelay]
public async Task TestUploadChunkMultipleStreamsCustomOffsetAsync()
{
    // Uploads a large image chunk-by-chunk, each chunk wrapped in its own stream
    // registered with an explicit byte offset.
    var largeFilePath = m_testLargeImagePath;
    var largeFileLength = (int)new FileInfo(largeFilePath).Length;

    ImageUploadResult result = null;

    var uploadParams = new ImageUploadParams()
    {
        // File path will be ignored, since we use streams.
        File = new FileDescription($"ImageFromMultipleChunks_{GetTaggedRandomValue()}", true),
        Tags = m_apiTag
    };

    var buffer = new byte[TEST_CHUNK_SIZE];

    // Read-only access: the test must never modify the shared fixture file.
    using (var source = File.Open(largeFilePath, FileMode.Open, FileAccess.Read))
    {
        int read;
        while ((read = source.Read(buffer, 0, buffer.Length)) > 0)
        {
            // Wrap only the bytes actually read; wrapping the whole buffer would let a
            // short final chunk carry stale bytes left over from the previous iteration.
            var currChunk = new MemoryStream(buffer, 0, read);

            // Register the chunk with its absolute offset. Flag the last chunk by stream
            // position, not by `read != TEST_CHUNK_SIZE`, so a file whose length is an
            // exact multiple of the chunk size still finishes the upload.
            uploadParams.File.AddChunk(currChunk, source.Position - read, read, source.Position == source.Length);

            result = await m_cloudinary.UploadChunkAsync(uploadParams);
        }
    }

    AssertUploadLarge(result, largeFileLength);
    Assert.AreEqual("image", result?.ResourceType);
}
733
+
734
[Test, RetryWithDelay]
public void TestUploadChunkMultipleFileParts()
{
    // Uploads a large image that has been pre-split into separate chunk files on disk.
    var largeFilePath = m_testLargeImagePath;
    var largeFileLength = (int)new FileInfo(largeFilePath).Length;

    ImageUploadResult result = null;

    var fileChunks = SplitFile(largeFilePath, TEST_CHUNK_SIZE, "multiple");

    var uploadParams = new ImageUploadParams()
    {
        File = new FileDescription($"ImageFromFileChunks_{GetTaggedRandomValue()}", true),
        Tags = m_apiTag,
    };
    try
    {
        // Indexed loop instead of IndexOf(chunk) inside the iteration: IndexOf is
        // O(n) per chunk (O(n^2) overall) and returns the wrong index if two chunk
        // paths ever compare equal.
        for (var i = 0; i < fileChunks.Count; i++)
        {
            // Set file path of the current chunk; the final chunk must be flagged
            // as last in order to finish the upload.
            uploadParams.File.AddChunk(fileChunks[i], i == fileChunks.Count - 1);

            result = m_cloudinary.UploadChunk(uploadParams);
        }
    }
    finally
    {
        uploadParams.File.Dispose();
        foreach (var chunk in fileChunks)
        {
            try
            {
                File.Delete(chunk);
            }
            catch (IOException)
            {
                // Best-effort cleanup of temporary chunk files; nothing to do.
            }
        }
    }

    AssertUploadLarge(result, largeFileLength);
    Assert.AreEqual("image", result?.ResourceType);
}
779
+
780
[Test, RetryWithDelay]
public void TestUploadChunkMultipleFilePartsInParallel()
{
    // Uploads pre-split chunk files concurrently (two workers) as a raw resource.
    var largeFilePath = m_testLargeImagePath;
    var largeFileLength = (int)new FileInfo(largeFilePath).Length;

    var fileChunks = SplitFile(largeFilePath, TEST_CHUNK_SIZE, "multiple_parallel");

    var uploadParams = new RawUploadParams()
    {
        File = new FileDescription($"ImageFromFileChunks_{GetTaggedRandomValue()}", true),
        Tags = m_apiTag
    };

    var resultCollection = new ConcurrentBag<RawUploadResult>();

    // All chunks are registered up front; each parallel call pulls the next one.
    uploadParams.File.AddChunks(fileChunks);

    try
    {
        var options = new ParallelOptions { MaxDegreeOfParallelism = 2 };
        Parallel.For(0, fileChunks.Count, options, _ =>
        {
            resultCollection.Add(m_cloudinary.UploadChunk(uploadParams));
        });
    }
    finally
    {
        uploadParams.File.Dispose();
        foreach (var chunk in fileChunks)
        {
            try
            {
                File.Delete(chunk);
            }
            catch (IOException)
            {
                // Best-effort cleanup; nothing to do.
            }
        }
    }

    // Only the request that completed the upload carries the final asset metadata.
    var uploadResult = resultCollection.FirstOrDefault(r => r.AssetId != null);

    AssertUploadLarge(uploadResult, largeFileLength);
    Assert.AreEqual("raw", uploadResult?.ResourceType);
}
826
+
658
827
/// <summary>
659
828
/// Test access control rules
660
829
/// </summary>
0 commit comments