@@ -253,7 +253,7 @@ def get_delegated_s3pars(self, exp=3600):
                 'token': cred['SessionToken'], 'anon': False}
 
     def _open(self, path, mode='rb', block_size=None, acl='', version_id=None,
-              fill_cache=None, cache_type='bytes', **kwargs):
+              fill_cache=None, cache_type='bytes', autocommit=True, **kwargs):
         """ Open a file for reading or writing
 
         Parameters
@@ -299,7 +299,8 @@ def _open(self, path, mode='rb', block_size=None, acl='', version_id=None,
 
         return S3File(self, path, mode, block_size=block_size, acl=acl,
                       version_id=version_id, fill_cache=fill_cache,
-                      s3_additional_kwargs=kw, cache_type=cache_type)
+                      s3_additional_kwargs=kw, cache_type=cache_type,
+                      autocommit=autocommit)
 
     def _lsdir(self, path, refresh=False, max_items=None):
         if path.startswith('s3://'):
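The new autocommit flag threads from _open through to S3File. A minimal usage sketch with hypothetical bucket and key names, assuming fsspec's transaction machinery (which opens files with autocommit=False and commits or discards them as a group):

    import s3fs

    fs = s3fs.S3FileSystem()

    # Inside a transaction, files are opened with autocommit=False and
    # are only committed when the block exits cleanly; on error, the
    # pending uploads are discarded instead.
    with fs.transaction:
        with fs.open('mybucket/staging/part-0.csv', 'wb') as f:
            f.write(b'col_a,col_b\n1,2\n')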
@@ -899,8 +900,11 @@ class S3File(AbstractBufferedFile):
     def __init__(self, s3, path, mode='rb', block_size=5 * 2 ** 20, acl="",
                  version_id=None, fill_cache=True, s3_additional_kwargs=None,
                  autocommit=True, cache_type='bytes'):
-        if not split_path(path)[1]:
+        bucket, key = split_path(path)
+        if not key:
             raise ValueError('Attempt to open non key-like path: %s' % path)
+        self.bucket = bucket
+        self.key = key
         self.version_id = version_id
         self.acl = acl
         self.mpu = None
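Caching bucket and key on the instance here lets the methods below stop re-splitting self.path. For context, split_path is assumed to separate the bucket name from the object key roughly like this minimal sketch (the real s3fs helper also strips any 's3://' prefix):

    def split_path(path):
        """Split an S3 path into (bucket, key) -- minimal sketch.

        >>> split_path('mybucket/path/to/file')
        ('mybucket', 'path/to/file')
        >>> split_path('mybucket')
        ('mybucket', '')
        """
        path = path.lstrip('/')
        if '/' not in path:
            return path, ''
        bucket, key = path.split('/', 1)
        return bucket, key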
@@ -935,7 +939,6 @@ def _call_s3(self, method, *kwarglist, **kwargs):
             **kwargs)
 
     def _initiate_upload(self):
-        bucket, key = split_path(self.path)
         if self.acl and self.acl not in key_acls:
             raise ValueError('ACL not in %s', key_acls)
         self.parts = []
@@ -945,7 +948,7 @@ def _initiate_upload(self):
         try:
             self.mpu = self._call_s3(
                 self.fs.s3.create_multipart_upload,
-                Bucket=bucket, Key=key, ACL=self.acl)
+                Bucket=self.bucket, Key=self.key, ACL=self.acl)
         except ClientError as e:
             raise translate_boto_error(e)
         except ParamValidationError as e:
@@ -958,8 +961,9 @@ def _initiate_upload(self):
             out = self.fs._call_s3(
                 self.fs.s3.upload_part_copy,
                 self.s3_additional_kwargs,
-                Bucket=bucket,
-                Key=key, PartNumber=1,
+                Bucket=self.bucket,
+                Key=self.key,
+                PartNumber=1,
                 UploadId=self.mpu['UploadId'],
                 CopySource=self.path)
             self.parts.append({'PartNumber': 1,
@@ -1003,8 +1007,7 @@ def url(self, **kwargs):
         return self.fs.url(self.path, **kwargs)
 
     def _fetch_range(self, start, end):
-        bucket, key = self.path.split('/', 1)
-        return _fetch_range(self.fs.s3, bucket, key, self.version_id, start, end)
+        return _fetch_range(self.fs.s3, self.bucket, self.key, self.version_id, start, end)
 
     def _upload_chunk(self, final=False):
         bucket, key = split_path(self.path)
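The module-level _fetch_range wrapped here (its signature appears at the bottom of this diff) performs a ranged GetObject. A simplified sketch of that pattern against a plain boto3 client, ignoring the retry loop implied by max_attempts:

    def fetch_range_sketch(client, bucket, key, version_id, start, end):
        """Read bytes [start, end) from an object -- minimal sketch.

        HTTP Range headers are inclusive on both ends, hence end - 1.
        """
        kwargs = {'Bucket': bucket, 'Key': key,
                  'Range': 'bytes=%i-%i' % (start, end - 1)}
        if version_id is not None:
            kwargs['VersionId'] = version_id
        resp = client.get_object(**kwargs)
        return resp['Body'].read()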
@@ -1050,12 +1053,11 @@ def _upload_chunk(self, final=False):
 
     def commit(self):
         logger.debug("COMMIT")
-        bucket, key = self.path.split('/', 1)
         part_info = {'Parts': self.parts}
         write_result = self._call_s3(
             self.fs.s3.complete_multipart_upload,
-            Bucket=bucket,
-            Key=key,
+            Bucket=self.bucket,
+            Key=self.key,
             UploadId=self.mpu['UploadId'],
             MultipartUpload=part_info)
         if self.fs.version_aware:
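For reference, the part_info payload handed to complete_multipart_upload pairs each part number with the ETag S3 returned when that part was uploaded; a representative shape (ETag values illustrative):

    part_info = {
        'Parts': [
            {'PartNumber': 1, 'ETag': '"9b2cf535f27731c974343645a3985328"'},
            {'PartNumber': 2, 'ETag': '"d41d8cd98f00b204e9800998ecf8427e"'},
        ]
    }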
@@ -1072,6 +1074,16 @@ def commit(self):
                 self.fs.invalidate_cache(path)
                 path = path + '/' + p
 
+    def discard(self):
+        if self.autocommit:
+            raise ValueError("Cannot discard when autocommit is enabled")
+        self._call_s3(
+            self.fs.s3.abort_multipart_upload,
+            Bucket=self.bucket,
+            Key=self.key,
+            UploadId=self.mpu['UploadId'],
+        )
+
 
 def _fetch_range(client, bucket, key, version_id, start, end, max_attempts=10,
                  req_kw=None):
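Usage sketch for the new discard method, continuing with the fs instance from the earlier sketch (names hypothetical): a file opened with autocommit=False can be abandoned, in which case abort_multipart_upload deletes the staged parts and no object ever appears at the target path.

    f = fs._open('mybucket/tmp/output.bin', mode='wb', autocommit=False)
    f.write(b'x' * (6 * 2 ** 20))  # past the 5 MiB block size, so a
                                   # multipart upload has been initiated
    f.discard()  # abort instead of committing; staged parts are deleted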