@@ -1458,6 +1458,7 @@ def copy( # pylint: disable=too-many-arguments
1458
1458
boto3_session : Optional [boto3 .Session ] = None ,
1459
1459
s3_additional_kwargs : Optional [Dict [str , str ]] = None ,
1460
1460
max_rows_by_file : Optional [int ] = 10_000_000 ,
1461
+ precombine_key : Optional [str ] = None ,
1461
1462
) -> None :
1462
1463
"""Load Pandas DataFrame as a Table on Amazon Redshift using parquet files on S3 as stage.
1463
1464
@@ -1556,6 +1557,10 @@ def copy( # pylint: disable=too-many-arguments
1556
1557
Max number of rows in each file.
1557
1558
Default is None, i.e. don't split the files.
1558
1559
(e.g. 33554432, 268435456)
1560
+ precombine_key : str, optional
1561
+ When there is a primary_key match during upsert, this column will change the upsert method,
1562
+ comparing the values of the specified column from source and target, and keeping the
1563
+ larger of the two. Only takes effect when mode = "upsert".
1559
1564
1560
1565
Returns
1561
1566
-------
@@ -1623,6 +1628,7 @@ def copy( # pylint: disable=too-many-arguments
1623
1628
boto3_session = session ,
1624
1629
s3_additional_kwargs = s3_additional_kwargs ,
1625
1630
sql_copy_extra_params = sql_copy_extra_params ,
1631
+ precombine_key = precombine_key ,
1626
1632
)
1627
1633
finally :
1628
1634
if keep_files is False :
0 commit comments