@@ -626,19 +626,6 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
 	return NULL;
 }
 
-/* Disgusting wrapper functions */
-static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
-{
-	void *addr = kmap_atomic(sg_page(sgl + idx));
-	return (unsigned long)addr;
-}
-
-static inline void sg_kunmap_atomic(unsigned long addr)
-{
-	kunmap_atomic((void *)addr);
-}
-
-
 /* Assume the original sgl has enough room */
 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 					    struct scatterlist *bounce_sgl,
@@ -653,32 +640,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 	unsigned long bounce_addr = 0;
 	unsigned long dest_addr = 0;
 	unsigned long flags;
+	struct scatterlist *cur_dest_sgl;
+	struct scatterlist *cur_src_sgl;
 
 	local_irq_save(flags);
-
+	cur_dest_sgl = orig_sgl;
+	cur_src_sgl = bounce_sgl;
 	for (i = 0; i < orig_sgl_count; i++) {
-		dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+		dest_addr = (unsigned long)
+				kmap_atomic(sg_page(cur_dest_sgl)) +
+				cur_dest_sgl->offset;
 		dest = dest_addr;
-		destlen = orig_sgl[i].length;
+		destlen = cur_dest_sgl->length;
 
 		if (bounce_addr == 0)
-			bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+			bounce_addr = (unsigned long)kmap_atomic(
+							sg_page(cur_src_sgl));
 
 		while (destlen) {
-			src = bounce_addr + bounce_sgl[j].offset;
-			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+			src = bounce_addr + cur_src_sgl->offset;
+			srclen = cur_src_sgl->length - cur_src_sgl->offset;
 
 			copylen = min(srclen, destlen);
 			memcpy((void *)dest, (void *)src, copylen);
 
 			total_copied += copylen;
-			bounce_sgl[j].offset += copylen;
+			cur_src_sgl->offset += copylen;
 			destlen -= copylen;
 			dest += copylen;
 
-			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+			if (cur_src_sgl->offset == cur_src_sgl->length) {
 				/* full */
-				sg_kunmap_atomic(bounce_addr);
+				kunmap_atomic((void *)bounce_addr);
 				j++;
 
 				/*
@@ -692,21 +685,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 					/*
 					 * We are done; cleanup and return.
 					 */
-					sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+					kunmap_atomic((void *)(dest_addr -
+						cur_dest_sgl->offset));
 					local_irq_restore(flags);
 					return total_copied;
 				}
 
 				/* if we need to use another bounce buffer */
-				if (destlen || i != orig_sgl_count - 1)
-					bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+				if (destlen || i != orig_sgl_count - 1) {
+					cur_src_sgl = sg_next(cur_src_sgl);
+					bounce_addr = (unsigned long)
+							kmap_atomic(
+							sg_page(cur_src_sgl));
+				}
 			} else if (destlen == 0 && i == orig_sgl_count - 1) {
 				/* unmap the last bounce that is < PAGE_SIZE */
-				sg_kunmap_atomic(bounce_addr);
+				kunmap_atomic((void *)bounce_addr);
 			}
 		}
 
-		sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+		kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
+		cur_dest_sgl = sg_next(cur_dest_sgl);
 	}
 
 	local_irq_restore(flags);
@@ -727,48 +726,61 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 	unsigned long bounce_addr = 0;
 	unsigned long src_addr = 0;
 	unsigned long flags;
+	struct scatterlist *cur_src_sgl;
+	struct scatterlist *cur_dest_sgl;
 
 	local_irq_save(flags);
 
+	cur_src_sgl = orig_sgl;
+	cur_dest_sgl = bounce_sgl;
+
 	for (i = 0; i < orig_sgl_count; i++) {
-		src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+		src_addr = (unsigned long)
+				kmap_atomic(sg_page(cur_src_sgl)) +
+				cur_src_sgl->offset;
 		src = src_addr;
-		srclen = orig_sgl[i].length;
+		srclen = cur_src_sgl->length;
 
 		if (bounce_addr == 0)
-			bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+			bounce_addr = (unsigned long)
+					kmap_atomic(sg_page(cur_dest_sgl));
 
 		while (srclen) {
 			/* assume bounce offset always == 0 */
-			dest = bounce_addr + bounce_sgl[j].length;
-			destlen = PAGE_SIZE - bounce_sgl[j].length;
+			dest = bounce_addr + cur_dest_sgl->length;
+			destlen = PAGE_SIZE - cur_dest_sgl->length;
 
 			copylen = min(srclen, destlen);
 			memcpy((void *)dest, (void *)src, copylen);
 
 			total_copied += copylen;
-			bounce_sgl[j].length += copylen;
+			cur_dest_sgl->length += copylen;
 			srclen -= copylen;
 			src += copylen;
 
-			if (bounce_sgl[j].length == PAGE_SIZE) {
+			if (cur_dest_sgl->length == PAGE_SIZE) {
 				/* full..move to next entry */
-				sg_kunmap_atomic(bounce_addr);
+				kunmap_atomic((void *)bounce_addr);
 				bounce_addr = 0;
 				j++;
 			}
 
 			/* if we need to use another bounce buffer */
-			if (srclen && bounce_addr == 0)
-				bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+			if (srclen && bounce_addr == 0) {
+				cur_dest_sgl = sg_next(cur_dest_sgl);
+				bounce_addr = (unsigned long)
+						kmap_atomic(
+						sg_page(cur_dest_sgl));
+			}
 
 		}
 
-		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+		kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
+		cur_src_sgl = sg_next(cur_src_sgl);
 	}
 
 	if (bounce_addr)
-		sg_kunmap_atomic(bounce_addr);
+		kunmap_atomic((void *)bounce_addr);
 
 	local_irq_restore(flags);
@@ -1536,6 +1548,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	struct scatterlist *sgl;
 	unsigned int sg_count = 0;
 	struct vmscsi_request *vm_srb;
+	struct scatterlist *cur_sgl;
 
 	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
 		/*
@@ -1617,10 +1630,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		}
 
 		cmd_request->data_buffer.offset = sgl[0].offset;
-
-		for (i = 0; i < sg_count; i++)
+		cur_sgl = sgl;
+		for (i = 0; i < sg_count; i++) {
 			cmd_request->data_buffer.pfn_array[i] =
-				page_to_pfn(sg_page((&sgl[i])));
+				page_to_pfn(sg_page((cur_sgl)));
+			cur_sgl = sg_next(cur_sgl);
+		}
 
 	} else if (scsi_sglist(scmnd)) {
 		cmd_request->data_buffer.offset =
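The common thread in this diff: a scatterlist may be chained, so entry N is not necessarily at sgl + N. The removed sg_kmap_atomic() wrapper and the bounce_sgl[j] / orig_sgl[i] indexing both assumed one flat array, while sg_next() follows chain links and works for flat and chained lists alike. The kernel's for_each_sg() iterator packages exactly the cur_sgl = sg_next(cur_sgl) stepping this commit introduces. A minimal sketch of that walk, assuming only <linux/scatterlist.h>; sum_sg_lengths() is a hypothetical helper for illustration, not a function from this driver:

#include <linux/scatterlist.h>

/*
 * Hypothetical example: total the bytes described by a possibly
 * chained scatterlist. for_each_sg() starts at sgl and advances with
 * sg_next(), so chain entries are followed transparently; indexing
 * sgl[i] would instead run off the end of the first array.
 */
static unsigned int sum_sg_lengths(struct scatterlist *sgl,
				   unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;
	unsigned int total = 0;

	for_each_sg(sgl, sg, nents, i)
		total += sg->length;	/* never sgl[i].length */

	return total;
}

The same reasoning applies to the mapping calls: kmap_atomic(sg_page(cur_sgl)) maps the page of the entry currently in hand, so the page lookup stays correct even after the walk crosses a chain boundary.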