
Commit 518e49d

Introduce RowDocumentExtractor.
Transform the tabular structure into a graph of RowDocuments associated with nested documents and lists. Add container license acceptance for updated container images.

See #1446
See #1450
See #1445
Original pull request: #1572
1 parent d9b5488 commit 518e49d
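
To illustrate the intent of the change (this sketch is not part of the commit; the table, columns, and entity are made up), a joined, tabular result for an aggregate root with a to-many collection is roughly folded into one nested RowDocument per root identifier:

	// Hypothetical input rows (LEFT JOIN of person and address, ordered by person id):
	//
	//   person.id | person.name | address.id | address.city
	//   ----------+-------------+------------+-------------
	//           1 | Alice       |         10 | Vienna
	//           1 | Alice       |         11 | Graz
	//
	// Conceptual RowDocument extracted for id = 1, with a nested list for the to-many path:
	//
	//   { "id": 1, "name": "Alice",
	//     "address": [ { "id": 10, "city": "Vienna" }, { "id": 11, "city": "Graz" } ] }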

File tree: 7 files changed (+1062 -33 lines)
Diff for: ci/accept-third-party-license.sh (+2)

@@ -4,12 +4,14 @@
   echo "mcr.microsoft.com/mssql/server:2022-CU5-ubuntu-20.04"
   echo "ibmcom/db2:11.5.7.0a"
   echo "harbor-repo.vmware.com/mcr-proxy-cache/mssql/server:2019-CU16-ubuntu-20.04"
+  echo "harbor-repo.vmware.com/mcr-proxy-cache/mssql/server:2022-CU5-ubuntu-20.04"
   echo "harbor-repo.vmware.com/dockerhub-proxy-cache/ibmcom/db2:11.5.7.0a"
 } > spring-data-jdbc/src/test/resources/container-license-acceptance.txt

 {
   echo "mcr.microsoft.com/mssql/server:2022-latest"
   echo "ibmcom/db2:11.5.7.0a"
   echo "harbor-repo.vmware.com/mcr-proxy-cache/mssql/server:2022-latest"
+  echo "harbor-repo.vmware.com/mcr-proxy-cache/mssql/server:2022-CU5-ubuntu-20.04"
   echo "harbor-repo.vmware.com/dockerhub-proxy-cache/ibmcom/db2:11.5.7.0a"
 } > spring-data-r2dbc/src/test/resources/container-license-acceptance.txt
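
For context (not part of this commit's diff): Testcontainers only starts license-gated images such as MS SQL Server when the image tag is listed in a container-license-acceptance.txt on the test classpath, or when the license is accepted programmatically; the two entries added above cover the 2022-CU5 image pulled through the proxied registries. A minimal, hypothetical sketch of the test-side effect, assuming Testcontainers' MSSQLServerContainer:

import org.testcontainers.containers.MSSQLServerContainer;
import org.testcontainers.utility.DockerImageName;

class MsSqlLicenseAcceptanceSketch {

	public static void main(String[] args) {
		// Starting this container succeeds only because the image tag is listed in
		// src/test/resources/container-license-acceptance.txt (or acceptLicense() is called).
		try (MSSQLServerContainer<?> mssql = new MSSQLServerContainer<>(
				DockerImageName.parse("mcr.microsoft.com/mssql/server:2022-CU5-ubuntu-20.04"))) {
			mssql.start();
		}
	}
}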

Diff for: spring-data-jdbc/src/main/java/org/springframework/data/jdbc/core/convert/AggregateResultSetExtractor.java (+4 -2)

@@ -68,8 +68,8 @@ class AggregateResultSetExtractor<T> implements ResultSetExtractor<Iterable<T>>
 	 * column of the {@link ResultSet} that holds the data for that
 	 * {@link org.springframework.data.relational.core.mapping.AggregatePath}.
 	 */
-	AggregateResultSetExtractor(RelationalPersistentEntity<T> rootEntity,
-			JdbcConverter converter, PathToColumnMapping pathToColumn) {
+	AggregateResultSetExtractor(RelationalPersistentEntity<T> rootEntity, JdbcConverter converter,
+			PathToColumnMapping pathToColumn) {

 		Assert.notNull(rootEntity, "rootEntity must not be null");
 		Assert.notNull(converter, "converter must not be null");

@@ -126,6 +126,8 @@ private Object hydrateInstance(EntityInstantiator instantiator, ResultSetParamet
 		return instance;
 	}

+
+
 	/**
 	 * A {@link Reader} is responsible for reading a single entity or collection of entities from a set of columns
 	 *

Diff for: spring-data-jdbc/src/main/java/org/springframework/data/jdbc/core/convert/ResultSetRowDocumentExtractor.java (new file, +203 lines)

/*
 * Copyright 2023 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.data.jdbc.core.convert;

import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.Map;

import org.springframework.dao.DataRetrievalFailureException;
import org.springframework.data.jdbc.core.convert.RowDocumentExtractorSupport.AggregateContext;
import org.springframework.data.jdbc.core.convert.RowDocumentExtractorSupport.RowDocumentSink;
import org.springframework.data.jdbc.core.convert.RowDocumentExtractorSupport.TabularResultAdapter;
import org.springframework.data.relational.core.mapping.AggregatePath;
import org.springframework.data.relational.core.mapping.RelationalMappingContext;
import org.springframework.data.relational.core.mapping.RelationalPersistentEntity;
import org.springframework.data.relational.domain.RowDocument;
import org.springframework.jdbc.support.JdbcUtils;
import org.springframework.lang.Nullable;
import org.springframework.util.LinkedCaseInsensitiveMap;

/**
 * {@link ResultSet}-driven extractor to extract {@link RowDocument documents}.
 *
 * @author Mark Paluch
 * @since 3.2
 */
class ResultSetRowDocumentExtractor {

	private final RelationalMappingContext context;
	private final PathToColumnMapping propertyToColumn;

	ResultSetRowDocumentExtractor(RelationalMappingContext context, PathToColumnMapping propertyToColumn) {
		this.context = context;
		this.propertyToColumn = propertyToColumn;
	}

	/**
	 * Adapter to extract values and column metadata from a {@link ResultSet}.
	 */
	enum ResultSetAdapter implements TabularResultAdapter<ResultSet> {
		INSTANCE;

		@Override
		public Object getObject(ResultSet row, int index) {
			try {
				return JdbcUtils.getResultSetValue(row, index);
			} catch (SQLException e) {
				throw new DataRetrievalFailureException("Cannot retrieve column " + index + " from ResultSet", e);
			}
		}

		@Override
		public Map<String, Integer> getColumnMap(ResultSet result) {

			try {
				ResultSetMetaData metaData = result.getMetaData();
				Map<String, Integer> columns = new LinkedCaseInsensitiveMap<>(metaData.getColumnCount());

				for (int i = 0; i < metaData.getColumnCount(); i++) {
					columns.put(metaData.getColumnLabel(i + 1), i + 1);
				}
				return columns;
			} catch (SQLException e) {
				throw new DataRetrievalFailureException("Cannot retrieve ColumnMap from ResultSet", e);
			}
		}
	}

	/**
	 * Reads the next {@link RowDocument} from the {@link ResultSet}. The result set can be pristine (i.e.
	 * {@link ResultSet#isBeforeFirst()}) or pointing already at a row.
	 *
	 * @param entity entity defining the document structure.
	 * @param resultSet the result set to consume.
	 * @return a {@link RowDocument}.
	 * @throws SQLException if thrown by the JDBC API.
	 * @throws IllegalStateException if the {@link ResultSet#isAfterLast() fully consumed}.
	 */
	public RowDocument extractNextDocument(Class<?> entity, ResultSet resultSet) throws SQLException {
		return extractNextDocument(context.getRequiredPersistentEntity(entity), resultSet);
	}

	/**
	 * Reads the next {@link RowDocument} from the {@link ResultSet}. The result set can be pristine (i.e.
	 * {@link ResultSet#isBeforeFirst()}) or pointing already at a row.
	 *
	 * @param entity entity defining the document structure.
	 * @param resultSet the result set to consume.
	 * @return a {@link RowDocument}.
	 * @throws SQLException if thrown by the JDBC API.
	 * @throws IllegalStateException if the {@link ResultSet#isAfterLast() fully consumed}.
	 */
	public RowDocument extractNextDocument(RelationalPersistentEntity<?> entity, ResultSet resultSet)
			throws SQLException {

		Iterator<RowDocument> iterator = iterate(entity, resultSet);

		if (!iterator.hasNext()) {
			throw new IllegalStateException("ResultSet is fully consumed");
		}

		return iterator.next();
	}

	/**
	 * Obtain a {@link Iterator} to retrieve {@link RowDocument documents} from a {@link ResultSet}.
	 *
	 * @param entity the entity to determine the document structure.
	 * @param rs the input result set.
	 * @return an iterator to consume the {@link ResultSet} as RowDocuments.
	 * @throws SQLException if thrown by the JDBC API.
	 */
	public Iterator<RowDocument> iterate(RelationalPersistentEntity<?> entity, ResultSet rs) throws SQLException {
		return new RowDocumentIterator(entity, rs);
	}

	/**
	 * Iterator implementation that advances through the {@link ResultSet} and feeds its input into a
	 * {@link org.springframework.data.jdbc.core.convert.RowDocumentExtractorSupport.RowDocumentSink}.
	 */
	private class RowDocumentIterator implements Iterator<RowDocument> {

		private final ResultSet resultSet;
		private final AggregatePath rootPath;
		private final RelationalPersistentEntity<?> rootEntity;
		private final Integer identifierIndex;
		private final AggregateContext<ResultSet> aggregateContext;

		private final boolean initiallyConsumed;
		private boolean hasNext;

		RowDocumentIterator(RelationalPersistentEntity<?> entity, ResultSet resultSet) throws SQLException {

			ResultSetAdapter adapter = ResultSetAdapter.INSTANCE;

			if (resultSet.isBeforeFirst()) {
				hasNext = resultSet.next();
			}

			this.initiallyConsumed = resultSet.isAfterLast();
			this.rootPath = context.getAggregatePath(entity);
			this.rootEntity = entity;

			String idColumn = propertyToColumn.column(rootPath.append(entity.getRequiredIdProperty()));
			Map<String, Integer> columns = adapter.getColumnMap(resultSet);
			this.aggregateContext = new AggregateContext<>(adapter, context, propertyToColumn, columns);

			this.resultSet = resultSet;
			this.identifierIndex = columns.get(idColumn);
		}

		@Override
		public boolean hasNext() {

			if (initiallyConsumed) {
				return false;
			}

			return hasNext;
		}

		@Override
		@Nullable
		public RowDocument next() {

			RowDocumentSink<ResultSet> reader = new RowDocumentSink<>(aggregateContext, rootEntity, rootPath);
			Object key = ResultSetAdapter.INSTANCE.getObject(resultSet, identifierIndex);

			try {
				do {
					Object nextKey = ResultSetAdapter.INSTANCE.getObject(resultSet, identifierIndex);

					if (nextKey != null && !nextKey.equals(key)) {
						break;
					}

					reader.accept(resultSet);
					hasNext = resultSet.next();
				} while (hasNext);
			} catch (SQLException e) {
				throw new DataRetrievalFailureException("Cannot advance ResultSet", e);
			}

			return reader.getResult();
		}
	}
}
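
A rough usage sketch of the new extractor (not part of the commit; mappingContext, pathToColumn, resultSet, and Person are placeholder names): the extractor folds consecutive ResultSet rows that share the same root identifier into one RowDocument each, as implemented in RowDocumentIterator.next() above.

// Hypothetical caller within org.springframework.data.jdbc.core.convert.
ResultSetRowDocumentExtractor extractor = new ResultSetRowDocumentExtractor(mappingContext, pathToColumn);

Iterator<RowDocument> documents = extractor.iterate(
		mappingContext.getRequiredPersistentEntity(Person.class), resultSet);

while (documents.hasNext()) {
	// One document per distinct root id, with nested documents/lists for joined paths.
	RowDocument document = documents.next();
}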
