@@ -131,6 +131,13 @@ public class ComputeStatsStmt extends StatementBase {
   private static final String STATS_FETCH_NUM_PARTITIONS_WITH_STATS =
       STATS_FETCH_PREFIX + ".NumPartitionsWithStats";
 
+  // The maximum number of partitions that may be explicitly selected by filter
+  // predicates. Any query that selects more than this automatically drops back to a full
+  // incremental stats recomputation.
+  // TODO: We can probably do better than this, e.g. running several queries, each of
+  // which selects up to MAX_INCREMENTAL_PARTITIONS partitions.
+  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;
+
   protected final TableName tableName_;
 
   protected final TableSampleClause sampleParams_;
@@ -151,14 +158,14 @@ public class ComputeStatsStmt extends StatementBase {
   protected String columnStatsQueryStr_;
 
   // If true, stats will be gathered incrementally per-partition.
-  private boolean isIncremental_ = false;
+  private boolean isIncremental_;
 
   // If true, expect the compute stats process to produce output for all partitions in the
   // target table. In that case, 'expectedPartitions_' will be empty. The point of this
   // flag is to optimize the case where all partitions are targeted.
   // False for unpartitioned HDFS tables, non-HDFS tables or when stats extrapolation
   // is enabled.
-  private boolean expectAllPartitions_ = false;
+  private boolean expectAllPartitions_;
 
   // The list of valid partition statistics that can be used in an incremental computation
   // without themselves being recomputed. Populated in analyze().
@@ -173,23 +180,16 @@ public class ComputeStatsStmt extends StatementBase {
 
   // If non-null, partitions that an incremental computation might apply to. Must be
   // null if this is a non-incremental computation.
-  private PartitionSet partitionSet_ = null;
+  private PartitionSet partitionSet_;
 
   // If non-null, represents the user-specified list of columns for computing statistics.
   // Not supported for incremental statistics.
-  private List<String> columnWhitelist_ = null;
+  private List<String> columnWhitelist_;
 
   // The set of columns to be analyzed. Each column is valid: it must exist in the table
   // schema, it must be of a type that can be analyzed, and cannot refer to a partitioning
   // column for HDFS tables. If the set is null, no columns are restricted.
-  private Set<Column> validatedColumnWhitelist_ = null;
-
-  // The maximum number of partitions that may be explicitly selected by filter
-  // predicates. Any query that selects more than this automatically drops back to a full
-  // incremental stats recomputation.
-  // TODO: We can probably do better than this, e.g. running several queries, each of
-  // which selects up to MAX_INCREMENTAL_PARTITIONS partitions.
-  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;
+  private Set<Column> validatedColumnWhitelist_;
 
   /**
    * Should only be constructed via static creation functions.
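
To make the intent of the relocated constant concrete, here is a minimal, hypothetical sketch of the guard its comment describes: if the filter predicates explicitly select more partitions than the cap allows, the statement drops back to a full incremental recomputation. The class, method, and parameter names below are illustrative only and are not taken from ComputeStatsStmt.

import java.util.List;

// Illustrative sketch only (assumed names, not Impala code): decide whether a
// COMPUTE INCREMENTAL STATS request can target just the explicitly selected
// partitions, or must fall back to recomputing stats for all partitions.
class IncrementalPartitionGuard {
  // Mirrors the constant moved to the top of ComputeStatsStmt in this change.
  private static final int MAX_INCREMENTAL_PARTITIONS = 1000;

  // Returns true if the selected partitions can be processed individually;
  // false means the caller should recompute incremental stats for every partition.
  static boolean canTargetSelectedPartitions(List<String> selectedPartitionNames) {
    return selectedPartitionNames.size() <= MAX_INCREMENTAL_PARTITIONS;
  }
}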