This repository was archived by the owner on Apr 14, 2022. It is now read-only.

Commit a2e918f

Provide list of usability features:

1. Zero config
2. require('graphql').execute()/compile() functionality
3. GraphiQL
4. Config complementation
1 parent 2b8b86a commit a2e918f

15 files changed: +1631 -22 lines changed
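
For context, here is a rough sketch of how the zero-config entry points named in the commit message might be used. The query text, space names and the exact compile()/execute() signatures below are assumptions for illustration only; they are not part of the diffs shown on this page.

    -- Hypothetical zero-config usage sketch (names and signatures assumed).
    local graphql_lib = require('graphql')

    local query = [[
        query user_by_id($user_id: String) {
            user_collection(user_id: $user_id) {
                user_id
                name
            }
        }
    ]]

    -- one-shot execution against spaces whose formats are defined in box
    local result = graphql_lib.execute(query, {user_id = 'user_id_1'})

    -- or compile once and run many times (assuming the compiled query object
    -- exposes an execute(variables) method)
    local compiled_query = graphql_lib.compile(query)
    local result_2 = compiled_query:execute({user_id = 'user_id_1'})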

graphql/accessor_general.lua

Lines changed: 7 additions & 2 deletions

@@ -839,7 +839,7 @@ local function process_tuple(state, tuple, opts)
 
     -- convert tuple -> object
     local obj = opts.unflatten_tuple(collection_name, tuple,
-        opts.default_unflatten_tuple)
+        { use_tomap = opts.use_tomap }, opts.default_unflatten_tuple)
 
     -- skip all items before pivot (the item pointed by offset)
     if not state.pivot_found and pivot_filter then
@@ -987,6 +987,7 @@ local function select_internal(self, collection_name, from, filter, args, extra)
        fetched_object_cnt_max = self.settings.fetched_object_cnt_max,
        collection_name = collection_name,
        unflatten_tuple = self.funcs.unflatten_tuple,
+       use_tomap = self.collection_use_tomap[collection_name] or false,
        default_unflatten_tuple = default_unflatten_tuple,
        pcre = args.pcre,
        resolveField = extra.resolveField,
@@ -1196,7 +1197,10 @@ end
 --- Provided `funcs` argument determines certain functions for retrieving
 --- tuples.
 ---
---- @tparam table opts `schemas`, `collections`, `service_fields` and `indexes`
+--- @tparam table opts `schemas`, `collections`, `service_fields`, `indexes` and
+--- `collection_use_tomap` ({[collection_name] = whether objects in collection
+--- collection_name are intended to be unflattened using the
+--- tuple:tomap({names_only = true}) method instead of compiled_avro_schema.unflatten(tuple), ...})
 --- to give the data accessor all needed meta-information re data; the format is
 --- shown below; additional attributes `resulting_object_cnt_max` and
 --- `fetched_object_cnt_max` are optional positive numbers which help to control
@@ -1301,6 +1305,7 @@ function accessor_general.new(opts, funcs)
        indexes = indexes,
        models = models,
        default_unflatten_tuple = default_unflatten_tuple,
+       collection_use_tomap = opts.collection_use_tomap or {},
        index_cache = index_cache,
        funcs = funcs,
        settings = {
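
The per-collection switch travels from accessor_general.new() opts into each select: it is stored as self.collection_use_tomap and then forwarded to funcs.unflatten_tuple as { use_tomap = ... }. A sketch of the relevant part of the opts table follows; the schemas, collections, service_fields and indexes values are elided, and 'user_collection' is an illustrative name.

    -- Sketch of accessor opts enabling tomap-based unflattening for one
    -- collection; all other opts values are elided.
    local opts = {
        schemas = {},          -- elided: avro-schema definitions
        collections = {},      -- elided: collection descriptions
        service_fields = {},   -- elided: per-collection service fields
        indexes = {},          -- elided: per-collection index descriptions
        -- new in this commit: unflatten tuples of user_collection with
        -- tuple:tomap({names_only = true}) instead of the avro-schema based
        -- unflattening
        collection_use_tomap = {
            user_collection = true,
        },
    }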

graphql/accessor_shard.lua

Lines changed: 12 additions & 4 deletions

@@ -84,12 +84,20 @@ end
 ---
 --- @tparam string collection_name
 --- @tparam cdata/table tuple
+--- @tparam table opts
+--- * `use_tomap` (boolean, default: false): whether objects in collection
+--- collection_name are intended to be unflattened using the
+--- tuple:tomap({names_only = true}) method instead of compiled_avro_schema.unflatten(tuple)
 --- @tparam function default unflatten action, call it in the following way:
 ---
---- ```
---- return default(collection_name, tuple)
---- ```
-local function unflatten_tuple(collection_name, tuple, default)
+---
+--- return default(collection_name, tuple)
+---
+local function unflatten_tuple(collection_name, tuple, opts, default)
+    if opts.use_tomap then
+        return tuple:tomap({ names_only = true })
+    end
+
     return default(collection_name, tuple)
 end
 
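
tuple:tomap({names_only = true}) is the stock Tarantool tuple method that returns a table keyed by field names for tuples of a space with a format, which is the same shape the avro-schema unflatten path produces. A small illustration (the space, index and field names are examples only):

    -- Illustration: a formatted space, so its tuples know their field names.
    box.cfg{}
    local s = box.schema.space.create('user_collection', {if_not_exists = true})
    s:format({
        {name = 'user_id', type = 'string'},
        {name = 'name', type = 'string'},
    })
    s:create_index('user_id_index',
        {parts = {1, 'string'}, if_not_exists = true})
    s:replace({'user_id_1', 'Ivan'})

    local tuple = s:get('user_id_1')
    -- with use_tomap enabled the accessor returns this map directly:
    local obj = tuple:tomap({names_only = true})
    -- obj is {user_id = 'user_id_1', name = 'Ivan'}, with no positional keys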

graphql/accessor_space.lua

Lines changed: 11 additions & 4 deletions

@@ -26,12 +26,19 @@ end
 ---
 --- @tparam string collection_name
 --- @tparam cdata/table tuple
+--- @tparam table opts
+--- * `use_tomap` (boolean, default: false): whether objects in collection
+--- collection_name are intended to be unflattened using the
+--- tuple:tomap({names_only = true}) method instead of compiled_avro_schema.unflatten(tuple)
 --- @tparam function default unflatten action, call it in the following way:
 ---
---- ```
---- return default(collection_name, tuple)
---- ```
-local function unflatten_tuple(collection_name, tuple, default)
+---
+--- return default(collection_name, tuple)
+---
+local function unflatten_tuple(collection_name, tuple, opts, default)
+    if opts.use_tomap then
+        return tuple:tomap({ names_only = true })
+    end
     return default(collection_name, tuple)
 end
 
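
Both accessors now share the same four-argument unflatten_tuple contract; accessor_general calls it as opts.unflatten_tuple(collection_name, tuple, { use_tomap = ... }, opts.default_unflatten_tuple). Below is a standalone illustration of that contract, with a stub in place of the real avro-schema based default unflattener and a plain table in place of a box tuple.

    -- Standalone illustration of the unflatten_tuple(collection_name, tuple,
    -- opts, default) contract; fake_default stands in for the avro-schema
    -- based unflattener.
    local function fake_default(collection_name, tuple)
        return {user_id = tuple[1], name = tuple[2]}
    end

    local function unflatten_tuple(collection_name, tuple, opts, default)
        if opts.use_tomap then
            return tuple:tomap({names_only = true})
        end
        return default(collection_name, tuple)
    end

    -- with use_tomap = false the default (avro) path is taken
    local obj = unflatten_tuple('user_collection', {'user_id_1', 'Ivan'},
        {use_tomap = false}, fake_default)
    assert(obj.name == 'Ivan')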

graphql/config_complement.lua (new file)

Lines changed: 255 additions & 0 deletions

--- Config complement module provides an ability to complement a user-defined
--- config (in a simplified format) to the fully specified format.
---
--- Notes:
---
--- * Currently the module complements only connections (cfg.connections),
---   see @{complement_connections}.

local json = require('json')
local yaml = require('yaml')
local log = require('log')
local utils = require('graphql.utils')
local check = utils.check
local get_spaces_formats = require('graphql.simple_config').get_spaces_formats

local config_complement = {}

--- The function determines the connection type by connection.parts
--- and the source collection space format.
---
--- XXX Currently there are two possible situations when connection_parts form
--- a unique index: either all source_fields are nullable (1:1*) or all
--- source_fields are non-nullable (1:1). In case of partially nullable
--- connection_parts (which form a unique index) an error is raised. There is
--- an alternative: relax this requirement and deduce the non-null connection
--- type in that case.
local function determine_connection_type(connection_parts, index,
        source_space_format)
    local type

    if #connection_parts < #(index.fields) then
        type = '1:N'
    end

    if #connection_parts == #(index.fields) then
        if index.unique then
            type = '1:1'
        else
            type = '1:N'
        end
    end

    local is_all_nullable = true
    local is_all_not_nullable = true

    for _, connection_part in pairs(connection_parts) do
        for _, field_format in ipairs(source_space_format) do
            if connection_part.source_field == field_format.name then
                if field_format.is_nullable == true then
                    is_all_not_nullable = false
                else
                    is_all_nullable = false
                end
            end
        end
    end

    if is_all_nullable == is_all_not_nullable and type == '1:1' then
        error('source_fields in connection_parts must be all nullable or ' ..
            'all non-nullable at the same time')
    end

    if is_all_nullable and type == '1:1' then
        type = '1:1*'
    end

    return type
end

-- The function returns connection_parts sorted by destination_fields as
-- an index_fields prefix.
local function sort_parts(connection_parts, index_fields)
    local sorted_parts = {}

    -- check that every field from connection_parts exists in index_fields
    for _, part in ipairs(connection_parts) do
        local is_found = false
        for _, index_field in ipairs(index_fields) do
            if part.destination_field == index_field then
                is_found = true
            end
        end
        assert(is_found, ('part.destination_field %s was not found in ' ..
            'connection index %s'):format(part.destination_field,
            json.encode(index_fields)))
    end

    -- sort parts and check that sorted_parts form an index prefix
    -- (including the index itself)
    for i = 1, utils.table_size(connection_parts) do
        local index_field = index_fields[i]
        local is_found = false
        for _, part in ipairs(connection_parts) do
            if part.destination_field == index_field then
                sorted_parts[i] = {destination_field = part.destination_field,
                    source_field = part.source_field}
                is_found = true
                break
            end
        end
        -- no match found for the i-th index field
        if not is_found then
            error(('given parts %s do not form an index or an index ' ..
                'prefix %s'):format(json.encode(connection_parts),
                json.encode(index_fields)))
        end
    end
    return sorted_parts
end

local function is_order_different(connection_parts, index_fields)
    for i, _ in ipairs(connection_parts) do
        if connection_parts[i].destination_field ~= index_fields[i] then
            return true
        end
    end
    return false
end

--- The function complements partially defined (nil/number) connection parts
--- or checks and sorts fully defined (table) connection parts.
--- @tparam table parts partially defined connection parts given by the user
--- @tparam table index connection index (cfg.indexes[collection][index_name]);
--- index.fields is used as the source of information about the index parts
--- order. An error is raised when parts is a table and cannot form a prefix
--- of index.fields. When parts can be resorted to fit the right order, they
--- are resorted.
local function determine_connection_parts(parts, index)
    check(parts, 'parts', 'nil', 'number', 'table')
    local result_parts = {}

    -- The user defined no parts of the connection. All the connection's index
    -- fields are taken as 'parts'.
    if type(parts) == 'nil' then
        for i, v in ipairs(index.fields) do
            result_parts[i] = {source_field = v, destination_field = v}
        end
    end

    -- The user defined a number of index fields which must form an index
    -- prefix. The first 'number' index fields are taken as 'parts'.
    if type(parts) == 'number' then
        for i = 1, parts do
            local v = index.fields[i]
            result_parts[i] = {source_field = v, destination_field = v}
        end
    end

    -- The user defined parts as pairs of {source_field = foo_field,
    -- destination_field = boo_field}. These 'parts' may correspond either to
    -- the full index or to an index prefix.
    if type(parts) == 'table' then
        -- sorting parts is necessary to check whether the user-defined parts
        -- form an index or an index prefix
        if is_order_different(parts, index.fields) then
            log.warn(('Parts \n %s \n were given in the wrong order and ' ..
                'sorted to match the right order of destination collection ' ..
                'index fields \n %s \n'):format(yaml.encode(parts),
                yaml.encode(index.fields)))
            result_parts = sort_parts(parts, index.fields)
        else
            result_parts = parts
        end
    end

    return result_parts
end

--- The function complements collections' connections, described in the
--- simplified format, to connections in the fully specified format. The
--- connection type is determined by the index type. Each connection is added
--- to its `source_collection' collection, because the format of a collection
--- assumes inclusion of all outcoming connections. Consider an example:
---
---     "connections" : [
---         {
---             "name": "order_connection",
---             "source_collection": "user_collection",
---             "destination_collection": "order_collection",
---             "index_name": "user_id_index",
---             "parts" : nil | number | table (destination fields can be omitted)
---                 in case of 'table' the expected format is:
---                 "parts": [
---                     {"source_field": "user_id", "destination_field": "user_id"},
---                     ...
---                 ]
---         },
---         ...
---     ]
---
--- It will produce the following complement in 'user_collection':
---
---     "user_collection": {
---         "schema_name": "user",
---         "connections": [
---             {
---                 "type": "1:N",
---                 "name": "order_connection",
---                 "destination_collection": "order_collection",
---                 "parts": [
---                     { "source_field": "user_id", "destination_field": "user_id" }
---                 ],
---                 "index_name": "user_id_index"
---             },
---         ]
---     }
---
--- @tparam table collections cfg.collections (will be changed in place)
--- @tparam table connections cfg.connections - user-defined connections
--- @tparam table indexes cfg.indexes - {[collection_name] = collection_indexes, ...}
--- @treturn table the complemented `collections` (changed in place)
local function complement_connections(collections, connections, indexes)
    if connections == nil then
        return collections
    end

    check(collections, 'collections', 'table')
    check(connections, 'connections', 'table')

    local spaces_formats = get_spaces_formats()

    for _, c in pairs(connections) do
        check(c.name, 'connection.name', 'string')
        check(c.source_collection, 'connection.source_collection', 'string')
        check(c.destination_collection, 'connection.destination_collection',
            'string')
        check(c.index_name, 'connection.index_name', 'string')
        check(c.parts, 'connection.parts', 'number', 'table', 'nil')

        local index = indexes[c.source_collection][c.index_name]
        assert(index.unique ~= nil, 'index.unique must not be nil ' ..
            'during connections complementing')

        local result_c = {}
        result_c.source_collection = c.source_collection
        result_c.destination_collection = c.destination_collection
        result_c.parts = determine_connection_parts(c.parts, index)

        local source_space_format = spaces_formats[result_c.source_collection]

        result_c.type = determine_connection_type(result_c.parts, index,
            source_space_format)
        result_c.index_name = c.index_name
        result_c.name = c.name

        local collection_connections =
            collections[c.source_collection].connections or {}
        collection_connections[#collection_connections + 1] = result_c
        -- store back in case the collection had no connections list yet
        collections[c.source_collection].connections = collection_connections
    end
    return collections
end

--- The function complements cfg.collections' connections using the given
--- cfg.connections. See @{complement_connections} for details.
function config_complement.complement_cfg(cfg)
    cfg.collections = complement_connections(cfg.collections, cfg.connections,
        cfg.indexes)
    return cfg
end

return config_complement
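
A usage sketch for the new module, following the docstring example above. It assumes Tarantool spaces named user_collection and order_collection with formats already exist, since simple_config.get_spaces_formats() reads formats from box; schemas and service_fields are elided. Note that the module looks the index up under the connection's source collection.

    -- Sketch: complementing a simplified config.
    local config_complement = require('graphql.config_complement')

    local cfg = {
        collections = {
            user_collection = {schema_name = 'user', connections = {}},
            order_collection = {schema_name = 'order', connections = {}},
        },
        indexes = {
            -- looked up as indexes[source_collection][index_name]
            user_collection = {
                user_id_index = {
                    fields = {'user_id'},
                    unique = false,
                },
            },
        },
        -- simplified, user-facing connection description
        connections = {
            {
                name = 'order_connection',
                source_collection = 'user_collection',
                destination_collection = 'order_collection',
                index_name = 'user_id_index',
                parts = nil,  -- nil: use all index fields as parts
            },
        },
    }

    cfg = config_complement.complement_cfg(cfg)
    -- cfg.collections.user_collection.connections[1] is now fully specified:
    -- type = '1:N' (non-unique index), parts = {{source_field = 'user_id',
    -- destination_field = 'user_id'}}, index_name and name copied over.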

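For reference, the three accepted forms of a simplified connection's parts, and the fully specified form each one expands to for an index with fields = {'user_id', 'order_id'}; shown as plain data, since determine_connection_parts is local to the module, and 'uid' is an illustrative field name.

    -- parts omitted (nil): every index field becomes a part
    local parts_from_nil = {
        {source_field = 'user_id', destination_field = 'user_id'},
        {source_field = 'order_id', destination_field = 'order_id'},
    }

    -- parts = 1: the leading index field only (an index prefix)
    local parts_from_number = {
        {source_field = 'user_id', destination_field = 'user_id'},
    }

    -- parts given as a table: taken as-is, after checking that the
    -- destination_field values form the index or an index prefix
    -- (re-sorting them into index field order if necessary)
    local parts_from_table = {
        {source_field = 'uid', destination_field = 'user_id'},
    }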