From 13c5c7b952f92b6001992f3eca303a7037425f3b Mon Sep 17 00:00:00 2001 From: Alexander Turenko Date: Tue, 6 Mar 2018 04:56:17 +0300 Subject: [PATCH] Nullable 1:1 connections Fixes #44. --- graphql/tarantool_graphql.lua | 86 +++- test/local/space_nullable_1_1_conn.result | 180 ++++++++ test/local/space_nullable_1_1_conn.test.lua | 56 +++ .../shard_nullable_1_1_conn.result | 318 ++++++++++++++ .../shard_nullable_1_1_conn.test.lua | 76 ++++ .../shard_redundancy/nullable_1_1_conn.result | 353 +++++++++++++++ .../nullable_1_1_conn.test.lua | 93 ++++ test/testdata/nullable_1_1_conn_testdata.lua | 406 ++++++++++++++++++ 8 files changed, 1554 insertions(+), 14 deletions(-) create mode 100644 test/local/space_nullable_1_1_conn.result create mode 100755 test/local/space_nullable_1_1_conn.test.lua create mode 100644 test/shard_no_redundancy/shard_nullable_1_1_conn.result create mode 100644 test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua create mode 100644 test/shard_redundancy/nullable_1_1_conn.result create mode 100644 test/shard_redundancy/nullable_1_1_conn.test.lua create mode 100644 test/testdata/nullable_1_1_conn_testdata.lua diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua index d1e8eb5..a063c2b 100644 --- a/graphql/tarantool_graphql.lua +++ b/graphql/tarantool_graphql.lua @@ -230,7 +230,7 @@ end --- Convert each field of an avro-schema to a graphql type. --- --- @tparam table state for read state.accessor and previously filled ---- state.types +--- state.nullable_collection_types --- @tparam table fields fields part from an avro-schema --- --- @treturn table `res` -- map with type names as keys and graphql types as @@ -253,7 +253,7 @@ end --- The function converts passed avro-schema to a GraphQL type. 
--- --- @tparam table state for read state.accessor and previously filled ---- state.types (state.types are gql types) +--- state.nullable_collection_types (those are gql types) --- @tparam table avro_schema input avro-schema --- @tparam[opt] table collection table with schema_name, connections fields --- described a collection (e.g. tarantool's spaces) @@ -307,8 +307,8 @@ gql_type = function(state, avro_schema, collection, collection_name) for _, c in ipairs((collection or {}).connections or {}) do assert(type(c.type) == 'string', 'connection.type must be a string, got ' .. type(c.type)) - assert(c.type == '1:1' or c.type == '1:N', - 'connection.type must be 1:1 or 1:N, got ' .. c.type) + assert(c.type == '1:1' or c.type == '1:1*' or c.type == '1:N', + 'connection.type must be 1:1, 1:1* or 1:N, got ' .. c.type) assert(type(c.name) == 'string', 'connection.name must be a string, got ' .. type(c.name)) assert(type(c.destination_collection) == 'string', @@ -319,16 +319,20 @@ gql_type = function(state, avro_schema, collection, collection_name) -- gql type of connection field local destination_type = - state.types[c.destination_collection] + state.nullable_collection_types[c.destination_collection] assert(destination_type ~= nil, ('destination_type (named %s) must not be nil'):format( c.destination_collection)) local c_args if c.type == '1:1' then + destination_type = types.nonNull(destination_type) + c_args = state.object_arguments[c.destination_collection] + elseif c.type == '1:1*' then c_args = state.object_arguments[c.destination_collection] elseif c.type == '1:N' then - destination_type = types.nonNull(types.list(destination_type)) + destination_type = types.nonNull(types.list(types.nonNull( + destination_type))) c_args = state.all_arguments[c.destination_collection] else error('unknown connection type: ' .. 
tostring(c.type)) @@ -343,6 +347,8 @@ gql_type = function(state, avro_schema, collection, collection_name) resolve = function(parent, args_instance, info) local destination_args_names = {} local destination_args_values = {} + local are_all_parts_non_null = true + local are_all_parts_null = true for _, part in ipairs(c.parts) do assert(type(part.source_field) == 'string', @@ -354,8 +360,45 @@ gql_type = function(state, avro_schema, collection, collection_name) destination_args_names[#destination_args_names + 1] = part.destination_field + + local value = parent[part.source_field] destination_args_values[#destination_args_values + 1] = - parent[part.source_field] + value + + if value ~= nil then -- nil or box.NULL + are_all_parts_null = false + else + are_all_parts_non_null = false + end + end + + -- Check FULL match constraint before request of + -- destination object(s). Note that connection key parts + -- can be prefix of index key parts. Zero parts count + -- considered as ok by this check. + local ok = are_all_parts_null or are_all_parts_non_null + if not ok then -- avoid extra json.encode() + assert(ok, + 'FULL MATCH constraint was failed: connection ' .. + 'key parts must be all non-nulls or all nulls; ' .. + 'object: ' .. json.encode(parent)) + end + + -- Avoid non-needed index lookup on a destination + -- collection when all connection parts are null: + -- * return null for 1:1* connection; + -- * return {} for 1:N connection (except the case when + -- source collection is the Query pseudo-collection). + if collection_name ~= 'Query' and are_all_parts_null then + if c.type ~= '1:1*' and c.type ~= '1:N' then + -- `if` is to avoid extra json.encode + assert(c.type == '1:1*' or c.type == '1:N', + ('only 1:1* or 1:N connections can have ' .. + 'all key parts null; parent is %s from ' .. 
+ 'collection "%s"'):format(json.encode(parent), + tostring(collection_name))) + end + return c.type == '1:N' and {} or nil end local from = { @@ -386,7 +429,10 @@ gql_type = function(state, avro_schema, collection, collection_name) assert(type(objs) == 'table', 'objs list received from an accessor ' .. 'must be a table, got ' .. type(objs)) - if c.type == '1:1' then + if c.type == '1:1' or c.type == '1:1*' then + -- we expect here exactly one object even for 1:1* + -- connections because we processed all-parts-are-null + -- situation above assert(#objs == 1, 'expect one matching object, got ' .. tostring(#objs)) @@ -405,7 +451,7 @@ gql_type = function(state, avro_schema, collection, collection_name) avro_schema.name, fields = fields, }) - return avro_t == 'enum' and types.nonNull(res) or res + return avro_t == 'record' and types.nonNull(res) or res elseif avro_t == 'enum' then error('enums not implemented yet') -- XXX elseif avro_t == 'array' or avro_t == 'array*' then @@ -476,15 +522,21 @@ local function create_root_collection(state) -- `gql_type` is designed to create GQL type corresponding to a real schema -- and connections. However it also works with the fake schema. + -- Query type must be the Object, so it cannot be nonNull. 
local root_type = gql_type(state, root_schema, root_collection, "Query") state.schema = schema.create({ - query = root_type + query = nullable(root_type), }) end local function parse_cfg(cfg) local state = {} - state.types = utils.gen_booking_table({}) + + -- collection type is always record, so always non-null; we can lazily + -- evaluate non-null type from nullable type, but not vice versa, so we + -- collect nullable types here and evaluate non-null ones where needed + state.nullable_collection_types = utils.gen_booking_table({}) + state.object_arguments = utils.gen_booking_table({}) state.list_arguments = utils.gen_booking_table({}) state.all_arguments = utils.gen_booking_table({}) @@ -523,8 +575,15 @@ local function parse_cfg(cfg) assert(schema.type == 'record', 'top-level schema must have record avro type, got ' .. tostring(schema.type)) - state.types[collection_name] = gql_type(state, schema, collection, - collection_name) + local collection_type = + gql_type(state, schema, collection, collection_name) + -- we utilize the fact that collection type is always non-null and + -- don't store this information; see comment above for + -- `nullable_collection_types` variable definition + assert(collection_type.__type == 'NonNull', + 'collection must always has non-null type') + state.nullable_collection_types[collection_name] = + nullable(collection_type) -- prepare arguments' types local object_args = convert_record_fields_to_args(schema.fields, @@ -536,7 +595,6 @@ local function parse_cfg(cfg) state.object_arguments[collection_name] = object_args state.list_arguments[collection_name] = list_args state.all_arguments[collection_name] = args - end -- create fake root `Query` collection create_root_collection(state) diff --git a/test/local/space_nullable_1_1_conn.result b/test/local/space_nullable_1_1_conn.result new file mode 100644 index 0000000..29dbcc5 --- /dev/null +++ b/test/local/space_nullable_1_1_conn.result @@ -0,0 +1,180 @@ + + + +---------------------+ + | 
a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ +RUN downside_a {{{ +QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } +VARIABLES +--- +body: a +... + +RESULT +--- +email: +- successors: + - successors: &0 [] + body: c + - successors: + - successors: *0 + body: g + - successors: *0 + body: f + body: d + - successors: + - successors: *0 + body: e + body: b + body: a +... + +}}} + +RUN downside_h {{{ +QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } +VARIABLES +--- +body: h +... + +RESULT +--- +email: +- successors: + - successors: + - successors: &0 [] + body: m + body: l + - successors: *0 + body: k + body: h +... + +}}} + +RUN upside {{{ +QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } +VARIABLES +--- +body: f +... + +RESULT +--- +email: +- body: f + in_reply_to: + body: d + in_reply_to: + body: a +... + +}}} + +RUN upside_x {{{ +QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } +VARIABLES +--- +body: x +... + +RESULT +--- +ok: false +err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"062b56b1885c71c51153ccb880ac7315","body":"x","in_reply_to_domain":"graphql.tarantool.org","in_reply_to_localpart":null}' +... + +}}} + +RUN upside_y {{{ +QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } +VARIABLES +--- +body: y +... 
+ +RESULT +--- +ok: false +err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"1f70391f6ba858129413bd801b12acbf","body":"y","in_reply_to_domain":null,"in_reply_to_localpart":"1f70391f6ba858129413bd801b12acbf"}' +... + +}}} + diff --git a/test/local/space_nullable_1_1_conn.test.lua b/test/local/space_nullable_1_1_conn.test.lua new file mode 100755 index 0000000..7b311f3 --- /dev/null +++ b/test/local/space_nullable_1_1_conn.test.lua @@ -0,0 +1,56 @@ +#!/usr/bin/env tarantool + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. + package.path + +local graphql = require('graphql') +local testdata = require('test.testdata.nullable_1_1_conn_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +box.cfg{background = false} +testdata.init_spaces() + +-- upload test data +testdata.fill_test_data() + +-- acquire metadata +local metadata = testdata.get_test_metadata() +local schemas = metadata.schemas +local collections = metadata.collections +local service_fields = metadata.service_fields +local indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +local accessor = graphql.accessor_space.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}) + +local gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}) + +-- run queries +-- ----------- + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +testdata.drop_spaces() + +os.exit() diff --git a/test/shard_no_redundancy/shard_nullable_1_1_conn.result 
b/test/shard_no_redundancy/shard_nullable_1_1_conn.result new file mode 100644 index 0000000..56b46ff --- /dev/null +++ b/test/shard_no_redundancy/shard_nullable_1_1_conn.result @@ -0,0 +1,318 @@ +env = require('test_run') +--- +... +test_run = env.new() +--- +... +shard = require('shard') +--- +... +test_run:cmd("setopt delimiter ';'") +--- +- true +... +SERVERS = {'shard1', 'shard2'}; +--- +... +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + }, + login = 'guest', + password = '', + redundancy = 1, +}, 'shard_no_redundancy'); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +fio = require('fio') +--- +... +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +--- +... +graphql = require('graphql') +--- +... +testdata = require('test.testdata.nullable_1_1_conn_testdata') +--- +... +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- +-- init box and data schema +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +-- upload test data +testdata.fill_test_data(shard) +--- +- |2 + + + +---------------------+ + | a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ +... +-- acquire metadata +metadata = testdata.get_test_metadata() +--- +... +schemas = metadata.schemas +--- +... +collections = metadata.collections +--- +... +service_fields = metadata.service_fields +--- +... +indexes = metadata.indexes +--- +... 
+-- build accessor and graphql schemas +-- ---------------------------------- +test_run:cmd("setopt delimiter ';'") +--- +- true +... +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); +--- +... +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +testdata.run_queries(gql_wrapper) +--- +- |+ + RUN downside_a {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: a + ... + + RESULT + --- + email: + - successors: + - successors: &0 [] + body: c + - successors: + - successors: *0 + body: g + - successors: *0 + body: f + body: d + - successors: + - successors: *0 + body: e + body: b + body: a + ... + + }}} + + RUN downside_h {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: h + ... + + RESULT + --- + email: + - successors: + - successors: + - successors: &0 [] + body: m + body: l + - successors: *0 + body: k + body: h + ... + + }}} + + RUN upside {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: f + ... + + RESULT + --- + email: + - body: f + in_reply_to: + body: d + in_reply_to: + body: a + ... + + }}} + + RUN upside_x {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: x + ... 
+ + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"062b56b1885c71c51153ccb880ac7315","body":"x","in_reply_to_domain":"graphql.tarantool.org","in_reply_to_localpart":null}' + ... + + }}} + + RUN upside_y {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: y + ... + + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"1f70391f6ba858129413bd801b12acbf","body":"y","in_reply_to_domain":null,"in_reply_to_localpart":"1f70391f6ba858129413bd801b12acbf"}' + ... + + }}} + +... +-- clean up +-- -------- +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +test_run:drop_cluster(SERVERS) +--- +... 
diff --git a/test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua b/test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua new file mode 100644 index 0000000..e3584d1 --- /dev/null +++ b/test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua @@ -0,0 +1,76 @@ +env = require('test_run') +test_run = env.new() + +shard = require('shard') + +test_run:cmd("setopt delimiter ';'") +SERVERS = {'shard1', 'shard2'}; +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + }, + login = 'guest', + password = '', + redundancy = 1, +}, 'shard_no_redundancy'); +test_run:cmd("setopt delimiter ''"); + +fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + +graphql = require('graphql') +testdata = require('test.testdata.nullable_1_1_conn_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch default') + +-- upload test data +testdata.fill_test_data(shard) + +-- acquire metadata +metadata = testdata.get_test_metadata() +schemas = metadata.schemas +collections = metadata.collections +service_fields = metadata.service_fields +indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +test_run:cmd("setopt delimiter ';'") +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); + +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = 
accessor, +}); +test_run:cmd("setopt delimiter ''"); + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch default') + +test_run:drop_cluster(SERVERS) diff --git a/test/shard_redundancy/nullable_1_1_conn.result b/test/shard_redundancy/nullable_1_1_conn.result new file mode 100644 index 0000000..c57f3d8 --- /dev/null +++ b/test/shard_redundancy/nullable_1_1_conn.result @@ -0,0 +1,353 @@ +-- ---------------------------------------------------------- +-- Motivation: https://github.com/tarantool/graphql/issues/43 +-- ---------------------------------------------------------- +env = require('test_run') +--- +... +test_run = env.new() +--- +... +shard = require('shard') +--- +... +-- we need at least four servers to make sure we have several (two) servers +-- within each replica set and several (two) replica sets +test_run:cmd("setopt delimiter ';'") +--- +- true +... +SERVERS = {'shard1', 'shard2', 'shard3', 'shard4'}; +--- +... +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + { uri = instance_uri('3'), zone = '2' }, + { uri = instance_uri('4'), zone = '3' }, + }, + login = 'guest', + password = '', + redundancy = 2, +}, 'shard_redundancy'); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +fio = require('fio') +--- +... +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +--- +... +graphql = require('graphql') +--- +... +testdata = require('test.testdata.nullable_1_1_conn_testdata') +--- +... 
+-- init box, upload test data and acquire metadata +-- ----------------------------------------------- +-- init box and data schema +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard3') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard4') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +-- upload test data +testdata.fill_test_data(shard) +--- +- |2 + + + +---------------------+ + | a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ +... +-- acquire metadata +metadata = testdata.get_test_metadata() +--- +... +schemas = metadata.schemas +--- +... +collections = metadata.collections +--- +... +service_fields = metadata.service_fields +--- +... +indexes = metadata.indexes +--- +... +-- build accessor and graphql schemas +-- ---------------------------------- +test_run:cmd("setopt delimiter ';'") +--- +- true +... +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); +--- +... +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +testdata.run_queries(gql_wrapper) +--- +- |+ + RUN downside_a {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: a + ... 
+ + RESULT + --- + email: + - successors: + - successors: &0 [] + body: c + - successors: + - successors: *0 + body: g + - successors: *0 + body: f + body: d + - successors: + - successors: *0 + body: e + body: b + body: a + ... + + }}} + + RUN downside_h {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: h + ... + + RESULT + --- + email: + - successors: + - successors: + - successors: &0 [] + body: m + body: l + - successors: *0 + body: k + body: h + ... + + }}} + + RUN upside {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: f + ... + + RESULT + --- + email: + - body: f + in_reply_to: + body: d + in_reply_to: + body: a + ... + + }}} + + RUN upside_x {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: x + ... + + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"062b56b1885c71c51153ccb880ac7315","body":"x","in_reply_to_domain":"graphql.tarantool.org","in_reply_to_localpart":null}' + ... + + }}} + + RUN upside_y {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: y + ... 
+ + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"1f70391f6ba858129413bd801b12acbf","body":"y","in_reply_to_domain":null,"in_reply_to_localpart":"1f70391f6ba858129413bd801b12acbf"}' + ... + + }}} + +... +-- clean up +-- -------- +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard3') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard4') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +test_run:drop_cluster(SERVERS) +--- +... diff --git a/test/shard_redundancy/nullable_1_1_conn.test.lua b/test/shard_redundancy/nullable_1_1_conn.test.lua new file mode 100644 index 0000000..a4d8976 --- /dev/null +++ b/test/shard_redundancy/nullable_1_1_conn.test.lua @@ -0,0 +1,93 @@ +-- ---------------------------------------------------------- +-- Motivation: https://github.com/tarantool/graphql/issues/43 +-- ---------------------------------------------------------- + +env = require('test_run') +test_run = env.new() + +shard = require('shard') + +-- we need at least four servers to make sure we have several (two) servers +-- within each replica set and several (two) replica sets + +test_run:cmd("setopt delimiter ';'") +SERVERS = {'shard1', 'shard2', 'shard3', 'shard4'}; +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + { uri = instance_uri('3'), zone = '2' }, + { uri = instance_uri('4'), zone = '3' }, + }, + login = 'guest', + password = '', + redundancy = 2, +}, 
'shard_redundancy'); +test_run:cmd("setopt delimiter ''"); + +fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path + +graphql = require('graphql') +testdata = require('test.testdata.nullable_1_1_conn_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard3') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard4') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch default') + +-- upload test data +testdata.fill_test_data(shard) + +-- acquire metadata +metadata = testdata.get_test_metadata() +schemas = metadata.schemas +collections = metadata.collections +service_fields = metadata.service_fields +indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +test_run:cmd("setopt delimiter ';'") +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); + +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +test_run:cmd("setopt delimiter ''"); + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard3') 
+require('test.testdata.nullable_1_1_conn_testdata').drop_spaces()
+test_run:cmd('switch shard4')
+require('test.testdata.nullable_1_1_conn_testdata').drop_spaces()
+test_run:cmd('switch default')
+
+test_run:drop_cluster(SERVERS)
diff --git a/test/testdata/nullable_1_1_conn_testdata.lua b/test/testdata/nullable_1_1_conn_testdata.lua
new file mode 100644
index 0000000..8ec2932
--- /dev/null
+++ b/test/testdata/nullable_1_1_conn_testdata.lua
@@ -0,0 +1,406 @@
+-- The example was inspired by [1]. Consider [2] for the problem description
+-- and [3] for the track of related task.
+--
+-- [1]: https://www.jwz.org/doc/mid.html
+-- [2]: https://github.com/tarantool/graphql/issues/43
+-- [3]: https://github.com/tarantool/graphql/issues/44
+
+local json = require('json')
+local yaml = require('yaml')
+local utils = require('graphql.utils')
+
+local nullable_1_1_conn_testdata = {}
+
+local PRNG_SEED = 42 -- fixed seed: generated localparts must be reproducible across runs
+local DOMAIN = 'graphql.tarantool.org'
+
+-- Strip the leading 'file:line: ' prefix from an error message, so test
+-- output does not depend on source paths / line numbers.
+local function strip_error(err)
+    -- NOTE(review): gsub also returns the substitution count as a second
+    -- value; all call sites below use single-value contexts, so it is
+    -- harmlessly discarded.
+    return tostring(err):gsub('^.-:.-: (.*)$', '%1')
+end
+
+-- Print the arguments and also return them as one newline-terminated string,
+-- so callers can both show and accumulate test output.
+local function print_and_return(...)
+    print(...)
+    return table.concat({...}, ' ') .. '\n'
+end
+
+-- Render one test case (name, query, variables, result) as a readable
+-- 'RUN ... {{{ ... }}}' section with YAML-encoded variables/result.
+local function format_result(name, query, variables, result)
+    return ('RUN %s {{{\nQUERY\n%s\nVARIABLES\n%s\nRESULT\n%s\n}}}\n'):format(
+        name, query:rstrip(), yaml.encode(variables), yaml.encode(result))
+end
+
+-- Build the avro schemas, collections (with a 1:N 'successors' and a
+-- nullable 1:1* 'in_reply_to' connection), service fields and index
+-- definitions used by all nullable-1:1-connection tests.
+function nullable_1_1_conn_testdata.get_test_metadata()
+    local schemas = json.decode([[{
+        "email": {
+            "type": "record",
+            "name": "email",
+            "fields": [
+                { "name": "localpart", "type": "string" },
+                { "name": "domain", "type": "string" },
+                { "name": "in_reply_to_localpart", "type": "string*" },
+                { "name": "in_reply_to_domain", "type": "string*" },
+                { "name": "body", "type": "string" }
+            ]
+        }
+    }]])
+
+    local collections = json.decode([[{
+        "email": {
+            "schema_name": "email",
+            "connections": [
+                {
+                    "type": "1:N",
+                    "name": "successors",
+                    "destination_collection": "email",
+                    "parts": [
+                        {
+                            "source_field": "localpart",
+                            "destination_field": "in_reply_to_localpart"
+                        },
+                        {
+                            "source_field": "domain",
+                            "destination_field": "in_reply_to_domain"
+                        }
+                    ],
+                    "index_name": "in_reply_to"
+                },
+                {
+                    "type": "1:1*",
+                    "name": "in_reply_to",
+                    "destination_collection": "email",
+                    "parts": [
+                        {
+                            "source_field": "in_reply_to_localpart",
+                            "destination_field": "localpart"
+                        },
+                        {
+                            "source_field": "in_reply_to_domain",
+                            "destination_field": "domain"
+                        }
+                    ],
+                    "index_name": "message_id"
+                }
+            ]
+        }
+    }]])
+
+    local service_fields = {
+        email = {},
+    }
+
+    local indexes = {
+        email = {
+            message_id = {
+                service_fields = {},
+                fields = {'localpart', 'domain'},
+                index_type = 'tree',
+                unique = true,
+                primary = true,
+            },
+            in_reply_to = {
+                service_fields = {},
+                fields = {'in_reply_to_localpart', 'in_reply_to_domain'},
+                index_type = 'tree',
+                unique = false,
+                primary = false,
+            },
+        },
+    }
+
+    return {
+        schemas = schemas,
+        collections = collections,
+        service_fields = service_fields,
+        indexes = indexes,
+    }
+end
+
+-- Create the 'email' space with its two indexes; idempotent via box.once.
+function nullable_1_1_conn_testdata.init_spaces()
+    -- email tuple field numbers; *_BRANCH_FN fields hold the avro union
+    -- branch tag for the adjacent nullable string field
+    local LOCALPART_FN = 1
+    local DOMAIN_FN = 2
+    local IN_REPLY_TO_LOCALPART_BRANCH_FN = 3 -- luacheck: ignore
+    local IN_REPLY_TO_LOCALPART_FN = 4
+    local IN_REPLY_TO_DOMAIN_BRANCH_FN = 5 -- luacheck: ignore
+    local IN_REPLY_TO_DOMAIN_FN = 6
+    local BODY_FN = 7 -- luacheck: ignore
+
+    box.once('test_space_init_spaces', function() -- runs at most once per data directory
+        box.schema.create_space('email')
+        box.space.email:create_index('message_id',
+            {type = 'tree', unique = true, parts = {
+                {LOCALPART_FN, 'string'},
+                {DOMAIN_FN, 'string'},
+            }}
+        )
+        box.space.email:create_index('in_reply_to',
+            {type = 'tree', unique = false, parts = {
+                {IN_REPLY_TO_LOCALPART_FN, 'string', is_nullable = true},
+                {IN_REPLY_TO_DOMAIN_FN, 'string', is_nullable = true},
+            }}
+        )
+    end)
+end
+
+-- Deterministic Lehmer (Park-Miller) PRNG; numbers are from
+-- https://gist.github.com/blixt/f17b47c62508be59987b
+local function gen_prng(seed)
+    return setmetatable({seed = seed}, {
+        __index = {
+            -- uniform-ish integer in [min, max]
+            next_int = function(self, min, max)
+                self.seed = self.seed * 16807 % 2147483647
+                return self.seed % (max - min + 1) + min
+            end,
+            -- random byte string of length `len`
+            next_string = function(self, len)
+                local res = {}
+                for i = 1, len do
+                    res[i] = string.char(self:next_int(0, 255))
+                end
+                return table.concat(res)
+            end,
+        }
+    })
+end
+
+-- Populate the 'email' space with two reply trees plus two partially-null
+-- emails; `virtbox` defaults to box.space (a shard proxy may be passed in).
+function nullable_1_1_conn_testdata.fill_test_data(virtbox)
+    local results = ''
+
+    local virtbox = virtbox or box.space -- fall back to local spaces
+
+    local prng = gen_prng(PRNG_SEED)
+
+    -- fresh email record with a pseudo-random message id (localpart, domain)
+    local function new_email(body)
+        return {
+            localpart = prng:next_string(16):hex(),
+            domain = DOMAIN,
+            body = body,
+        }
+    end
+
+    -- the string must contain '\n\n' to be printed in the literal scalar
+    -- style
+    results = results .. print_and_return(([[
+
+
+        +---------------------+
+        | a-+    h     x y    |
+        | |\ \   |\           |
+        | b c d  k l          |
+        | |   |\   \          |
+        | e   f g   m         |
+        +---------------------+
+    ]]):rstrip())
+
+    local email_trees = {
+        {
+            email = new_email('a'),
+            successors = {
+                {
+                    email = new_email('b'),
+                    successors = {
+                        {email = new_email('e')},
+                    }
+                },
+                {
+                    email = new_email('c'),
+                },
+                {
+                    email = new_email('d'),
+                    successors = {
+                        {email = new_email('f')},
+                        {email = new_email('g')},
+                    }
+                }
+            }
+        },
+        {
+            email = new_email('h'),
+            successors = {
+                {
+                    email = new_email('k'),
+                },
+                {
+                    email = new_email('l'),
+                    successors = {
+                        {email = new_email('m')}
+                    }
+                }
+            },
+        }
+    }
+
+    -- avro union branch tag for a nullable string value: 0 = null, 1 = string
+    local function union_branch_of(value)
+        local NULL_T = 0
+        local STRING_T = 1
+
+        if value == nil then -- box.NULL also compares equal to nil
+            return NULL_T
+        elseif type(value) == 'string' then
+            return STRING_T
+        end
+        error('value must be nil or a string, got ' .. type(value))
+    end
+
+    -- Recursively insert `email_nodes` and their successors.
+    -- `in_reply_to` is optional parameter with the following format:
+    --
+    -- ```
+    -- {
+    --     localpart = '...',
+    --     domain = '...',
+    -- }
+    -- ```
+    local function add_emails(email_nodes, in_reply_to)
+        local irt_localpart = (in_reply_to or {}).localpart
+        local irt_domain = (in_reply_to or {}).domain
+
+        for _, email_node in pairs(email_nodes) do
+            local localpart = email_node.email.localpart
+            local domain = email_node.email.domain
+
+            -- tuple layout matches the *_FN constants in init_spaces
+            virtbox.email:replace({
+                localpart,
+                domain,
+                union_branch_of(irt_localpart),
+                irt_localpart,
+                union_branch_of(irt_domain),
+                irt_domain,
+                email_node.email.body,
+            })
+            add_emails(email_node.successors or {}, {
+                localpart = localpart,
+                domain = domain,
+            })
+        end
+    end
+
+    add_emails(email_trees)
+
+    -- add two emails with one null in in_reply_to_{localpart,domain} to test
+    -- FULL MATCH constraints
+    local domain = DOMAIN
+    local localpart = prng:next_string(16):hex()
+    virtbox.email:replace({ -- 'x': null in_reply_to_localpart, non-null domain
+        localpart,
+        domain,
+        union_branch_of(box.NULL),
+        box.NULL,
+        union_branch_of(domain),
+        domain,
+        'x',
+    })
+    local localpart = prng:next_string(16):hex()
+    virtbox.email:replace({ -- 'y': non-null in_reply_to_localpart, null domain
+        localpart,
+        domain,
+        union_branch_of(localpart),
+        localpart,
+        union_branch_of(box.NULL),
+        box.NULL,
+        'y',
+    })
+
+    return results
+end
+
+-- Drop the 'email' space and reset the box.once guard (box.once stores its
+-- key in _schema prefixed with 'once'), so init_spaces can run again.
+function nullable_1_1_conn_testdata.drop_spaces()
+    box.space._schema:delete('oncetest_space_init_spaces')
+    box.space.email:drop()
+end
+
+-- Compile and run the GraphQL queries against `gql_wrapper`; returns the
+-- accumulated human-readable output (also printed as a side effect).
+function nullable_1_1_conn_testdata.run_queries(gql_wrapper)
+    local results = ''
+
+    -- downside traversal (1:N connections)
+    -- ------------------------------------
+
+    local query_downside = [[
+        query emails_tree_downside($body: String) {
+            email(body: $body) {
+                body
+                successors {
+                    body
+                    successors {
+                        body
+                        successors {
+                            body
+                        }
+                    }
+                }
+            }
+        }
+    ]]
+
+    local gql_query_downside = gql_wrapper:compile(query_downside)
+
+    utils.show_trace(function()
+        local variables_downside_a = {body = 'a'}
+        local result = gql_query_downside:execute(variables_downside_a)
+        results = results .. print_and_return(format_result(
+            'downside_a', query_downside, variables_downside_a, result))
+    end)
+
+    utils.show_trace(function()
+        local variables_downside_h = {body = 'h'}
+        local result = gql_query_downside:execute(variables_downside_h)
+        results = results .. print_and_return(format_result(
+            'downside_h', query_downside, variables_downside_h, result))
+    end)
+
+    -- upside traversal (1:1 connections)
+    -- ----------------------------------
+
+    local query_upside = [[
+        query emails_trace_upside($body: String) {
+            email(body: $body) {
+                body
+                in_reply_to {
+                    body
+                    in_reply_to {
+                        body
+                        in_reply_to {
+                            body
+                        }
+                    }
+                }
+            }
+        }
+    ]]
+
+    local gql_query_upside = gql_wrapper:compile(query_upside)
+
+    utils.show_trace(function()
+        local variables_upside = {body = 'f'}
+        local result = gql_query_upside:execute(variables_upside)
+        results = results .. print_and_return(format_result(
+            'upside', query_upside, variables_upside, result))
+    end)
+
+    -- FULL MATCH constraint: connection key parts must be all non-nulls or all
+    -- nulls; both expected to fail
+    -- ------------------------------------------------------------------------
+
+    local variables_upside_x = {body = 'x'}
+    local ok, err = pcall(function()
+        local result = gql_query_upside:execute(variables_upside_x)
+        results = results .. print_and_return(format_result(
+            'upside_x', query_upside, variables_upside_x, result))
+    end)
+
+    -- record the pcall outcome (expected: ok == false with a FULL MATCH error)
+    local result = {ok = ok, err = strip_error(err)}
+    results = results .. print_and_return(format_result(
+        'upside_x', query_upside, variables_upside_x, result))
+
+    local variables_upside_y = {body = 'y'}
+    local ok, err = pcall(function()
+        local result = gql_query_upside:execute(variables_upside_y)
+        results = results .. print_and_return(format_result(
+            'upside_y', query_upside, variables_upside_y, result))
+    end)
+
+    local result = {ok = ok, err = strip_error(err)}
+    results = results .. print_and_return(format_result(
+        'upside_y', query_upside, variables_upside_y, result))
+
+    return results
+end
+
+return nullable_1_1_conn_testdata