From ce0cf74856cac7b5a278f55aa73ec5441cd6e87e Mon Sep 17 00:00:00 2001
From: SudoBobo
Date: Wed, 14 Mar 2018 13:30:08 +0300
Subject: [PATCH 01/13] add support for union connections

---
 graphql/accessor_general.lua     | 165 ++++++++-----
 graphql/core/types.lua           |   5 +-
 graphql/tarantool_graphql.lua    | 402 +++++++++++++++++++++++--------
 graphql/utils.lua                |  23 ++
 test/local/union.result          |  18 ++
 test/local/union.test.lua        |  60 +++++
 test/testdata/union_testdata.lua | 265 ++++++++++++++++++++
 7 files changed, 786 insertions(+), 152 deletions(-)
 create mode 100644 test/local/union.result
 create mode 100755 test/local/union.test.lua
 create mode 100644 test/testdata/union_testdata.lua

diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua
index 32b6e11..f8b996d 100644
--- a/graphql/accessor_general.lua
+++ b/graphql/accessor_general.lua
@@ -553,6 +553,61 @@ local function build_index_parts_tree(indexes)
     return roots
 end
 
+local function set_connection_index(c, c_name, c_type, collection_name,
+        indexes, connection_indexes)
+    assert(type(c.index_name) == 'string',
+        'index_name must be a string, got ' .. type(c.index_name))
+
+    -- validate index_name against 'indexes'
+    local index_meta = indexes[c.destination_collection]
+    assert(type(index_meta) == 'table',
+        'index_meta must be a table, got ' .. type(index_meta))
+
+    assert(type(collection_name) == 'string', 'collection_name expected to ' ..
+        'be a string, got ' .. type(collection_name))
+
+    -- validate that connection parts match or are a prefix of the index
+    -- fields
+    local i = 1
+    local index_fields = index_meta[c.index_name].fields
+    for _, part in ipairs(c.parts) do
+        assert(type(part.source_field) == 'string',
+            'part.source_field must be a string, got ' ..
+            type(part.source_field))
+        assert(type(part.destination_field) == 'string',
+            'part.destination_field must be a string, got ' ..
+            type(part.destination_field))
+        assert(part.destination_field == index_fields[i],
+            ('connection "%s" of collection "%s" ' ..
+            'has destination parts that are not a prefix of the index ' ..
+            '"%s" parts (destination collection - "%s")'):format(c_name,
+            collection_name, c.index_name, c.destination_collection))
+        i = i + 1
+    end
+    local parts_cnt = i - 1
+
+    -- a partial index of a unique index is not guaranteed to be
+    -- unique
+    assert(c_type == '1:N' or parts_cnt == #index_fields,
+        ('1:1 connection "%s" of collection "%s" ' ..
+        'has fewer fields than the index "%s" has (destination collection - "%s") ' ..
+        '(cannot prove uniqueness of the partial index)'):format(c_name,
+        collection_name, c.index_name, c.destination_collection))
+
+    -- validate connection type against index uniqueness (if provided)
+    if index_meta.unique ~= nil then
+        assert(c_type == '1:N' or index_meta.unique == true,
+            ('1:1 connection ("%s") cannot be implemented ' ..
+            'on top of non-unique index ("%s")'):format(
+            c_name, c.index_name))
+    end
+
+    return {
+        index_name = c.index_name,
+        connection_type = c_type,
+    }
+end
+
 --- Build `connection_indexes` table (part of `index_cache`) to use in the
 --- @{get_index_name} function.
 ---
@@ -575,60 +630,28 @@ local function build_connection_indexes(indexes, collections)
     assert(type(collections) == 'table', 'collections must be a table, got ' ..
        type(collections))
     local connection_indexes = {}
-    for _, collection in pairs(collections) do
+    for collection_name, collection in pairs(collections) do
         for _, c in ipairs(collection.connections) do
-            if connection_indexes[c.destination_collection] == nil then
-                connection_indexes[c.destination_collection] = {}
-            end
-            local index_name = c.index_name
-            assert(type(index_name) == 'string',
-                'index_name must be a string, got ' .. type(index_name))
+            if c.destination_collection ~= nil then
+                if connection_indexes[c.destination_collection] == nil then
+                    connection_indexes[c.destination_collection] = {}
+                end
 
-            -- validate index_name against 'indexes'
-            local index_meta = indexes[c.destination_collection]
-            assert(type(index_meta) == 'table',
-                'index_meta must be a table, got ' .. type(index_meta))
-
-            -- validate connection parts are match or being prefix of index
-            -- fields
-            local i = 1
-            local index_fields = index_meta[c.index_name].fields
-            for _, part in ipairs(c.parts) do
-                assert(type(part.source_field) == 'string',
-                    'part.source_field must be a string, got ' ..
-                    type(part.source_field))
-                assert(type(part.destination_field) == 'string',
-                    'part.destination_field must be a string, got ' ..
-                    type(part.destination_field))
-                assert(part.destination_field == index_fields[i],
-                    ('connection "%s" of collection "%s" ' ..
-                    'has destination parts that is not prefix of the index ' ..
-                    '"%s" parts'):format(c.name, c.destination_collection,
-                    c.index_name))
-                i = i + 1
-            end
-            local parts_cnt = i - 1
-
-            -- partial index of an unique index is not guaranteed to being
-            -- unique
-            assert(c.type == '1:N' or parts_cnt == #index_fields,
-                ('1:1 connection "%s" of collection "%s" ' ..
-                'has less fields than the index "%s" has (cannot prove ' ..
-                'uniqueness of the partial index)'):format(c.name,
-                c.destination_collection, c.index_name))
-
-            -- validate connection type against index uniqueness (if provided)
-            if index_meta.unique ~= nil then
-                assert(c.type == '1:N' or index_meta.unique == true,
-                    ('1:1 connection ("%s") cannot be implemented ' ..
-                    'on top of non-unique index ("%s")'):format(
-                    c.name, index_name))
+                connection_indexes[c.destination_collection][c.name] =
+                    set_connection_index(c, c.name, c.type, collection_name,
+                        indexes, connection_indexes)
             end
 
-            connection_indexes[c.destination_collection][c.name] = {
-                index_name = index_name,
-                connection_type = c.type,
-            }
+            if c.variants ~= nil then
+                for _, v in ipairs(c.variants) do
+                    if connection_indexes[v.destination_collection] == nil then
+                        connection_indexes[v.destination_collection] = {}
+                    end
+                    connection_indexes[v.destination_collection][c.name] =
+                        set_connection_index(v, c.name, c.type, collection_name,
+                            indexes, connection_indexes)
+                end
+            end
         end
     end
     return connection_indexes
@@ -679,23 +702,51 @@ local function validate_collections(collections, schemas)
             type(connections))
         for _, connection in ipairs(connections) do
             assert(type(connection) == 'table',
-                'connection must be a table, got ' .. type(connection))
+                'connection must be a table, got ' .. type(connection))
             assert(type(connection.name) == 'string',
-                'connection.name must be a string, got ' ..
-                type(connection.name))
-            assert(type(connection.destination_collection) == 'string',
+                'connection.name must be a string, got ' ..
+                type(connection.name))
+            assert(type(connection.type) == 'string', 'connection.type must ' ..
+                'be a string, got ' .. type(connection.type))
+            assert(connection.type == '1:1' or connection.type == '1:N',
+                'connection.type must be \'1:1\' or \'1:N\', got ' ..
+                connection.type)
+            if connection.destination_collection then
+                assert(type(connection.destination_collection) == 'string',
                 'connection.destination_collection must be a string, got ' ..
                 type(connection.destination_collection))
-            assert(type(connection.parts) == 'table',
-                'connection.parts must be a string, got ' ..
+                assert(type(connection.parts) == 'table',
+                    'connection.parts must be a table, got ' ..
                 type(connection.parts))
-            assert(type(connection.index_name) == 'string',
+                assert(type(connection.index_name) == 'string',
                 'connection.index_name must be a string, got ' ..
                 type(connection.index_name))
+            elseif connection.variants then
+                for _, v in pairs(connection.variants) do
+                    assert(type(v.determinant) == 'table', 'variant\'s ' ..
+                        'determinant must be a table, got ' ..
+                        type(v.determinant))
+                    assert(type(v.destination_collection) == 'string',
+                        'variant.destination_collection must be a string, ' ..
+                        'got ' .. type(v.destination_collection))
+                    assert(type(v.parts) == 'table',
+                        'variant.parts must be a table, got ' .. type(v.parts))
+                    assert(type(v.index_name) == 'string',
+                        'variant.index_name must be a string, got ' ..
+                        type(v.index_name))
+                end
+            else
+                assert(false, 'connection must have either a ' ..
+                    'destination_collection or a variants field')
+            end
         end
     end
 end
 
+
 --- Perform unflatten, skipping, filtering, limiting of objects. This is the
 --- core of the `select_internal` function.
 ---
diff --git a/graphql/core/types.lua b/graphql/core/types.lua
index e24a30d..7644afb 100644
--- a/graphql/core/types.lua
+++ b/graphql/core/types.lua
@@ -155,11 +155,14 @@ end
 function types.union(config)
   assert(type(config.name) == 'string', 'type name must be provided as a string')
   assert(type(config.types) == 'table', 'types table must be provided')
+  assert(type(config.resolveType) == 'function', 'resolveType function must ' ..
+    'be provided')
 
   local instance = {
     __type = 'Union',
     name = config.name,
-    types = config.types
+    types = config.types,
+    resolveType = config.resolveType
   }
 
   instance.nonNull = types.nonNull(instance)
diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index d1e8eb5..40cb372 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -8,6 +8,7 @@
 --- passed to an accessor function in the filter argument.
 
 local json = require('json')
+local yaml = require('yaml')
 
 local parse = require('graphql.core.parse')
 local schema = require('graphql.core.schema')
@@ -250,6 +251,283 @@ local function convert_record_fields(state, fields)
     return res
 end
 
+--- The function converts passed simple connection to a field of GraphQL type.
+---
+--- @tparam table state for collection types
+--- @tparam table c simple connection to create field on
+--- @tparam table collection_name name of the collection which has given
+--- connection
+local convert_simple_connection = function(state, c, collection_name)
+    assert(type(c.destination_collection) == 'string',
+        'connection.destination_collection must be a string, got ' ..
+        type(c.destination_collection))
+    assert(type(c.parts) == 'table',
+        'connection.parts must be a string, got ' ..
type(c.parts)) + + -- gql type of connection field + local destination_type = state.types[c.destination_collection] + assert(destination_type ~= nil, + ('destination_type (named %s) must not be nil'):format( + c.destination_collection)) + + local c_args + if c.type == '1:1' then + c_args = state.object_arguments[c.destination_collection] + elseif c.type == '1:N' then + destination_type = types.nonNull(types.list(destination_type)) + c_args = state.all_arguments[c.destination_collection] + else + error('unknown connection type: ' .. tostring(c.type)) + end + + local c_list_args = state.list_arguments[c.destination_collection] + + local field = { + name = c.name, + kind = destination_type, + arguments = c_args, + resolve = function(parent, args_instance, info) + local destination_args_names = {} + local destination_args_values = {} + + for _, part in ipairs(c.parts) do + assert(type(part.source_field) == 'string', + 'part.source_field must be a string, got ' .. + type(part.destination_field)) + assert(type(part.destination_field) == 'string', + 'part.destination_field must be a string, got ' .. + type(part.destination_field)) + + destination_args_names[#destination_args_names + 1] = + part.destination_field + destination_args_values[#destination_args_values + 1] = + parent[part.source_field] + end + + local from = { + collection_name = collection_name, + connection_name = c.name, + destination_args_names = destination_args_names, + destination_args_values = destination_args_values, + } + local extra = { + qcontext = info.qcontext + } + local object_args_instance = {} -- passed to 'filter' + local list_args_instance = {} -- passed to 'args' + for k, v in pairs(args_instance) do + if c_list_args[k] ~= nil then + list_args_instance[k] = v + elseif c_args[k] ~= nil then + object_args_instance[k] = v + else + error(('cannot found "%s" field ("%s" value) ' .. + 'within allowed fields'):format(tostring(k), + tostring(v))) + end + end + local objs = state.accessor:select(parent, + c.destination_collection, from, + object_args_instance, list_args_instance, extra) + assert(type(objs) == 'table', + 'objs list received from an accessor ' .. + 'must be a table, got ' .. type(objs)) + if c.type == '1:1' then + assert(#objs == 1, + 'expect one matching object, got ' .. + tostring(#objs)) + return objs[1] + else -- c.type == '1:N' + return objs + end + end, + } + + return field +end + +--- The function converts passed union connection to a field of GraphQL type. +--- It builds connections between union collection and destination collections +--- (destination collections are 'types' of a 'Union' in GraphQL). +--- +--- @tparam table state for collection types +--- @tparam table c union connection to create field on +--- @tparam table collection_name name of the collection which has given +--- connection +local convert_union_connection = function(state, c, collection_name) + local union_types = {} + local collection_to_arguments = {} + local collection_to_list_arguments = {} + + local determinant_keys = utils.get_keys(c.variants[1].determinant) + local determinant_to_variant = {} + + for _, v in ipairs(c.variants) do + assert(v.determinant, 'each variant should have a determinant') + assert(type(v.determinant) == 'table', 'variant\'s determinant must ' .. + 'end be a table, got ' .. type(v.determinant)) + assert(type(v.destination_collection) == 'string', + 'variant.destination_collection must be a string, got ' .. 
+ type(v.destination_collection)) + assert(type(v.parts) == 'table', + 'variant.parts must be a string, got ' .. type(v.parts)) + local destination_type = state.types[v.destination_collection] + assert(destination_type ~= nil, + ('destination_type (named %s) must not be nil'):format( + v.destination_collection)) + + determinant_to_variant[v.determinant] = v + + local v_args + if c.type == '1:1' then + v_args = state.object_arguments[v.destination_collection] + elseif c.type == '1:N' then + destination_type = types.nonNull(types.list(destination_type)) + v_args = state.all_arguments[v.destination_collection] + end + + local v_list_args = state.list_arguments[v.destination_collection] + + union_types[#union_types + 1] = destination_type + + collection_to_arguments[v.destination_collection] = v_args + collection_to_list_arguments[v.destination_collection] = v_list_args + end + + local resolveType = function (result) + for _, v in pairs(c.variants) do + local dest_collection = state.types[v.destination_collection] + if utils.do_have_keys(result, utils.get_keys(dest_collection.fields)) then + return dest_collection + end + end + end + + local resolve_variant = function (parent) + assert(utils.do_have_keys(parent, determinant_keys), + ('Parent object of union object doesn\'t have determinant ' .. + 'fields which are nessesary to determine which resolving ' .. + 'variant should be used. Union parent object:\n"%s"\n' .. + 'Determinant keys:\n"%s"'): + format(yaml.encode(parent), yaml.encode(determinant_keys))) + + local resulting_variant + for determinant, variant in pairs(determinant_to_variant) do + local is_match = true + for determinant_key, determinant_value in pairs(determinant) do + if parent[determinant_key] ~= determinant_value then + is_match = false + break + end + end + + if is_match then + resulting_variant = variant + break + end + end + + assert(resulting_variant, ('Variant resolving failed.'.. + 'Parent object: "%s"\n'):format(yaml.encode(parent))) + return resulting_variant + end + + local field = { + name = c.name, + kind = types.union({name = c.name, types = union_types, + resolveType = resolveType}), + arguments = nil, + resolve = function(parent, args_instance, info) + local v = resolve_variant(parent) + local destination_collection = state.types[v.destination_collection] + local destination_args_names = {} + local destination_args_values = {} + + for _, part in ipairs(v.parts) do + assert(type(part.source_field) == 'string', + 'part.source_field must be a string, got ' .. + type(part.destination_field)) + assert(type(part.destination_field) == 'string', + 'part.destination_field must be a string, got ' .. 
+ type(part.destination_field)) + + destination_args_names[#destination_args_names + 1] = + part.destination_field + destination_args_values[#destination_args_values + 1] = + parent[part.source_field] + end + + local from = { + collection_name = collection_name, + connection_name = c.name, + destination_args_names = destination_args_names, + destination_args_values = destination_args_values, + } + local extra = { + qcontext = info.qcontext + } + local object_args_instance = {} -- passed to 'filter' + local list_args_instance = {} -- passed to 'args' + + local c_args = collection_to_arguments[destination_collection] + local c_list_args = collection_to_list_arguments[destination_collection] + + for k, v in pairs(args_instance) do + if c_list_args[k] ~= nil then + list_args_instance[k] = v + elseif c_args[k] ~= nil then + object_args_instance[k] = v + else + error(('cannot found "%s" field ("%s" value) ' .. + 'within allowed fields'):format(tostring(k), + tostring(v))) + end + end + local objs = state.accessor:select(parent, + v.destination_collection, from, + object_args_instance, list_args_instance, extra) + assert(type(objs) == 'table', + 'objs list received from an accessor ' .. + 'must be a table, got ' .. type(objs)) + if c.type == '1:1' then + assert(#objs == 1, + 'expect one matching object, got ' .. + tostring(#objs)) + return objs[1] + else -- c.type == '1:N' + return objs + end + end + } + return field +end + +--- The function converts passed connection to a field of GraphQL type +--- +--- @tparam table state for read state.accessor and previously filled +--- state.types (state.types are gql types) +--- @tparam table connection connection to create field on +--- @tparam table collection_name name of the collection which have given +--- connection +local convert_connection_to_field = function(state, connection, collection_name) + assert(type(connection.type) == 'string', + 'connection.type must be a string, got ' .. type(connection.type)) + assert(connection.type == '1:1' or connection.type == '1:N', + 'connection.type must be 1:1 or 1:N, got ' .. connection.type) + assert(type(connection.name) == 'string', + 'connection.name must be a string, got ' .. type(connection.name)) + assert(connection.destination_collection or connection.variants, + 'connection must either destination_collection or variatns field') + + if connection.destination_collection then + return convert_simple_connection(state, connection, collection_name) + end + + if connection.variants then + return convert_union_connection(state, connection, collection_name) + end +end + --- The function converts passed avro-schema to a GraphQL type. --- --- @tparam table state for read state.accessor and previously filled @@ -303,99 +581,8 @@ gql_type = function(state, avro_schema, collection, collection_name) local fields = convert_record_fields(state, avro_schema.fields) - -- if collection param is passed then go over all connections for _, c in ipairs((collection or {}).connections or {}) do - assert(type(c.type) == 'string', - 'connection.type must be a string, got ' .. type(c.type)) - assert(c.type == '1:1' or c.type == '1:N', - 'connection.type must be 1:1 or 1:N, got ' .. c.type) - assert(type(c.name) == 'string', - 'connection.name must be a string, got ' .. type(c.name)) - assert(type(c.destination_collection) == 'string', - 'connection.destination_collection must be a string, got ' .. - type(c.destination_collection)) - assert(type(c.parts) == 'table', - 'connection.parts must be a string, got ' .. 
type(c.parts)) - - -- gql type of connection field - local destination_type = - state.types[c.destination_collection] - assert(destination_type ~= nil, - ('destination_type (named %s) must not be nil'):format( - c.destination_collection)) - - local c_args - if c.type == '1:1' then - c_args = state.object_arguments[c.destination_collection] - elseif c.type == '1:N' then - destination_type = types.nonNull(types.list(destination_type)) - c_args = state.all_arguments[c.destination_collection] - else - error('unknown connection type: ' .. tostring(c.type)) - end - - local c_list_args = state.list_arguments[c.destination_collection] - - fields[c.name] = { - name = c.name, - kind = destination_type, - arguments = c_args, - resolve = function(parent, args_instance, info) - local destination_args_names = {} - local destination_args_values = {} - - for _, part in ipairs(c.parts) do - assert(type(part.source_field) == 'string', - 'part.source_field must be a string, got ' .. - type(part.destination_field)) - assert(type(part.destination_field) == 'string', - 'part.destination_field must be a string, got ' .. - type(part.destination_field)) - - destination_args_names[#destination_args_names + 1] = - part.destination_field - destination_args_values[#destination_args_values + 1] = - parent[part.source_field] - end - - local from = { - collection_name = collection_name, - connection_name = c.name, - destination_args_names = destination_args_names, - destination_args_values = destination_args_values, - } - local extra = { - qcontext = info.qcontext - } - local object_args_instance = {} -- passed to 'filter' - local list_args_instance = {} -- passed to 'args' - for k, v in pairs(args_instance) do - if c_list_args[k] ~= nil then - list_args_instance[k] = v - elseif c_args[k] ~= nil then - object_args_instance[k] = v - else - error(('cannot found "%s" field ("%s" value) ' .. - 'within allowed fields'):format(tostring(k), - tostring(v))) - end - end - local objs = accessor:select(parent, - c.destination_collection, from, - object_args_instance, list_args_instance, extra) - assert(type(objs) == 'table', - 'objs list received from an accessor ' .. - 'must be a table, got ' .. type(objs)) - if c.type == '1:1' then - assert(#objs == 1, - 'expect one matching object, got ' .. - tostring(#objs)) - return objs[1] - else -- c.type == '1:N' - return objs - end - end, - } + fields[c.name] = convert_connection_to_field(state, c, collection_name) end -- create gql type @@ -628,7 +815,8 @@ end --- schema_name = 'schema_name_foo', --- connections = { // the optional field --- { ---- name = 'connection_name_bar', +--- type = '1:1' or '1:N', +--- name = 'simple_connection_name', --- destination_collection = 'collection_baz', --- parts = { --- { @@ -641,7 +829,17 @@ end --- -- ignored in the graphql --- -- part --- }, ---- ... +--- { +--- name = 'union_connection_name', +--- type = '1:1' or '1:N', +--- variants = { +--- { +--- see variant format below +--- }, +--- ... +--- } +--- }, +--- ... --- }, --- }, --- ... @@ -675,6 +873,22 @@ end --- } --- }), --- }) +--- +--- variant format +--- { +--- Source collection must have all fields that are keys in determinant +--- table. Based on the values of these fields right destination collection +--- is determined. 
+--- determinant = {field_or_source: 'destination_1_value', ...}, +--- destination_collection = 'collection_name', +--- parts = { +--- { +--- source_field = 'field_name_source', +--- destination_field = 'field_name_destination' +--- } +--- }, +--- index_name = 'index_name' +--- } function tarantool_graphql.new(cfg) local state = parse_cfg(cfg) return setmetatable(state, { diff --git a/graphql/utils.lua b/graphql/utils.lua index 1a472fa..18be4b4 100644 --- a/graphql/utils.lua +++ b/graphql/utils.lua @@ -132,4 +132,27 @@ function utils.gen_booking_table(data) }) end +--- @return `table` with all keys of the given table +function utils.get_keys(table) + local keys = {} + for k, _ in pairs(table) do + keys[#keys + 1] = k + end + return keys +end + +--- Check if passed table has passed keys with non-nil values. +--- @tparam table table to check +--- @tparam table keys array of keys to check +--- @return[1] `true` if passed table has passed keys +--- @return[2] `false` otherwise +function utils.do_have_keys(table, keys) + for _, k in pairs(keys) do + if table[k] == nil then + return false + end + end + return true +end + return utils diff --git a/test/local/union.result b/test/local/union.result new file mode 100644 index 0000000..e354cd6 --- /dev/null +++ b/test/local/union.result @@ -0,0 +1,18 @@ +RESULT +--- +hero_collection: +- hero_type: human + hero_connection: + name: Luke + hero_id: hero_id_1 +... + +RESULT +--- +hero_collection: +- hero_type: starship + hero_connection: + model: Falcon-42 + hero_id: hero_id_2 +... + diff --git a/test/local/union.test.lua b/test/local/union.test.lua new file mode 100755 index 0000000..4652ae0 --- /dev/null +++ b/test/local/union.test.lua @@ -0,0 +1,60 @@ +#!/usr/bin/env tarantool + +box.cfg { background = false } +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +local graphql = require('graphql') +local testdata = require('test.testdata.union_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + + +-- init box and data schema +testdata.init_spaces() + +-- upload test data +testdata.fill_test_data() + +-- acquire metadata +local metadata = testdata.get_test_metadata() +local schemas = metadata.schemas +local collections = metadata.collections +local service_fields = metadata.service_fields +local indexes = metadata.indexes +local utils = require('graphql.utils') + +-- build accessor and graphql schemas +-- ---------------------------------- +local accessor = utils.show_trace(function() + return graphql.accessor_space.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, + }) +end) + +local gql_wrapper = utils.show_trace(function() + return graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, + }) +end) + +-- run queries +-- ----------- + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +testdata.drop_spaces() + +os.exit() \ No newline at end of file diff --git a/test/testdata/union_testdata.lua b/test/testdata/union_testdata.lua new file mode 100644 index 0000000..9d7d7c5 --- /dev/null +++ b/test/testdata/union_testdata.lua @@ -0,0 +1,265 @@ +local json = require('json') +local yaml = require('yaml') +local utils = require('graphql.utils') + +local union_testdata = {} + +local function print_and_return(...) + print(...) + return table.concat({ ... }, ' ') .. '\n' +end + +function union_testdata.get_test_metadata() + local schemas = json.decode([[{ + "hero": { + "name": "hero", + "type": "record", + "fields": [ + { "name": "hero_id", "type": "string" }, + { "name": "hero_type", "type" : "string" } + ] + }, + "human": { + "name": "human", + "type": "record", + "fields": [ + { "name": "hero_id", "type": "string" }, + { "name": "name", "type": "string" }, + { "name": "episode", "type": "string"} + ] + }, + "starship": { + "name": "starship", + "type": "record", + "fields": [ + { "name": "hero_id", "type": "string" }, + { "name": "model", "type": "string" }, + { "name": "episode", "type": "string"} + ] + }, + "hero_info": { + "name": "hero_info", + "type": "record", + "fields": [ + { "name": "hero_id", "type": "string" } + ] + } + }]]) + + local collections = json.decode([[{ + "hero_collection": { + "schema_name": "hero", + "connections": [ + { + "name": "hero_connection", + "type": "1:1", + "variants": [ + { + "determinant": {"hero_type": "human"}, + "destination_collection": "human_collection", + "parts": [ + { + "source_field": "hero_id", + "destination_field": "hero_id" + } + ], + "index_name": "human_id_index" + }, + { + "determinant": {"hero_type": "starship"}, + "destination_collection": "starship_collection", + "parts": [ + { + "source_field": "hero_id", + "destination_field": "hero_id" + } + ], + "index_name": "starship_id_index" + } + ] + }, + { + "type": "1:1", + "name": "hero_info_connection", + "destination_collection": "hero_info_collection", + "parts": [ + { "source_field": "hero_id", "destination_field": "hero_id" } + ], + "index_name": "hero_info_id_index" + } + ] + }, + "human_collection": { + "schema_name": "human", + "connections": [] + }, + "starship_collection": { + "schema_name": "starship", + "connections": [] + }, + "hero_info_collection": { + "schema_name": "hero_info", + "connections": [] + } + }]]) + + local service_fields = { + hero = { + 
{ name = 'expires_on', type = 'long', default = 0 },
+        },
+        human = {
+            { name = 'expires_on', type = 'long', default = 0 },
+        },
+        starship = {
+            { name = 'expires_on', type = 'long', default = 0 },
+        },
+        hero_info = {
+            { name = 'expires_on', type = 'long', default = 0 },
+        }
+    }
+
+    local indexes = {
+        hero_collection = {
+            hero_id_index = {
+                service_fields = {},
+                fields = { 'hero_id' },
+                index_type = 'tree',
+                unique = true,
+                primary = true,
+            },
+        },
+
+        human_collection = {
+            human_id_index = {
+                service_fields = {},
+                fields = { 'hero_id' },
+                index_type = 'tree',
+                unique = true,
+                primary = true,
+            },
+        },
+
+        starship_collection = {
+            starship_id_index = {
+                service_fields = {},
+                fields = { 'hero_id' },
+                index_type = 'tree',
+                unique = true,
+                primary = true,
+            },
+        },
+
+        hero_info_collection = {
+            hero_info_id_index = {
+                service_fields = {},
+                fields = { 'hero_id' },
+                index_type = 'tree',
+                unique = true,
+                primary = true,
+            },
+        }
+    }
+
+    return {
+        schemas = schemas,
+        collections = collections,
+        service_fields = service_fields,
+        indexes = indexes,
+    }
+end
+
+function union_testdata.init_spaces()
+    -- hero_id field number in the test spaces (field 1 is the service field)
+    local HERO_ID_FN = 2
+
+    box.once('test_space_init_spaces', function()
+        box.schema.create_space('hero_collection')
+        box.space.hero_collection:create_index('hero_id_index',
+            { type = 'tree', unique = true, parts = { HERO_ID_FN, 'string' }}
+        )
+
+        box.schema.create_space('human_collection')
+        box.space.human_collection:create_index('human_id_index',
+            { type = 'tree', unique = true, parts = { HERO_ID_FN, 'string' }}
+        )
+
+        box.schema.create_space('starship_collection')
+        box.space.starship_collection:create_index('starship_id_index',
+            { type = 'tree', unique = true, parts = { HERO_ID_FN, 'string' }}
+        )
+
+        box.schema.create_space('hero_info_collection')
+        box.space.hero_info_collection:create_index('hero_info_id_index',
+            { type = 'tree', unique = true, parts = { HERO_ID_FN, 'string' }}
+        )
+    end)
+end
+
+function union_testdata.fill_test_data(shard)
+    local shard = shard or box.space
+
+    shard.hero_collection:replace(
+        { 1827767717, 'hero_id_1', 'human'})
+    shard.hero_collection:replace(
+        { 1827767717, 'hero_id_2', 'starship'})
+
+    shard.human_collection:replace(
+        { 1827767717, 'hero_id_1', 'Luke', "EMPR"})
+
+    shard.starship_collection:replace(
+        { 1827767717, 'hero_id_2', 'Falcon-42', "NEW"})
+
+    shard.hero_info_collection:replace(
+        { 1827767717, 'hero_id_1'})
+
+    shard.hero_info_collection:replace(
+        { 1827767717, 'hero_id_2'})
+end
+
+function union_testdata.drop_spaces()
+    box.space._schema:delete('oncetest_space_init_spaces')
+    box.space.human_collection:drop()
+    box.space.starship_collection:drop()
+    box.space.hero_collection:drop()
+    box.space.hero_info_collection:drop()
+end
+
+function union_testdata.run_queries(gql_wrapper)
+    local results = ''
+
+    local query = [[
+        query obtainHeroes($hero_id: String) {
+            hero_collection(hero_id: $hero_id) {
+                hero_id
+                hero_type
+                hero_connection {
+                    ... on human_collection {
+                        name
+                    }
+                    ... on starship_collection {
+                        model
+                    }
+                }
+            }
+        }
+    ]]
+
+    utils.show_trace(function()
+        local variables_1 = {hero_id = 'hero_id_1'}
+        local gql_query_1 = gql_wrapper:compile(query)
+        local result = gql_query_1:execute(variables_1)
+        results = results .. print_and_return(
+            ('RESULT\n%s'):format(yaml.encode(result)))
+    end)
+
+    utils.show_trace(function()
+        local variables_2 = {hero_id = 'hero_id_2'}
+        local gql_query_2 = gql_wrapper:compile(query)
+        local result = gql_query_2:execute(variables_2)
+        results = results .. print_and_return(
+            ('RESULT\n%s'):format(yaml.encode(result)))
+    end)
+
+    return results
+end
+
+return union_testdata

From d01729cffc9b7441434dd370a1a3b9489991caa1 Mon Sep 17 00:00:00 2001
From: SudoBobo
Date: Wed, 14 Mar 2018 13:31:21 +0300
Subject: [PATCH 02/13] fix test to match new, more specific error message

---
 test/local/init_fail.result | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/local/init_fail.result b/test/local/init_fail.result
index ab9b013..12cf161 100644
--- a/test/local/init_fail.result
+++ b/test/local/init_fail.result
@@ -1,2 +1,2 @@
-INIT: ok: false; err: 1:1 connection "user_connection" of collection "user_collection" has less fields than the index "user_str_num_index" has (cannot prove uniqueness of the partial index)
+INIT: ok: false; err: 1:1 connection "user_connection" of collection "order_collection" has fewer fields than the index "user_str_num_index" has (destination collection - "user_collection") (cannot prove uniqueness of the partial index)
 INIT: ok: true; type(res): table

From b9849b1c06e3e4bd240f0e5ab970e928eb8323d5 Mon Sep 17 00:00:00 2001
From: Alexander Turenko
Date: Tue, 6 Mar 2018 04:56:17 +0300
Subject: [PATCH 03/13] Nullable 1:1 connections

Fixes #44.
---
 graphql/tarantool_graphql.lua                 | 476 ++++++------------
 test/local/space_nullable_1_1_conn.result     | 180 +++++++
 test/local/space_nullable_1_1_conn.test.lua   |  56 +++
 .../shard_nullable_1_1_conn.result            | 318 ++++++++++++
 .../shard_nullable_1_1_conn.test.lua          |  76 +++
 .../shard_redundancy/nullable_1_1_conn.result | 353 +++++++++++++
 .../nullable_1_1_conn.test.lua                |  93 ++++
 test/testdata/nullable_1_1_conn_testdata.lua  | 406 +++++++++++++++
 8 files changed, 1642 insertions(+), 316 deletions(-)
 create mode 100644 test/local/space_nullable_1_1_conn.result
 create mode 100755 test/local/space_nullable_1_1_conn.test.lua
 create mode 100644 test/shard_no_redundancy/shard_nullable_1_1_conn.result
 create mode 100644 test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua
 create mode 100644 test/shard_redundancy/nullable_1_1_conn.result
 create mode 100644 test/shard_redundancy/nullable_1_1_conn.test.lua
 create mode 100644 test/testdata/nullable_1_1_conn_testdata.lua

diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index 40cb372..a063c2b 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -8,7 +8,6 @@
 --- passed to an accessor function in the filter argument.
 
 local json = require('json')
-local yaml = require('yaml')
 
 local parse = require('graphql.core.parse')
 local schema = require('graphql.core.schema')
@@ -231,7 +230,7 @@ end
 --- Convert each field of an avro-schema to a graphql type.
 ---
 --- @tparam table state for read state.accessor and previously filled
---- state.types
+--- state.nullable_collection_types
 --- @tparam table fields fields part from an avro-schema
 ---
 --- @treturn table `res` -- map with type names as keys and graphql types as
@@ -251,287 +250,10 @@ local function convert_record_fields(state, fields)
     return res
 end
 
---- The function converts passed simple connection to a field of GraphQL type.
----
---- @tparam table state for collection types
---- @tparam table c simple connection to create field on
---- @tparam table collection_name name of the collection which has given
---- connection
-local convert_simple_connection = function(state, c, collection_name)
-    assert(type(c.destination_collection) == 'string',
-        'connection.destination_collection must be a string, got ' ..
- type(c.destination_collection)) - assert(type(c.parts) == 'table', - 'connection.parts must be a string, got ' .. type(c.parts)) - - -- gql type of connection field - local destination_type = state.types[c.destination_collection] - assert(destination_type ~= nil, - ('destination_type (named %s) must not be nil'):format( - c.destination_collection)) - - local c_args - if c.type == '1:1' then - c_args = state.object_arguments[c.destination_collection] - elseif c.type == '1:N' then - destination_type = types.nonNull(types.list(destination_type)) - c_args = state.all_arguments[c.destination_collection] - else - error('unknown connection type: ' .. tostring(c.type)) - end - - local c_list_args = state.list_arguments[c.destination_collection] - - local field = { - name = c.name, - kind = destination_type, - arguments = c_args, - resolve = function(parent, args_instance, info) - local destination_args_names = {} - local destination_args_values = {} - - for _, part in ipairs(c.parts) do - assert(type(part.source_field) == 'string', - 'part.source_field must be a string, got ' .. - type(part.destination_field)) - assert(type(part.destination_field) == 'string', - 'part.destination_field must be a string, got ' .. - type(part.destination_field)) - - destination_args_names[#destination_args_names + 1] = - part.destination_field - destination_args_values[#destination_args_values + 1] = - parent[part.source_field] - end - - local from = { - collection_name = collection_name, - connection_name = c.name, - destination_args_names = destination_args_names, - destination_args_values = destination_args_values, - } - local extra = { - qcontext = info.qcontext - } - local object_args_instance = {} -- passed to 'filter' - local list_args_instance = {} -- passed to 'args' - for k, v in pairs(args_instance) do - if c_list_args[k] ~= nil then - list_args_instance[k] = v - elseif c_args[k] ~= nil then - object_args_instance[k] = v - else - error(('cannot found "%s" field ("%s" value) ' .. - 'within allowed fields'):format(tostring(k), - tostring(v))) - end - end - local objs = state.accessor:select(parent, - c.destination_collection, from, - object_args_instance, list_args_instance, extra) - assert(type(objs) == 'table', - 'objs list received from an accessor ' .. - 'must be a table, got ' .. type(objs)) - if c.type == '1:1' then - assert(#objs == 1, - 'expect one matching object, got ' .. - tostring(#objs)) - return objs[1] - else -- c.type == '1:N' - return objs - end - end, - } - - return field -end - ---- The function converts passed union connection to a field of GraphQL type. ---- It builds connections between union collection and destination collections ---- (destination collections are 'types' of a 'Union' in GraphQL). ---- ---- @tparam table state for collection types ---- @tparam table c union connection to create field on ---- @tparam table collection_name name of the collection which has given ---- connection -local convert_union_connection = function(state, c, collection_name) - local union_types = {} - local collection_to_arguments = {} - local collection_to_list_arguments = {} - - local determinant_keys = utils.get_keys(c.variants[1].determinant) - local determinant_to_variant = {} - - for _, v in ipairs(c.variants) do - assert(v.determinant, 'each variant should have a determinant') - assert(type(v.determinant) == 'table', 'variant\'s determinant must ' .. - 'end be a table, got ' .. 
type(v.determinant)) - assert(type(v.destination_collection) == 'string', - 'variant.destination_collection must be a string, got ' .. - type(v.destination_collection)) - assert(type(v.parts) == 'table', - 'variant.parts must be a string, got ' .. type(v.parts)) - local destination_type = state.types[v.destination_collection] - assert(destination_type ~= nil, - ('destination_type (named %s) must not be nil'):format( - v.destination_collection)) - - determinant_to_variant[v.determinant] = v - - local v_args - if c.type == '1:1' then - v_args = state.object_arguments[v.destination_collection] - elseif c.type == '1:N' then - destination_type = types.nonNull(types.list(destination_type)) - v_args = state.all_arguments[v.destination_collection] - end - - local v_list_args = state.list_arguments[v.destination_collection] - - union_types[#union_types + 1] = destination_type - - collection_to_arguments[v.destination_collection] = v_args - collection_to_list_arguments[v.destination_collection] = v_list_args - end - - local resolveType = function (result) - for _, v in pairs(c.variants) do - local dest_collection = state.types[v.destination_collection] - if utils.do_have_keys(result, utils.get_keys(dest_collection.fields)) then - return dest_collection - end - end - end - - local resolve_variant = function (parent) - assert(utils.do_have_keys(parent, determinant_keys), - ('Parent object of union object doesn\'t have determinant ' .. - 'fields which are nessesary to determine which resolving ' .. - 'variant should be used. Union parent object:\n"%s"\n' .. - 'Determinant keys:\n"%s"'): - format(yaml.encode(parent), yaml.encode(determinant_keys))) - - local resulting_variant - for determinant, variant in pairs(determinant_to_variant) do - local is_match = true - for determinant_key, determinant_value in pairs(determinant) do - if parent[determinant_key] ~= determinant_value then - is_match = false - break - end - end - - if is_match then - resulting_variant = variant - break - end - end - - assert(resulting_variant, ('Variant resolving failed.'.. - 'Parent object: "%s"\n'):format(yaml.encode(parent))) - return resulting_variant - end - - local field = { - name = c.name, - kind = types.union({name = c.name, types = union_types, - resolveType = resolveType}), - arguments = nil, - resolve = function(parent, args_instance, info) - local v = resolve_variant(parent) - local destination_collection = state.types[v.destination_collection] - local destination_args_names = {} - local destination_args_values = {} - - for _, part in ipairs(v.parts) do - assert(type(part.source_field) == 'string', - 'part.source_field must be a string, got ' .. - type(part.destination_field)) - assert(type(part.destination_field) == 'string', - 'part.destination_field must be a string, got ' .. 
- type(part.destination_field)) - - destination_args_names[#destination_args_names + 1] = - part.destination_field - destination_args_values[#destination_args_values + 1] = - parent[part.source_field] - end - - local from = { - collection_name = collection_name, - connection_name = c.name, - destination_args_names = destination_args_names, - destination_args_values = destination_args_values, - } - local extra = { - qcontext = info.qcontext - } - local object_args_instance = {} -- passed to 'filter' - local list_args_instance = {} -- passed to 'args' - - local c_args = collection_to_arguments[destination_collection] - local c_list_args = collection_to_list_arguments[destination_collection] - - for k, v in pairs(args_instance) do - if c_list_args[k] ~= nil then - list_args_instance[k] = v - elseif c_args[k] ~= nil then - object_args_instance[k] = v - else - error(('cannot found "%s" field ("%s" value) ' .. - 'within allowed fields'):format(tostring(k), - tostring(v))) - end - end - local objs = state.accessor:select(parent, - v.destination_collection, from, - object_args_instance, list_args_instance, extra) - assert(type(objs) == 'table', - 'objs list received from an accessor ' .. - 'must be a table, got ' .. type(objs)) - if c.type == '1:1' then - assert(#objs == 1, - 'expect one matching object, got ' .. - tostring(#objs)) - return objs[1] - else -- c.type == '1:N' - return objs - end - end - } - return field -end - ---- The function converts passed connection to a field of GraphQL type ---- ---- @tparam table state for read state.accessor and previously filled ---- state.types (state.types are gql types) ---- @tparam table connection connection to create field on ---- @tparam table collection_name name of the collection which have given ---- connection -local convert_connection_to_field = function(state, connection, collection_name) - assert(type(connection.type) == 'string', - 'connection.type must be a string, got ' .. type(connection.type)) - assert(connection.type == '1:1' or connection.type == '1:N', - 'connection.type must be 1:1 or 1:N, got ' .. connection.type) - assert(type(connection.name) == 'string', - 'connection.name must be a string, got ' .. type(connection.name)) - assert(connection.destination_collection or connection.variants, - 'connection must either destination_collection or variatns field') - - if connection.destination_collection then - return convert_simple_connection(state, connection, collection_name) - end - - if connection.variants then - return convert_union_connection(state, connection, collection_name) - end -end - --- The function converts passed avro-schema to a GraphQL type. --- --- @tparam table state for read state.accessor and previously filled ---- state.types (state.types are gql types) +--- state.nullable_collection_types (those are gql types) --- @tparam table avro_schema input avro-schema --- @tparam[opt] table collection table with schema_name, connections fields --- described a collection (e.g. tarantool's spaces) @@ -581,8 +303,145 @@ gql_type = function(state, avro_schema, collection, collection_name) local fields = convert_record_fields(state, avro_schema.fields) + -- if collection param is passed then go over all connections for _, c in ipairs((collection or {}).connections or {}) do - fields[c.name] = convert_connection_to_field(state, c, collection_name) + assert(type(c.type) == 'string', + 'connection.type must be a string, got ' .. 
type(c.type))
+        assert(c.type == '1:1' or c.type == '1:1*' or c.type == '1:N',
+            'connection.type must be 1:1, 1:1* or 1:N, got ' .. c.type)
+        assert(type(c.name) == 'string',
+            'connection.name must be a string, got ' .. type(c.name))
+        assert(type(c.destination_collection) == 'string',
+            'connection.destination_collection must be a string, got ' ..
+            type(c.destination_collection))
+        assert(type(c.parts) == 'table',
+            'connection.parts must be a table, got ' .. type(c.parts))
+
+        -- gql type of connection field
+        local destination_type =
+            state.nullable_collection_types[c.destination_collection]
+        assert(destination_type ~= nil,
+            ('destination_type (named %s) must not be nil'):format(
+            c.destination_collection))
+
+        local c_args
+        if c.type == '1:1' then
+            destination_type = types.nonNull(destination_type)
+            c_args = state.object_arguments[c.destination_collection]
+        elseif c.type == '1:1*' then
+            c_args = state.object_arguments[c.destination_collection]
+        elseif c.type == '1:N' then
+            destination_type = types.nonNull(types.list(types.nonNull(
+                destination_type)))
+            c_args = state.all_arguments[c.destination_collection]
+        else
+            error('unknown connection type: ' .. tostring(c.type))
+        end
+
+        local c_list_args = state.list_arguments[c.destination_collection]
+
+        fields[c.name] = {
+            name = c.name,
+            kind = destination_type,
+            arguments = c_args,
+            resolve = function(parent, args_instance, info)
+                local destination_args_names = {}
+                local destination_args_values = {}
+                local are_all_parts_non_null = true
+                local are_all_parts_null = true
+
+                for _, part in ipairs(c.parts) do
+                    assert(type(part.source_field) == 'string',
+                        'part.source_field must be a string, got ' ..
+                        type(part.source_field))
+                    assert(type(part.destination_field) == 'string',
+                        'part.destination_field must be a string, got ' ..
+                        type(part.destination_field))
+
+                    destination_args_names[#destination_args_names + 1] =
+                        part.destination_field
+
+                    local value = parent[part.source_field]
+                    destination_args_values[#destination_args_values + 1] =
+                        value
+
+                    if value ~= nil then -- nil or box.NULL
+                        are_all_parts_null = false
+                    else
+                        are_all_parts_non_null = false
+                    end
+                end
+
+                -- Check the FULL MATCH constraint before requesting the
+                -- destination object(s). Note that connection key parts
+                -- can be a prefix of the index key parts. A zero parts
+                -- count is considered ok by this check.
+                local ok = are_all_parts_null or are_all_parts_non_null
+                if not ok then -- avoid extra json.encode()
+                    assert(ok,
+                        'FULL MATCH constraint was failed: connection ' ..
+                        'key parts must be all non-nulls or all nulls; ' ..
+                        'object: ' .. json.encode(parent))
+                end
+
+                -- Avoid an unneeded index lookup on a destination
+                -- collection when all connection parts are null:
+                -- * return null for 1:1* connection;
+                -- * return {} for 1:N connection (except the case when
+                --   source collection is the Query pseudo-collection).
+                if collection_name ~= 'Query' and are_all_parts_null then
+                    if c.type ~= '1:1*' and c.type ~= '1:N' then
+                        -- `if` is to avoid extra json.encode
+                        assert(c.type == '1:1*' or c.type == '1:N',
+                            ('only 1:1* or 1:N connections can have ' ..
+                            'all key parts null; parent is %s from ' ..
+                            'collection "%s"'):format(json.encode(parent),
+                            tostring(collection_name)))
+                    end
+                    return c.type == '1:N' and {} or nil
+                end
+
+                local from = {
+                    collection_name = collection_name,
+                    connection_name = c.name,
+                    destination_args_names = destination_args_names,
+                    destination_args_values = destination_args_values,
+                }
+                local extra = {
+                    qcontext = info.qcontext
+                }
+                local object_args_instance = {} -- passed to 'filter'
+                local list_args_instance = {} -- passed to 'args'
+                for k, v in pairs(args_instance) do
+                    if c_list_args[k] ~= nil then
+                        list_args_instance[k] = v
+                    elseif c_args[k] ~= nil then
+                        object_args_instance[k] = v
+                    else
+                        error(('cannot find "%s" field ("%s" value) ' ..
+                            'within allowed fields'):format(tostring(k),
+                            tostring(v)))
+                    end
+                end
+                local objs = accessor:select(parent,
+                    c.destination_collection, from,
+                    object_args_instance, list_args_instance, extra)
+                assert(type(objs) == 'table',
+                    'objs list received from an accessor ' ..
+                    'must be a table, got ' .. type(objs))
+                if c.type == '1:1' or c.type == '1:1*' then
+                    -- we expect exactly one object here even for 1:1*
+                    -- connections because we handled the all-parts-are-null
+                    -- situation above
+                    assert(#objs == 1,
+                        'expect one matching object, got ' ..
+                        tostring(#objs))
+                    return objs[1]
+                else -- c.type == '1:N'
+                    return objs
+                end
+            end,
+        }
     end
 
     -- create gql type
@@ -592,7 +451,7 @@ gql_type = function(state, avro_schema, collection, collection_name)
         avro_schema.name,
         fields = fields,
     })
-    return avro_t == 'enum' and types.nonNull(res) or res
+    return avro_t == 'record' and types.nonNull(res) or res
 elseif avro_t == 'enum' then
     error('enums not implemented yet') -- XXX
 elseif avro_t == 'array' or avro_t == 'array*' then
@@ -663,15 +522,21 @@ local function create_root_collection(state)
 
     -- `gql_type` is designed to create GQL type corresponding to a real schema
     -- and connections. However it also works with the fake schema.
+    -- Query type must be an Object, so it cannot be nonNull.
     local root_type = gql_type(state, root_schema, root_collection, "Query")
     state.schema = schema.create({
-        query = root_type
+        query = nullable(root_type),
     })
 end
 
 local function parse_cfg(cfg)
     local state = {}
-    state.types = utils.gen_booking_table({})
+
+    -- collection type is always record, so always non-null; we can lazily
+    -- evaluate non-null type from nullable type, but not vice versa, so we
+    -- collect nullable types here and evaluate non-null ones where needed
+    state.nullable_collection_types = utils.gen_booking_table({})
+
     state.object_arguments = utils.gen_booking_table({})
     state.list_arguments = utils.gen_booking_table({})
     state.all_arguments = utils.gen_booking_table({})
@@ -710,8 +575,15 @@ local function parse_cfg(cfg)
         assert(schema.type == 'record',
             'top-level schema must have record avro type, got ' ..
tostring(schema.type)) - state.types[collection_name] = gql_type(state, schema, collection, - collection_name) + local collection_type = + gql_type(state, schema, collection, collection_name) + -- we utilize the fact that collection type is always non-null and + -- don't store this information; see comment above for + -- `nullable_collection_types` variable definition + assert(collection_type.__type == 'NonNull', + 'collection must always has non-null type') + state.nullable_collection_types[collection_name] = + nullable(collection_type) -- prepare arguments' types local object_args = convert_record_fields_to_args(schema.fields, @@ -723,7 +595,6 @@ local function parse_cfg(cfg) state.object_arguments[collection_name] = object_args state.list_arguments[collection_name] = list_args state.all_arguments[collection_name] = args - end -- create fake root `Query` collection create_root_collection(state) @@ -815,8 +686,7 @@ end --- schema_name = 'schema_name_foo', --- connections = { // the optional field --- { ---- type = '1:1' or '1:N', ---- name = 'simple_connection_name', +--- name = 'connection_name_bar', --- destination_collection = 'collection_baz', --- parts = { --- { @@ -829,17 +699,7 @@ end --- -- ignored in the graphql --- -- part --- }, ---- { ---- name = 'union_connection_name', ---- type = '1:1' or '1:N', ---- variants = { ---- { ---- see variant format below ---- }, ---- ... ---- } ---- }, ---- ... +--- ... --- }, --- }, --- ... @@ -873,22 +733,6 @@ end --- } --- }), --- }) ---- ---- variant format ---- { ---- Source collection must have all fields that are keys in determinant ---- table. Based on the values of these fields right destination collection ---- is determined. ---- determinant = {field_or_source: 'destination_1_value', ...}, ---- destination_collection = 'collection_name', ---- parts = { ---- { ---- source_field = 'field_name_source', ---- destination_field = 'field_name_destination' ---- } ---- }, ---- index_name = 'index_name' ---- } function tarantool_graphql.new(cfg) local state = parse_cfg(cfg) return setmetatable(state, { diff --git a/test/local/space_nullable_1_1_conn.result b/test/local/space_nullable_1_1_conn.result new file mode 100644 index 0000000..29dbcc5 --- /dev/null +++ b/test/local/space_nullable_1_1_conn.result @@ -0,0 +1,180 @@ + + + +---------------------+ + | a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ +RUN downside_a {{{ +QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } +VARIABLES +--- +body: a +... + +RESULT +--- +email: +- successors: + - successors: &0 [] + body: c + - successors: + - successors: *0 + body: g + - successors: *0 + body: f + body: d + - successors: + - successors: *0 + body: e + body: b + body: a +... + +}}} + +RUN downside_h {{{ +QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } +VARIABLES +--- +body: h +... + +RESULT +--- +email: +- successors: + - successors: + - successors: &0 [] + body: m + body: l + - successors: *0 + body: k + body: h +... + +}}} + +RUN upside {{{ +QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } +VARIABLES +--- +body: f +... 
+ +RESULT +--- +email: +- body: f + in_reply_to: + body: d + in_reply_to: + body: a +... + +}}} + +RUN upside_x {{{ +QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } +VARIABLES +--- +body: x +... + +RESULT +--- +ok: false +err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"062b56b1885c71c51153ccb880ac7315","body":"x","in_reply_to_domain":"graphql.tarantool.org","in_reply_to_localpart":null}' +... + +}}} + +RUN upside_y {{{ +QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } +VARIABLES +--- +body: y +... + +RESULT +--- +ok: false +err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"1f70391f6ba858129413bd801b12acbf","body":"y","in_reply_to_domain":null,"in_reply_to_localpart":"1f70391f6ba858129413bd801b12acbf"}' +... + +}}} + diff --git a/test/local/space_nullable_1_1_conn.test.lua b/test/local/space_nullable_1_1_conn.test.lua new file mode 100755 index 0000000..7b311f3 --- /dev/null +++ b/test/local/space_nullable_1_1_conn.test.lua @@ -0,0 +1,56 @@ +#!/usr/bin/env tarantool + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. + package.path + +local graphql = require('graphql') +local testdata = require('test.testdata.nullable_1_1_conn_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +box.cfg{background = false} +testdata.init_spaces() + +-- upload test data +testdata.fill_test_data() + +-- acquire metadata +local metadata = testdata.get_test_metadata() +local schemas = metadata.schemas +local collections = metadata.collections +local service_fields = metadata.service_fields +local indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +local accessor = graphql.accessor_space.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}) + +local gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}) + +-- run queries +-- ----------- + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +testdata.drop_spaces() + +os.exit() diff --git a/test/shard_no_redundancy/shard_nullable_1_1_conn.result b/test/shard_no_redundancy/shard_nullable_1_1_conn.result new file mode 100644 index 0000000..56b46ff --- /dev/null +++ b/test/shard_no_redundancy/shard_nullable_1_1_conn.result @@ -0,0 +1,318 @@ +env = require('test_run') +--- +... +test_run = env.new() +--- +... +shard = require('shard') +--- +... +test_run:cmd("setopt delimiter ';'") +--- +- true +... +SERVERS = {'shard1', 'shard2'}; +--- +... +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + }, + login = 'guest', + password = '', + redundancy = 1, +}, 'shard_no_redundancy'); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +fio = require('fio') +--- +... 
+-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +--- +... +graphql = require('graphql') +--- +... +testdata = require('test.testdata.nullable_1_1_conn_testdata') +--- +... +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- +-- init box and data schema +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +-- upload test data +testdata.fill_test_data(shard) +--- +- |2 + + + +---------------------+ + | a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ +... +-- acquire metadata +metadata = testdata.get_test_metadata() +--- +... +schemas = metadata.schemas +--- +... +collections = metadata.collections +--- +... +service_fields = metadata.service_fields +--- +... +indexes = metadata.indexes +--- +... +-- build accessor and graphql schemas +-- ---------------------------------- +test_run:cmd("setopt delimiter ';'") +--- +- true +... +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); +--- +... +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +testdata.run_queries(gql_wrapper) +--- +- |+ + RUN downside_a {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: a + ... + + RESULT + --- + email: + - successors: + - successors: &0 [] + body: c + - successors: + - successors: *0 + body: g + - successors: *0 + body: f + body: d + - successors: + - successors: *0 + body: e + body: b + body: a + ... + + }}} + + RUN downside_h {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: h + ... + + RESULT + --- + email: + - successors: + - successors: + - successors: &0 [] + body: m + body: l + - successors: *0 + body: k + body: h + ... + + }}} + + RUN upside {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: f + ... + + RESULT + --- + email: + - body: f + in_reply_to: + body: d + in_reply_to: + body: a + ... + + }}} + + RUN upside_x {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: x + ... + + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"062b56b1885c71c51153ccb880ac7315","body":"x","in_reply_to_domain":"graphql.tarantool.org","in_reply_to_localpart":null}' + ... 
+ + }}} + + RUN upside_y {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: y + ... + + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"1f70391f6ba858129413bd801b12acbf","body":"y","in_reply_to_domain":null,"in_reply_to_localpart":"1f70391f6ba858129413bd801b12acbf"}' + ... + + }}} + +... +-- clean up +-- -------- +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +test_run:drop_cluster(SERVERS) +--- +... diff --git a/test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua b/test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua new file mode 100644 index 0000000..e3584d1 --- /dev/null +++ b/test/shard_no_redundancy/shard_nullable_1_1_conn.test.lua @@ -0,0 +1,76 @@ +env = require('test_run') +test_run = env.new() + +shard = require('shard') + +test_run:cmd("setopt delimiter ';'") +SERVERS = {'shard1', 'shard2'}; +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + }, + login = 'guest', + password = '', + redundancy = 1, +}, 'shard_no_redundancy'); +test_run:cmd("setopt delimiter ''"); + +fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +graphql = require('graphql') +testdata = require('test.testdata.nullable_1_1_conn_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch default') + +-- upload test data +testdata.fill_test_data(shard) + +-- acquire metadata +metadata = testdata.get_test_metadata() +schemas = metadata.schemas +collections = metadata.collections +service_fields = metadata.service_fields +indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +test_run:cmd("setopt delimiter ';'") +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); + +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +test_run:cmd("setopt delimiter ''"); + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch default') + +test_run:drop_cluster(SERVERS) diff --git a/test/shard_redundancy/nullable_1_1_conn.result b/test/shard_redundancy/nullable_1_1_conn.result new file mode 100644 index 0000000..c57f3d8 --- /dev/null +++ b/test/shard_redundancy/nullable_1_1_conn.result @@ -0,0 +1,353 @@ +-- ---------------------------------------------------------- +-- Motivation: https://github.com/tarantool/graphql/issues/43 +-- ---------------------------------------------------------- +env = require('test_run') +--- +... +test_run = env.new() +--- +... +shard = require('shard') +--- +... +-- we need at least four servers to make sure we have several (two) servers +-- within each replica set and several (two) replica sets +test_run:cmd("setopt delimiter ';'") +--- +- true +... +SERVERS = {'shard1', 'shard2', 'shard3', 'shard4'}; +--- +... +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + { uri = instance_uri('3'), zone = '2' }, + { uri = instance_uri('4'), zone = '3' }, + }, + login = 'guest', + password = '', + redundancy = 2, +}, 'shard_redundancy'); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +fio = require('fio') +--- +... +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. package.path +--- +... +graphql = require('graphql') +--- +... +testdata = require('test.testdata.nullable_1_1_conn_testdata') +--- +... +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- +-- init box and data schema +test_run:cmd('switch shard1') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch shard3') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... 
+test_run:cmd('switch shard4') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +-- upload test data +testdata.fill_test_data(shard) +--- +- |2 + + + +---------------------+ + | a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ +... +-- acquire metadata +metadata = testdata.get_test_metadata() +--- +... +schemas = metadata.schemas +--- +... +collections = metadata.collections +--- +... +service_fields = metadata.service_fields +--- +... +indexes = metadata.indexes +--- +... +-- build accessor and graphql schemas +-- ---------------------------------- +test_run:cmd("setopt delimiter ';'") +--- +- true +... +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); +--- +... +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +--- +... +test_run:cmd("setopt delimiter ''"); +--- +- true +... +testdata.run_queries(gql_wrapper) +--- +- |+ + RUN downside_a {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: a + ... + + RESULT + --- + email: + - successors: + - successors: &0 [] + body: c + - successors: + - successors: *0 + body: g + - successors: *0 + body: f + body: d + - successors: + - successors: *0 + body: e + body: b + body: a + ... + + }}} + + RUN downside_h {{{ + QUERY + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + VARIABLES + --- + body: h + ... + + RESULT + --- + email: + - successors: + - successors: + - successors: &0 [] + body: m + body: l + - successors: *0 + body: k + body: h + ... + + }}} + + RUN upside {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: f + ... + + RESULT + --- + email: + - body: f + in_reply_to: + body: d + in_reply_to: + body: a + ... + + }}} + + RUN upside_x {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: x + ... + + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"062b56b1885c71c51153ccb880ac7315","body":"x","in_reply_to_domain":"graphql.tarantool.org","in_reply_to_localpart":null}' + ... + + }}} + + RUN upside_y {{{ + QUERY + query emails_trace_upside($body: String) { + email(body: $body) { + body + in_reply_to { + body + in_reply_to { + body + in_reply_to { + body + } + } + } + } + } + VARIABLES + --- + body: y + ... + + RESULT + --- + ok: false + err: 'FULL MATCH constraint was failed: connection key parts must be all non-nulls + or all nulls; object: {"domain":"graphql.tarantool.org","localpart":"1f70391f6ba858129413bd801b12acbf","body":"y","in_reply_to_domain":null,"in_reply_to_localpart":"1f70391f6ba858129413bd801b12acbf"}' + ... + + }}} + +... +-- clean up +-- -------- +test_run:cmd('switch shard1') +--- +- true +... 
+require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard2') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard3') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch shard4') +--- +- true +... +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +--- +... +test_run:cmd('switch default') +--- +- true +... +test_run:drop_cluster(SERVERS) +--- +... diff --git a/test/shard_redundancy/nullable_1_1_conn.test.lua b/test/shard_redundancy/nullable_1_1_conn.test.lua new file mode 100644 index 0000000..a4d8976 --- /dev/null +++ b/test/shard_redundancy/nullable_1_1_conn.test.lua @@ -0,0 +1,93 @@ +-- ---------------------------------------------------------- +-- Motivation: https://github.com/tarantool/graphql/issues/43 +-- ---------------------------------------------------------- + +env = require('test_run') +test_run = env.new() + +shard = require('shard') + +-- we need at least four servers to make sure we have several (two) servers +-- within each replica set and several (two) replica sets + +test_run:cmd("setopt delimiter ';'") +SERVERS = {'shard1', 'shard2', 'shard3', 'shard4'}; +init_shard(SERVERS, { + servers = { + { uri = instance_uri('1'), zone = '0' }, + { uri = instance_uri('2'), zone = '1' }, + { uri = instance_uri('3'), zone = '2' }, + { uri = instance_uri('4'), zone = '3' }, + }, + login = 'guest', + password = '', + redundancy = 2, +}, 'shard_redundancy'); +test_run:cmd("setopt delimiter ''"); + +fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)"):gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
package.path + +graphql = require('graphql') +testdata = require('test.testdata.nullable_1_1_conn_testdata') + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard3') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch shard4') +require('test.testdata.nullable_1_1_conn_testdata').init_spaces() +test_run:cmd('switch default') + +-- upload test data +testdata.fill_test_data(shard) + +-- acquire metadata +metadata = testdata.get_test_metadata() +schemas = metadata.schemas +collections = metadata.collections +service_fields = metadata.service_fields +indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +test_run:cmd("setopt delimiter ';'") +accessor = graphql.accessor_shard.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}); + +gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}); +test_run:cmd("setopt delimiter ''"); + +testdata.run_queries(gql_wrapper) + +-- clean up +-- -------- + +test_run:cmd('switch shard1') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard2') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard3') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch shard4') +require('test.testdata.nullable_1_1_conn_testdata').drop_spaces() +test_run:cmd('switch default') + +test_run:drop_cluster(SERVERS) diff --git a/test/testdata/nullable_1_1_conn_testdata.lua b/test/testdata/nullable_1_1_conn_testdata.lua new file mode 100644 index 0000000..8ec2932 --- /dev/null +++ b/test/testdata/nullable_1_1_conn_testdata.lua @@ -0,0 +1,406 @@ +-- The example was inspired by [1]. Consider [2] for the problem description +-- and [3] for the track of related task. +-- +-- [1]: https://www.jwz.org/doc/mid.html +-- [2]: https://github.com/tarantool/graphql/issues/43 +-- [3]: https://github.com/tarantool/graphql/issues/44 + +local json = require('json') +local yaml = require('yaml') +local utils = require('graphql.utils') + +local nullable_1_1_conn_testdata = {} + +local PRNG_SEED = 42 +local DOMAIN = 'graphql.tarantool.org' + +-- return an error w/o file name and line number +local function strip_error(err) + return tostring(err):gsub('^.-:.-: (.*)$', '%1') +end + +local function print_and_return(...) + print(...) + return table.concat({...}, ' ') .. 
'\n' +end + +local function format_result(name, query, variables, result) + return ('RUN %s {{{\nQUERY\n%s\nVARIABLES\n%s\nRESULT\n%s\n}}}\n'):format( + name, query:rstrip(), yaml.encode(variables), yaml.encode(result)) +end + +function nullable_1_1_conn_testdata.get_test_metadata() + local schemas = json.decode([[{ + "email": { + "type": "record", + "name": "email", + "fields": [ + { "name": "localpart", "type": "string" }, + { "name": "domain", "type": "string" }, + { "name": "in_reply_to_localpart", "type": "string*" }, + { "name": "in_reply_to_domain", "type": "string*" }, + { "name": "body", "type": "string" } + ] + } + }]]) + + local collections = json.decode([[{ + "email": { + "schema_name": "email", + "connections": [ + { + "type": "1:N", + "name": "successors", + "destination_collection": "email", + "parts": [ + { + "source_field": "localpart", + "destination_field": "in_reply_to_localpart" + }, + { + "source_field": "domain", + "destination_field": "in_reply_to_domain" + } + ], + "index_name": "in_reply_to" + }, + { + "type": "1:1*", + "name": "in_reply_to", + "destination_collection": "email", + "parts": [ + { + "source_field": "in_reply_to_localpart", + "destination_field": "localpart" + }, + { + "source_field": "in_reply_to_domain", + "destination_field": "domain" + } + ], + "index_name": "message_id" + } + ] + } + }]]) + + local service_fields = { + email = {}, + } + + local indexes = { + email = { + message_id = { + service_fields = {}, + fields = {'localpart', 'domain'}, + index_type = 'tree', + unique = true, + primary = true, + }, + in_reply_to = { + service_fields = {}, + fields = {'in_reply_to_localpart', 'in_reply_to_domain'}, + index_type = 'tree', + unique = false, + primary = false, + }, + }, + } + + return { + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, + } +end + +function nullable_1_1_conn_testdata.init_spaces() + -- email fields + local LOCALPART_FN = 1 + local DOMAIN_FN = 2 + local IN_REPLY_TO_LOCALPART_BRANCH_FN = 3 -- luacheck: ignore + local IN_REPLY_TO_LOCALPART_FN = 4 + local IN_REPLY_TO_DOMAIN_BRANCH_FN = 5 -- luacheck: ignore + local IN_REPLY_TO_DOMAIN_FN = 6 + local BODY_FN = 7 -- luacheck: ignore + + box.once('test_space_init_spaces', function() + box.schema.create_space('email') + box.space.email:create_index('message_id', + {type = 'tree', unique = true, parts = { + {LOCALPART_FN, 'string'}, + {DOMAIN_FN, 'string'}, + }} + ) + box.space.email:create_index('in_reply_to', + {type = 'tree', unique = false, parts = { + {IN_REPLY_TO_LOCALPART_FN, 'string', is_nullable = true}, + {IN_REPLY_TO_DOMAIN_FN, 'string', is_nullable = true}, + }} + ) + end) +end + +-- numbers are from https://gist.github.com/blixt/f17b47c62508be59987b +local function gen_prng(seed) + return setmetatable({seed = seed}, { + __index = { + next_int = function(self, min, max) + self.seed = self.seed * 16807 % 2147483647 + return self.seed % (max - min + 1) + min + end, + next_string = function(self, len) + local res = {} + for i = 1, len do + res[i] = string.char(self:next_int(0, 255)) + end + return table.concat(res) + end, + } + }) +end + +function nullable_1_1_conn_testdata.fill_test_data(virtbox) + local results = '' + + local virtbox = virtbox or box.space + + local prng = gen_prng(PRNG_SEED) + + local function new_email(body) + return { + localpart = prng:next_string(16):hex(), + domain = DOMAIN, + body = body, + } + end + + -- the string must contain '\n\n' to being printed in the literal scalar + -- style + results = 
results .. print_and_return(([[ + + + +---------------------+ + | a-+ h x y | + | |\ \ |\ | + | b c d k l | + | | |\ \ | + | e f g m | + +---------------------+ + ]]):rstrip()) + + local email_trees = { + { + email = new_email('a'), + successors = { + { + email = new_email('b'), + successors = { + {email = new_email('e')}, + } + }, + { + email = new_email('c'), + }, + { + email = new_email('d'), + successors = { + {email = new_email('f')}, + {email = new_email('g')}, + } + } + } + }, + { + email = new_email('h'), + successors = { + { + email = new_email('k'), + }, + { + email = new_email('l'), + successors = { + {email = new_email('m')} + } + } + }, + } + } + + local function union_branch_of(value) + local NULL_T = 0 + local STRING_T = 1 + + if value == nil then + return NULL_T + elseif type(value) == 'string' then + return STRING_T + end + error('value must be nil or a string, got ' .. type(value)) + end + + -- `in_reply_to` is optional parameter with the following format: + -- + -- ``` + -- { + -- localpart = '...', + -- domain = '...', + -- } + -- ``` + local function add_emails(email_nodes, in_reply_to) + local irt_localpart = (in_reply_to or {}).localpart + local irt_domain = (in_reply_to or {}).domain + + for _, email_node in pairs(email_nodes) do + local localpart = email_node.email.localpart + local domain = email_node.email.domain + + virtbox.email:replace({ + localpart, + domain, + union_branch_of(irt_localpart), + irt_localpart, + union_branch_of(irt_domain), + irt_domain, + email_node.email.body, + }) + add_emails(email_node.successors or {}, { + localpart = localpart, + domain = domain, + }) + end + end + + add_emails(email_trees) + + -- add two emails with one null in in_reply_to_{localpart,domain} to test + -- FULL MATCH constraints + local domain = DOMAIN + local localpart = prng:next_string(16):hex() + virtbox.email:replace({ + localpart, + domain, + union_branch_of(box.NULL), + box.NULL, + union_branch_of(domain), + domain, + 'x', + }) + local localpart = prng:next_string(16):hex() + virtbox.email:replace({ + localpart, + domain, + union_branch_of(localpart), + localpart, + union_branch_of(box.NULL), + box.NULL, + 'y', + }) + + return results +end + +function nullable_1_1_conn_testdata.drop_spaces() + box.space._schema:delete('oncetest_space_init_spaces') + box.space.email:drop() +end + +function nullable_1_1_conn_testdata.run_queries(gql_wrapper) + local results = '' + + -- downside traversal (1:N connections) + -- ------------------------------------ + + local query_downside = [[ + query emails_tree_downside($body: String) { + email(body: $body) { + body + successors { + body + successors { + body + successors { + body + } + } + } + } + } + ]] + + local gql_query_downside = gql_wrapper:compile(query_downside) + + utils.show_trace(function() + local variables_downside_a = {body = 'a'} + local result = gql_query_downside:execute(variables_downside_a) + results = results .. print_and_return(format_result( + 'downside_a', query_downside, variables_downside_a, result)) + end) + + utils.show_trace(function() + local variables_downside_h = {body = 'h'} + local result = gql_query_downside:execute(variables_downside_h) + results = results .. 
print_and_return(format_result(
            'downside_h', query_downside, variables_downside_h, result))
    end)

    -- upside traversal (1:1 connections)
    -- ----------------------------------

    local query_upside = [[
        query emails_trace_upside($body: String) {
            email(body: $body) {
                body
                in_reply_to {
                    body
                    in_reply_to {
                        body
                        in_reply_to {
                            body
                        }
                    }
                }
            }
        }
    ]]

    local gql_query_upside = gql_wrapper:compile(query_upside)

    utils.show_trace(function()
        local variables_upside = {body = 'f'}
        local result = gql_query_upside:execute(variables_upside)
        results = results .. print_and_return(format_result(
            'upside', query_upside, variables_upside, result))
    end)

    -- FULL MATCH constraint: connection key parts must be all non-nulls or all
    -- nulls; both expected to fail
    -- ------------------------------------------------------------------------

    local variables_upside_x = {body = 'x'}
    local ok, err = pcall(function()
        local result = gql_query_upside:execute(variables_upside_x)
        results = results .. print_and_return(format_result(
            'upside_x', query_upside, variables_upside_x, result))
    end)

    local result = {ok = ok, err = strip_error(err)}
    results = results .. print_and_return(format_result(
        'upside_x', query_upside, variables_upside_x, result))

    local variables_upside_y = {body = 'y'}
    local ok, err = pcall(function()
        local result = gql_query_upside:execute(variables_upside_y)
        results = results .. print_and_return(format_result(
            'upside_y', query_upside, variables_upside_y, result))
    end)

    local result = {ok = ok, err = strip_error(err)}
    results = results .. print_and_return(format_result(
        'upside_y', query_upside, variables_upside_y, result))

    return results
end

return nullable_1_1_conn_testdata

From c2baeac8a87b045ee9ec212d7cf83cd1ac37f6bc Mon Sep 17 00:00:00 2001
From: AKhatskevich
Date: Wed, 28 Feb 2018 11:46:05 +0300
Subject: [PATCH 04/13] Improve test_data_user_order

Add goods (item) and an order_item relation.
---
 test/common/lua/test_data_user_order.lua | 345 ++++++++++++++++++++++-
 1 file changed, 338 insertions(+), 7 deletions(-)

diff --git a/test/common/lua/test_data_user_order.lua b/test/common/lua/test_data_user_order.lua
index 5c6baad..14c8b5d 100644
--- a/test/common/lua/test_data_user_order.lua
+++ b/test/common/lua/test_data_user_order.lua
@@ -1,5 +1,28 @@
 local json = require('json')
 
+-- This module helps to write tests by providing a simple shop database.
+--
+-- Data layout
+--
+-- models:
+-- - user: Users of the shop.
+--   - id
+--   - first_name
+--   - last_name
+-- - order: Orders (shopping carts) of users. One user can have many orders.
+--   - id
+--   - user_id
+--   - description
+-- - item: Goods of the shop.
+--   - id
+--   - name
+--   - description
+--   - price
+-- - order_item: N:N connection emulation. That is how one order can have many
+--   items and one item can be in different orders.
+-- - order_id +-- - item_id + local testdata = {} testdata.meta = { @@ -21,6 +44,24 @@ testdata.meta = { { "name": "user_id", "type": "int" }, { "name": "description", "type": "string" } ] + }, + "item": { + "type": "record", + "name": "item", + "fields": [ + { "name": "id", "type": "int" }, + { "name": "description", "type": "string" }, + { "name": "name", "type": "string" }, + { "name": "price", "type": "string" } + ] + }, + "order_item": { + "type": "record", + "name": "order_item", + "fields": [ + { "name": "item_id", "type": "int" }, + { "name": "order_id", "type": "int" } + ] } }]]), collections = json.decode([[{ @@ -32,7 +73,10 @@ testdata.meta = { "name": "order_connection", "destination_collection": "order_collection", "parts": [ - { "source_field": "id", "destination_field": "user_id" } + { + "source_field": "id", + "destination_field": "user_id" + } ], "index_name": "user_id_index" } @@ -41,16 +85,65 @@ testdata.meta = { "order_collection": { "schema_name": "order", "connections": [ + { + "type": "1:N", + "name": "order__order_item", + "destination_collection": "order_item_collection", + "parts": [ + { + "source_field": "id", + "destination_field": "order_id" + } + ], + "index_name": "order_id_item_id_index" + }, { "type": "1:1", "name": "user_connection", "destination_collection": "user_collection", "parts": [ - { "source_field": "user_id", "destination_field": "id" } + { + "source_field": "user_id", + "destination_field": "id" + } ], "index_name": "user_id_index" } ] + }, + "item_collection": { + "schema_name": "item", + "connections": [ + ] + }, + "order_item_collection": { + "schema_name": "order_item", + "connections": [ + { + "type": "1:1", + "name": "order_item__order", + "destination_collection": "order_collection", + "parts": [ + { + "source_field": "order_id", + "destination_field": "id" + } + ], + "index_name": "order_id_index" + }, + { + "type": "1:1", + "name": "order_item__item", + "destination_collection": "item_collection", + "parts": [ + { + "source_field": "item_id", + "destination_field": "id" + } + ], + "index_name": "item_id_index" + } + ] } }]]), service_fields = { @@ -58,6 +151,8 @@ testdata.meta = { {name = 'created', type = 'long', default = 0}, }, order = {}, + item = {}, + order_item = {}, }, indexes = { user_collection = { @@ -85,6 +180,24 @@ testdata.meta = { primary = false, }, }, + item_collection = { + item_id_index = { + service_fields = {}, + fields = {'id'}, + index_type = 'tree', + unique = true, + primary = true + } + }, + order_item_collection = { + order_id_item_id_index = { + service_fields = {}, + fields = {'order_id', 'item_id'}, + index_type = 'tree', + unique = true, + primary = true + } + } } } @@ -112,26 +225,244 @@ function testdata.init_spaces() {type = 'tree', unique = false, parts = { O_USER_ID_FN, 'unsigned' }}) + -- item_collection fields + local I_ITEM_ID_FN = 1 + box.schema.create_space('item_collection') + box.space.item_collection:create_index('item_id_index', + {type = 'tree', unique = true, parts = { + I_ITEM_ID_FN, 'unsigned' + }}) + -- order_item_collection fields + local OI_ORDER_ID_FN = 1 + local OI_USER_ID_FN = 2 + box.schema.create_space('order_item_collection') + box.space.order_item_collection:create_index('order_id_item_id_index', + {type = 'tree', unique = true, parts = { + OI_ORDER_ID_FN, 'unsigned', OI_USER_ID_FN, 'unsigned' + }}) end function testdata.drop_spaces() box.space.user_collection:drop() box.space.order_collection:drop() + box.space.item_collection:drop() + box.space.order_item_collection:drop() 
end +local items function testdata.fill_test_data(virtbox) local order_id = 1 - for i = 1, 15 do + local item_id_max = #items + for _, item in ipairs(items) do + virtbox.item_collection:replace({ + item.id, item.description, item.name, item.price + }) + end + local order_item_cnt = 0 + for user_id = 1, 15 do virtbox.user_collection:replace( - {1827767717, i, 'user fn ' .. i, 'user ln ' .. i}) + { 1827767717, user_id, 'user fn ' .. user_id, + 'user ln ' .. user_id }) -- Each user has N orders, where `N = user id` - for j = 1, i do - virtbox.order_collection:replace( - {order_id, i, 'order of user ' .. i}) + for i = 1, user_id do + virtbox.order_collection:replace({ + order_id, user_id, 'order of user ' .. user_id + }) order_id = order_id + 1 + local items_cnt = 3 + for k = 1, items_cnt do + order_item_cnt = order_item_cnt + 1 + local item_id = order_item_cnt % item_id_max + 1 + virtbox.order_item_collection:replace({ + order_id, item_id + }) + end end end end +items = { + { + id = 1, + description = "rhoncus. Nullam velit dui, semper", + name = "Salt", + price = "7.51" + }, + { + id = 2, + description = "sit", + name = "butter", + price = "3.96" + }, + { + id = 3, + description = "non,", + name = "onion", + price = "2.83" + }, + { + id = 4, + description = "mauris", + name = "milk", + price = "3.53" + }, + { + id = 5, + description = "Suspendisse tristique neque venenatis", + name = "Sausage", + price = "1.84" + }, + { + id = 6, + description = "eget, dictum", + name = "Paper", + price = "7.83" + }, + { + id = 7, + description = "lectus quis massa. Mauris", + name = "Freezer", + price = "5.47" + }, + { + id = 8, + description = "ac", + name = "Stone", + price = "8.29" + }, + { + id = 9, + description = "natoque penatibus et magnis dis", + name = "Silk", + price = "1.60" + }, + { + id = 10, + description = "adipiscing", + name = "Leather", + price = "0.40" + }, + { + id = 11, + description = "lobortis ultrices. Vivamus rhoncus.", + name = "Money", + price = "9.74" + }, + { + id = 12, + description = "montes, nascetur ridiculus", + name = "Tree", + price = "8.52" + }, + { + id = 13, + description = "In at pede. Cras vulputate", + name = "Garbage", + price = "1.88" + }, + { + id = 14, + description = "dolor quam, elementum at,", + name = "Table", + price = "2.91" + }, + { + id = 15, + description = "Donec dignissim", + name = "Wire", + price = "6.04" + }, + { + id = 16, + description = "turpis nec mauris blandit", + name = "Cup", + price = "8.05" + }, + { + id = 17, + description = "ornare placerat, orci", + name = "Blade", + price = "2.58" + }, + { + id = 18, + description = "arcu. Sed", + name = "Tea", + price = "0.38" + }, + { + id = 19, + description = "tempus risus. Donec egestas. Duis", + name = "Sveater", + price = "8.66" + }, + { + id = 20, + description = "Quisque libero lacus, varius", + name = "Keyboard", + price = "3.74" + }, + { + id = 21, + description = "faucibus orci luctus et ultrices", + name = "Shoes", + price = "2.21" + }, + { + id = 22, + description = "rhoncus. Nullam velit", + name = "Lemon", + price = "3.70" + }, + { + id = 23, + description = "justo sit amet", + name = "Orange", + price = "9.27" + }, + { + id = 24, + description = "porttitor tellus non magna.", + name = "Pen", + price = "3.41" + }, + { + id = 25, + description = "Suspendisse dui. Fusce diam", + name = "Screen", + price = "1.22" + }, + { + id = 26, + description = "eleifend vitae, erat. 
Vivamus nisi.", + name = "Glass", + price = "8.59" + }, + { + id = 27, + description = "tincidunt, nunc", + name = "Book", + price = "4.24" + }, + { + id = 28, + description = "orci luctus et ultrices posuere", + name = "Mouse", + price = "7.73" + }, + { + id = 29, + description = "in,", + name = "Doll", + price = "2.13" + }, + { + id = 30, + description = "lobortis ultrices. Vivamus rhoncus.", + name = "Socks", + price = "0.91" + } +} + return testdata From e4336e4759fe46a87f70727d1add476efe5691a5 Mon Sep 17 00:00:00 2001 From: AKhatskevich Date: Tue, 27 Feb 2018 22:35:45 +0300 Subject: [PATCH 05/13] Move general parts of executor to query_util.lua This patch is important for graphql query -> avro schema convertor implementation, because both executor and convertor walks over query AST. Part of #7 --- graphql/core/execute.lua | 89 ++-------------------- graphql/core/query_util.lua | 143 ++++++++++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+), 82 deletions(-) create mode 100644 graphql/core/query_util.lua diff --git a/graphql/core/execute.lua b/graphql/core/execute.lua index 92d5070..53807e2 100644 --- a/graphql/core/execute.lua +++ b/graphql/core/execute.lua @@ -2,6 +2,7 @@ local path = (...):gsub('%.[^%.]+$', '') local types = require(path .. '.types') local util = require(path .. '.util') local introspection = require(path .. '.introspection') +local query_util = require(path .. '.query_util') local function typeFromAST(node, schema) local innerType @@ -63,89 +64,10 @@ local function doesFragmentApply(fragment, type, context) end end -local function mergeSelectionSets(fields) - local selections = {} - - for i = 1, #fields do - local selectionSet = fields[i].selectionSet - if selectionSet then - for j = 1, #selectionSet.selections do - table.insert(selections, selectionSet.selections[j]) - end - end - end - - return selections -end - local function defaultResolver(object, arguments, info) return object[info.fieldASTs[1].name.value] end -local function buildContext(schema, tree, rootValue, variables, operationName) - local context = { - schema = schema, - rootValue = rootValue, - variables = variables, - operation = nil, - fragmentMap = {}, - -- The field is passed to resolve function within info attribute. - -- Can be used to store any data within one query. - qcontext = {} - } - - for _, definition in ipairs(tree.definitions) do - if definition.kind == 'operation' then - if not operationName and context.operation then - error('Operation name must be specified if more than one operation exists.') - end - - if not operationName or definition.name.value == operationName then - context.operation = definition - end - elseif definition.kind == 'fragmentDefinition' then - context.fragmentMap[definition.name.value] = definition - end - end - - if not context.operation then - if operationName then - error('Unknown operation "' .. operationName .. 
'"') - else - error('Must provide an operation') - end - end - - return context -end - -local function collectFields(objectType, selections, visitedFragments, result, context) - for _, selection in ipairs(selections) do - if selection.kind == 'field' then - if shouldIncludeNode(selection, context) then - local name = getFieldResponseKey(selection) - result[name] = result[name] or {} - table.insert(result[name], selection) - end - elseif selection.kind == 'inlineFragment' then - if shouldIncludeNode(selection, context) and doesFragmentApply(selection, objectType, context) then - collectFields(objectType, selection.selectionSet.selections, visitedFragments, result, context) - end - elseif selection.kind == 'fragmentSpread' then - local fragmentName = selection.name.value - if shouldIncludeNode(selection, context) and not visitedFragments[fragmentName] then - visitedFragments[fragmentName] = true - local fragment = context.fragmentMap[fragmentName] - if fragment and shouldIncludeNode(fragment, context) and doesFragmentApply(fragment, objectType, context) then - collectFields(objectType, fragment.selectionSet.selections, visitedFragments, result, context) - end - end - end - end - - return result -end - local evaluateSelections local function completeValue(fieldType, result, subSelections, context) @@ -230,13 +152,13 @@ local function getFieldEntry(objectType, object, fields, context) } local resolvedObject = (fieldType.resolve or defaultResolver)(object, arguments, info) - local subSelections = mergeSelectionSets(fields) + local subSelections = query_util.mergeSelectionSets(fields) return completeValue(fieldType.kind, resolvedObject, subSelections, context) end evaluateSelections = function(objectType, object, selections, context) - local groupedFieldSet = collectFields(objectType, selections, {}, {}, context) + local groupedFieldSet = query_util.collectFields(objectType, selections, {}, {}, context) return util.map(groupedFieldSet, function(fields) return getFieldEntry(objectType, object, fields, context) @@ -244,7 +166,10 @@ evaluateSelections = function(objectType, object, selections, context) end return function(schema, tree, rootValue, variables, operationName) - local context = buildContext(schema, tree, rootValue, variables, operationName) + local context = query_util.buildContext(schema, tree, rootValue, variables, operationName) + -- The field is passed to resolve function within info attribute. + -- Can be used to store any data within one query. + context.qcontext = {} local rootType = schema[context.operation.operation] if not rootType then diff --git a/graphql/core/query_util.lua b/graphql/core/query_util.lua new file mode 100644 index 0000000..7887878 --- /dev/null +++ b/graphql/core/query_util.lua @@ -0,0 +1,143 @@ +local path = (...):gsub('%.[^%.]+$', '') +local types = require(path .. '.types') +local util = require(path .. 
'.util')
+
+local query_util = {}
+
+local function typeFromAST(node, schema)
+  local innerType
+  if node.kind == 'listType' then
+    innerType = typeFromAST(node.type)
+    return innerType and types.list(innerType)
+  elseif node.kind == 'nonNullType' then
+    innerType = typeFromAST(node.type)
+    return innerType and types.nonNull(innerType)
+  else
+    assert(node.kind == 'namedType', 'Variable must be a named type')
+    return schema:getType(node.name.value)
+  end
+end
+
+local function getFieldResponseKey(field)
+  return field.alias and field.alias.name.value or field.name.value
+end
+
+local function shouldIncludeNode(selection, context)
+  if selection.directives then
+    local function isDirectiveActive(key, _type)
+      local directive = util.find(selection.directives, function(directive)
+        return directive.name.value == key
+      end)
+
+      if not directive then return end
+
+      local ifArgument = util.find(directive.arguments, function(argument)
+        return argument.name.value == 'if'
+      end)
+
+      if not ifArgument then return end
+
+      return util.coerceValue(ifArgument.value, _type.arguments['if'], context.variables)
+    end
+
+    if isDirectiveActive('skip', types.skip) then return false end
+    if isDirectiveActive('include', types.include) == false then return false end
+  end
+
+  return true
+end
+
+local function doesFragmentApply(fragment, type, context)
+  if not fragment.typeCondition then return true end
+
+  local innerType = typeFromAST(fragment.typeCondition, context.schema)
+
+  if innerType == type then
+    return true
+  elseif innerType.__type == 'Interface' then
+    local implementors = context.schema:getImplementors(innerType.name)
+    return implementors and implementors[type]
+  elseif innerType.__type == 'Union' then
+    return util.find(innerType.types, function(member)
+      return member == type
+    end)
+  end
+end
+
+function query_util.collectFields(objectType, selections, visitedFragments, result, context)
+  for _, selection in ipairs(selections) do
+    if selection.kind == 'field' then
+      if shouldIncludeNode(selection, context) then
+        local name = getFieldResponseKey(selection)
+        result[name] = result[name] or {}
+        table.insert(result[name], selection)
+      end
+    elseif selection.kind == 'inlineFragment' then
+      if shouldIncludeNode(selection, context) and doesFragmentApply(selection, objectType, context) then
+        query_util.collectFields(objectType, selection.selectionSet.selections, visitedFragments, result, context)
+      end
+    elseif selection.kind == 'fragmentSpread' then
+      local fragmentName = selection.name.value
+      if shouldIncludeNode(selection, context) and not visitedFragments[fragmentName] then
+        visitedFragments[fragmentName] = true
+        local fragment = context.fragmentMap[fragmentName]
+        if fragment and shouldIncludeNode(fragment, context) and doesFragmentApply(fragment, objectType, context) then
+          query_util.collectFields(objectType, fragment.selectionSet.selections, visitedFragments, result, context)
+        end
+      end
+    end
+  end
+
+  return result
+end
+
+function query_util.mergeSelectionSets(fields)
+  local selections = {}
+
+  for i = 1, #fields do
+    local selectionSet = fields[i].selectionSet
+    if selectionSet then
+      for j = 1, #selectionSet.selections do
+        table.insert(selections, selectionSet.selections[j])
+      end
+    end
+  end
+
+  return selections
+end
+
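+-- A minimal usage sketch for the two helpers above (the objectType and
+-- context values here are hypothetical; they normally come from the
+-- executor):
+--
+--     local grouped = query_util.collectFields(objectType,
+--       context.operation.selectionSet.selections, {}, {}, context)
+--     -- `grouped` maps each response key to the list of AST fields behind
+--     -- it; query_util.mergeSelectionSets(grouped[key]) then returns the
+--     -- combined sub-selections for that key.
+--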
+function query_util.buildContext(schema, tree, rootValue, variables, operationName)
+  local context = {
+    schema = schema,
+    rootValue = rootValue,
+    variables = variables,
+    operation = nil,
+    fragmentMap = {}
+  }
+
+  for _, definition in ipairs(tree.definitions) do
+    if definition.kind == 'operation' then
+      if not operationName and context.operation then
+        error('Operation name must be specified if more than one operation exists.')
+      end
+
+      if not operationName or definition.name.value == operationName then
+        context.operation = definition
+      end
+    elseif definition.kind == 'fragmentDefinition' then
+      context.fragmentMap[definition.name.value] = definition
+    end
+  end
+
+  if not context.operation then
+    if operationName then
+      error('Unknown operation "' .. operationName .. '"')
+    else
+      error('Must provide an operation')
+    end
+  end
+
+  return context
+end
+
+return query_util

From d8da85e547201d17a444cc1cf66ea9e53e968490 Mon Sep 17 00:00:00 2001
From: AKhatskevich
Date: Wed, 28 Feb 2018 13:58:05 +0300
Subject: [PATCH 06/13] Feature: convert graphql query to avro schema

Extend query object: add an `avro_schema` method, which produces an
Avro schema that can be used to verify or flatten any `query:execute()`
result.

Closes #7
---
 Makefile                             |   5 +-
 graphql/query_to_avro.lua            | 171 +++++++++++++++++
 graphql/tarantool_graphql.lua        |   2 +
 test/extra/suite.ini                 |   5 +
 test/extra/to_avro_arrays.test.lua   | 112 +++++++++++
 test/extra/to_avro_huge.test.lua     | 267 +++++++++++++++++++++++++++
 test/extra/to_avro_nested.test.lua   | 110 +++++++++++
 test/extra/to_avro_nullable.test.lua |  99 ++++++++++
 8 files changed, 770 insertions(+), 1 deletion(-)
 create mode 100755 graphql/query_to_avro.lua
 create mode 100644 test/extra/suite.ini
 create mode 100755 test/extra/to_avro_arrays.test.lua
 create mode 100755 test/extra/to_avro_huge.test.lua
 create mode 100755 test/extra/to_avro_nested.test.lua
 create mode 100755 test/extra/to_avro_nullable.test.lua

diff --git a/Makefile b/Makefile
index 8db7584..11c701e 100644
--- a/Makefile
+++ b/Makefile
@@ -3,8 +3,11 @@ default:
 
 .PHONY: lint
 lint:
-	luacheck graphql/*.lua test/local/*.lua test/testdata/*.lua \
+	luacheck graphql/*.lua \
+		test/local/*.lua \
+		test/testdata/*.lua \
 		test/common/*.test.lua test/common/lua/*.lua \
+		test/extra/*.test.lua \
 		--no-redefined --no-unused-args
 
 .PHONY: test
diff --git a/graphql/query_to_avro.lua b/graphql/query_to_avro.lua
new file mode 100755
index 0000000..ae1f718
--- /dev/null
+++ b/graphql/query_to_avro.lua
@@ -0,0 +1,171 @@
+--- Module for converting a GraphQL query to an Avro schema.
+---
+--- Random notes:
+---
+--- * The best way to use this module is to just call the `avro_schema`
+---   method on a compiled query object.
+local path = "graphql.core"
+local introspection = require(path .. '.introspection')
+local query_util = require(path .. '.query_util')
+
+-- module functions
+local query_to_avro = {}
+
+local gql_scalar_to_avro_index = {
+    String = "string",
+    Int = "int",
+    Long = "long",
+    -- GraphQL Float is double precision according to graphql.org.
+    -- More info http://graphql.org/learn/schema/#scalar-types
+    Float = "double",
+    Boolean = "boolean"
+}
+local function gql_scalar_to_avro(fieldType)
+    assert(fieldType.__type == "Scalar", "GraphQL scalar field expected")
+    assert(fieldType.name ~= "Map", "Map type is not supported")
+    local result = gql_scalar_to_avro_index[fieldType.name]
+    assert(result ~= nil, "Unexpected scalar type: " .. fieldType.name)
+    return result
+end
+
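+-- A small illustration of the scalar mapping above and the nullable
+-- conversion below (the input tables are hypothetical and are not used
+-- anywhere in the module itself):
+--
+--     gql_scalar_to_avro({__type = "Scalar", name = "Float"}) -- -> "double"
+--     make_avro_type_nullable({type = "double"}) -- -> {type = "double*"}
+--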
+-- The function converts an Avro type to a nullable one.
+-- In the current tarantool/avro-schema implementation we simply add '*'
+-- to the end of the type name.
+-- The function does not copy the resulting type but changes it in place.
+--
+-- @tparam table avro schema node to be converted to nullable
+--
+-- @treturn table schema node; basically it is the passed schema node,
+-- however in the nullable type implementation through unions it can be a
+-- different node
+local function make_avro_type_nullable(avro)
+    assert(avro.type ~= nil, "Avro `type` field is necessary")
+    local type_type = type(avro.type)
+    if type_type == "string" then
+        assert(avro.type:endswith("*") == false,
+            "Avro type should not be nullable already")
+        avro.type = avro.type .. '*'
+        return avro
+    end
+    if type_type == "table" then
+        avro.type = make_avro_type_nullable(avro.type)
+        return avro
+    end
+    error("Avro type should be a string or a table, got: " .. type_type)
+end
+
+local object_to_avro
+
+local function complete_field_to_avro(fieldType, result, subSelections, context,
+        NonNull)
+    local fieldTypeName = fieldType.__type
+    if fieldTypeName == 'NonNull' then
+        -- In case the field is NonNull, the real type is in the ofType
+        -- attribute.
+        fieldType = fieldType.ofType
+        fieldTypeName = fieldType.__type
+    elseif NonNull ~= true then
+        -- Call complete_field a second time and make the result nullable.
+        result = complete_field_to_avro(fieldType, result, subSelections,
+            context, true)
+        result = make_avro_type_nullable(result)
+        return result
+    end
+
+    if fieldTypeName == 'List' then
+        local innerType = fieldType.ofType
+        -- Steal type from virtual object.
+        -- This is necessary because in case of arrays the type should be
+        -- "completed" into the result's `items` field, but in other cases
+        -- (Object, Scalar) it should be completed into the `type` field.
+        local items = complete_field_to_avro(innerType, {}, subSelections,
+            context).type
+        result.type = {
+            type = "array",
+            items = items
+        }
+        return result
+    end
+
+    if fieldTypeName == 'Scalar' then
+        result.type = gql_scalar_to_avro(fieldType)
+        return result
+    end
+
+    if fieldTypeName == 'Object' then
+        result.type = object_to_avro(fieldType, subSelections, context)
+        return result
+    elseif fieldTypeName == 'Interface' or fieldTypeName == 'Union' then
+        error('Interfaces and Unions are not supported yet')
+    end
+    error(string.format('Unknown type "%s"', fieldTypeName))
+end
+
+--- The function converts a single Object field to the avro format.
+local function field_to_avro(object_type, fields, context)
+    local firstField = fields[1]
+    assert(#fields == 1, "The aliases are not considered yet")
+    local fieldName = firstField.name.value
+    local fieldType = introspection.fieldMap[fieldName] or
+        object_type.fields[fieldName]
+    assert(fieldType ~= nil)
+    local subSelections = query_util.mergeSelectionSets(fields)
+    local result = {}
+    result.name = fieldName
+    result = complete_field_to_avro(fieldType.kind, result, subSelections,
+        context)
+    return result
+end
+
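+-- For instance, a non-null String field `user_id` is emitted as the sketch
+-- below (a hypothetical field name, shown only for illustration):
+--
+--     {name = "user_id", type = "string"}
+--
+-- while the same field without the NonNull wrapper gets the nullable type
+-- "string*".
+--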
+--- Convert a GraphQL object to an Avro record.
+---
+--- @tparam table object_type GraphQL type object to be converted to Avro schema
+---
+--- @tparam table selections GraphQL representations of fields which should be
+--- in the output of the query
+---
+--- @tparam table context additional information for Avro schema generation; one
+--- of the fields is `namespace_parts` -- table of names of records from the
+--- root to the current object
+---
+--- @treturn table corresponding Avro schema
+object_to_avro = function(object_type, selections, context)
+    local groupedFieldSet = query_util.collectFields(object_type, selections,
+        {}, {}, context)
+    local result = {
+        type = 'record',
+        name = object_type.name,
+        fields = {}
+    }
+    if #context.namespace_parts ~= 0 then
+        result.namespace = table.concat(context.namespace_parts, ".")
+    end
+    table.insert(context.namespace_parts, result.name)
+    for _, fields in pairs(groupedFieldSet) do
+        local avro_field = field_to_avro(object_type, fields, context)
+        table.insert(result.fields, avro_field)
+    end
+    context.namespace_parts[#context.namespace_parts] = nil
+    return result
+end
+
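+-- Typical usage goes through the compiled query object (wired up in the
+-- tarantool_graphql.lua change below), e.g.:
+--
+--     local avro = gql_query:avro_schema()
+--     -- `avro` is a plain Lua table; it can be fed to avro_schema.create()
+--     -- to validate or flatten results of gql_query:execute().
+--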
+--- Create an Avro schema for a given query.
+---
+--- @tparam table query object for which the Avro schema should be created
+---
+--- @treturn table `avro_schema` avro schema for any `query:execute()` result.
+function query_to_avro.convert(query)
+    assert(type(query) == "table",
+        'query should be a table, got: ' .. type(query)
+        .. '; hint: use ":" instead of "."')
+    local state = query.state
+    local context = query_util.buildContext(state.schema, query.ast, {}, {},
+        query.operation_name)
+    -- The variable is necessary to avoid full name interference.
+    -- Each nested Avro record creates its own namespace.
+    context.namespace_parts = {}
+    local rootType = state.schema[context.operation.operation]
+    local selections = context.operation.selectionSet.selections
+    return object_to_avro(rootType, selections, context)
+end
+
+return query_to_avro
diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index a063c2b..db16799 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -14,6 +14,7 @@ local schema = require('graphql.core.schema')
 local types = require('graphql.core.types')
 local validate = require('graphql.core.validate')
 local execute = require('graphql.core.execute')
+local query_to_avro = require('graphql.query_to_avro')
 
 local utils = require('graphql.utils')
@@ -661,6 +662,7 @@ local function gql_compile(state, query)
     local gql_query = setmetatable(qstate, {
         __index = {
             execute = gql_execute,
+            avro_schema = query_to_avro.convert
         }
     })
     return gql_query
diff --git a/test/extra/suite.ini b/test/extra/suite.ini
new file mode 100644
index 0000000..777dd47
--- /dev/null
+++ b/test/extra/suite.ini
@@ -0,0 +1,5 @@
+[default]
+core = app
+description = tests on features which are not related to a specific executor
+lua_libs = ../common/lua/test_data_user_order.lua ../testdata/array_and_map_testdata.lua \
+           ../testdata/nullable_index_testdata.lua
diff --git a/test/extra/to_avro_arrays.test.lua b/test/extra/to_avro_arrays.test.lua
new file mode 100755
index 0000000..4097a2e
--- /dev/null
+++ b/test/extra/to_avro_arrays.test.lua
@@ -0,0 +1,112 @@
+#!/usr/bin/env tarantool
+local fio = require('fio')
+local yaml = require('yaml')
+local avro = require('avro_schema')
+local testdata = require('array_and_map_testdata')
+local test = require('tap').test('to avro schema')
+-- require in-repo version of graphql/ sources despite current working directory
+package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)")
+    :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' ..
+    package.path
+
+local graphql = require('graphql')
+
+box.cfg{wal_mode="none"}
+test:plan(4)
+
+testdata.init_spaces()
+testdata.fill_test_data()
+local meta = testdata.get_test_metadata()
+
+local accessor = graphql.accessor_space.new({
+    schemas = meta.schemas,
+    collections = meta.collections,
+    service_fields = meta.service_fields,
+    indexes = meta.indexes,
+})
+
+local gql_wrapper = graphql.new({
+    schemas = meta.schemas,
+    collections = meta.collections,
+    accessor = accessor,
+})
+
+-- We do not select `customer_balances` and `favorite_holidays` because they
+-- are of the `Map` type, which is not supported.
+local query = [[
+    query user_holidays($user_id: String) {
+        user_collection(user_id: $user_id) {
+            user_id
+            favorite_food
+            user_balances {
+                value
+            }
+        }
+    }
+]]
+local expected_avro_schema = [[
+type: record
+name: Query
+fields:
+- name: user_collection
+  type:
+    type: array
+    items:
+      type: record
+      fields:
+      - name: user_id
+        type: string
+      - name: user_balances
+        type:
+          type: array
+          items:
+            type: record
+            fields:
+            - name: value
+              type: int
+            name: balance
+            namespace: Query.user_collection
+      - name: favorite_food
+        type:
+          type: array
+          items: string
+      name: user_collection
+      namespace: Query
+]]
+expected_avro_schema = yaml.decode(expected_avro_schema)
+local gql_query = gql_wrapper:compile(query)
+local variables = {
+    user_id = 'user_id_1',
+}
+
+local avros = gql_query:avro_schema()
+
+test:is_deeply(avros, expected_avro_schema, "generated avro schema")
+local result_expected = [[
+user_collection:
+- user_id: user_id_1
+  user_balances:
+  - value: 33
+  - value: 44
+  favorite_food:
+  - meat
+  - potato
+]]
+result_expected = yaml.decode(result_expected)
+local result = gql_query:execute(variables)
+test:is_deeply(result, result_expected, 'graphql query exec result')
+local ok, ash, r, fs, _
+ok, ash = avro.create(avros)
+assert(ok)
+ok, _ = avro.validate(ash, result)
+assert(ok)
+test:is(ok, true, 'gql result validation by avro')
+ok, fs = avro.compile(ash)
+assert(ok)
+ok, r = fs.flatten(result)
+assert(ok)
+ok, r = fs.unflatten(r)
+assert(ok)
+test:is_deeply(r, result_expected, 'res = unflatten(flatten(res))')
+
+os.exit(test:check() == true and 0 or 1)
diff --git a/test/extra/to_avro_huge.test.lua b/test/extra/to_avro_huge.test.lua
new file mode 100755
index 0000000..072efef
--- /dev/null
+++ b/test/extra/to_avro_huge.test.lua
@@ -0,0 +1,267 @@
+#!/usr/bin/env tarantool
+local fio = require('fio')
+local yaml = require('yaml')
+local avro = require('avro_schema')
+local data = require('test_data_user_order')
+local test = require('tap').test('to avro schema')
+-- require in-repo version of graphql/ sources despite current working directory
+package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)")
+    :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' ..
+ package.path + +local graphql = require('graphql') + +box.cfg{wal_mode="none"} +test:plan(4) + +data.init_spaces() +data.fill_test_data(box.space) + +local accessor = graphql.accessor_space.new({ + schemas = data.meta.schemas, + collections = data.meta.collections, + service_fields = data.meta.service_fields, + indexes = data.meta.indexes, +}) + +local gql_wrapper = graphql.new({ + schemas = data.meta.schemas, + collections = data.meta.collections, + accessor = accessor, +}) + +local query = [[ + query object_result_max($user_id: Int, $order_id: Int) { + user_collection(id: $user_id) { + id + last_name + first_name + order_connection(limit: 1) { + id + user_id + description + order__order_item { + order_id + item_id + order_item__item{ + id + name + description + price + } + } + } + }, + order_collection(id: $order_id) { + id + description + user_connection { + id + first_name + last_name + order_connection(limit: 1) { + id + order__order_item { + order_item__item { + name + price + } + } + } + } + } + } + +]] +local expected_avro_schema = [[ +type: record +name: Query +fields: +- name: user_collection + type: + type: array + items: + type: record + fields: + - name: order_connection + type: + type: array + items: + type: record + fields: + - name: user_id + type: int + - name: order__order_item + type: + type: array + items: + type: record + fields: + - name: order_id + type: int + - name: item_id + type: int + - name: order_item__item + type: + type: record + fields: + - name: description + type: string + - name: price + type: string + - name: name + type: string + - name: id + type: int + name: item_collection + namespace: Query.user_collection.order_collection.order_item_collection + name: order_item_collection + namespace: Query.user_collection.order_collection + - name: description + type: string + - name: id + type: int + name: order_collection + namespace: Query.user_collection + - name: last_name + type: string + - name: first_name + type: string + - name: id + type: int + name: user_collection + namespace: Query +- name: order_collection + type: + type: array + items: + type: record + fields: + - name: user_connection + type: + type: record + fields: + - name: order_connection + type: + type: array + items: + type: record + fields: + - name: order__order_item + type: + type: array + items: + type: record + fields: + - name: order_item__item + type: + type: record + fields: + - name: name + type: string + - name: price + type: string + name: item_collection + namespace: Query.order_collection.user_collection.order_collection.order_item_collection + name: order_item_collection + namespace: Query.order_collection.user_collection.order_collection + - name: id + type: int + name: order_collection + namespace: Query.order_collection.user_collection + - name: last_name + type: string + - name: first_name + type: string + - name: id + type: int + name: user_collection + namespace: Query.order_collection + - name: id + type: int + - name: description + type: string + name: order_collection + namespace: Query + +]] +expected_avro_schema = yaml.decode(expected_avro_schema) +local gql_query = gql_wrapper:compile(query) +local variables = { + user_id = 5, + order_id = 20 +} + +local avros = gql_query:avro_schema() +test:is_deeply(avros, expected_avro_schema, "generated avro schema") +local result_expected = [[ +user_collection: +- order_connection: + - user_id: 5 + id: 11 + description: order of user 5 + order__order_item: + - order_id: 1 + item_id: 11 + order_item__item: + id: 11 + price: 
'9.74' + name: Money + description: lobortis ultrices. Vivamus rhoncus. + - order_id: 29 + item_id: 11 + order_item__item: + id: 11 + price: '9.74' + name: Money + description: lobortis ultrices. Vivamus rhoncus. + - order_id: 30 + item_id: 11 + order_item__item: + id: 11 + price: '9.74' + name: Money + description: lobortis ultrices. Vivamus rhoncus. + last_name: user ln 5 + first_name: user fn 5 + id: 5 +order_collection: +- description: order of user 6 + user_connection: + order_connection: + - id: 16 + order__order_item: + - order_item__item: + name: Cup + price: '8.05' + - order_item__item: + name: Cup + price: '8.05' + - order_item__item: + name: Cup + price: '8.05' + last_name: user ln 6 + first_name: user fn 6 + id: 6 + id: 20 +]] +result_expected = yaml.decode(result_expected) +local result = gql_query:execute(variables) +test:is_deeply(result, result_expected, 'graphql qury exec result') +local ok, ash, r, fs, _ +ok, ash = avro.create(avros) +assert(ok) +ok, _ = avro.validate(ash, result) +assert(ok) +test:is(ok, true, 'gql result validation by avro') +ok, fs = avro.compile(ash) +assert(ok) +ok, r = fs.flatten(result) +assert(ok) +ok, r = fs.unflatten(r) +-- The test can fail if wrong avro-schema version is installed. +-- Please install avro-schema >= fea0ead9d1. +assert(ok) +test:is_deeply(r, result_expected, 'res = unflatten(flatten(res))') + +os.exit(test:check() == true and 0 or 1) diff --git a/test/extra/to_avro_nested.test.lua b/test/extra/to_avro_nested.test.lua new file mode 100755 index 0000000..3335bca --- /dev/null +++ b/test/extra/to_avro_nested.test.lua @@ -0,0 +1,110 @@ +#!/usr/bin/env tarantool +local fio = require('fio') +local yaml = require('yaml') +local avro = require('avro_schema') +local data = require('test_data_nested_record') +local test = require('tap').test('to avro schema') +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. 
+    package.path
+
+local graphql = require('graphql')
+
+box.cfg{wal_mode="none"}
+test:plan(4)
+
+data.init_spaces()
+data.fill_test_data(box.space)
+
+local accessor = graphql.accessor_space.new({
+    schemas = data.meta.schemas,
+    collections = data.meta.collections,
+    service_fields = data.meta.service_fields,
+    indexes = data.meta.indexes,
+})
+
+local gql_wrapper = graphql.new({
+    schemas = data.meta.schemas,
+    collections = data.meta.collections,
+    accessor = accessor,
+})
+
+local query = [[
+    query getUserByUid($uid: Long) {
+        user(uid: $uid) {
+            uid
+            p1
+            p2
+            nested {
+                x
+                y
+            }
+        }
+    }
+]]
+local expected_avro_schema = [[
+type: record
+name: Query
+fields:
+- name: user
+  type:
+    type: array
+    items:
+      type: record
+      fields:
+      - name: p2
+        type: string
+      - name: p1
+        type: string
+      - name: uid
+        type: long
+      - name: nested
+        type:
+          type: record
+          fields:
+          - name: y
+            type: long
+          - name: x
+            type: long
+          name: nested
+          namespace: Query.user
+      name: user
+      namespace: Query
+]]
+expected_avro_schema = yaml.decode(expected_avro_schema)
+local gql_query = gql_wrapper:compile(query)
+local variables = {
+    uid = 1,
+}
+
+local avros = gql_query:avro_schema()
+
+test:is_deeply(avros, expected_avro_schema, "generated avro schema")
+local result_expected = [[
+user:
+- p2: p2 1
+  p1: p1 1
+  uid: 1
+  nested:
+    y: 2001
+    x: 1001
+]]
+result_expected = yaml.decode(result_expected)
+local result = gql_query:execute(variables)
+test:is_deeply(result, result_expected, 'graphql qury exec result')
+local ok, ash, r, fs, _
+ok, ash = avro.create(avros)
+assert(ok)
+ok, _ = avro.validate(ash, result)
+assert(ok)
+test:is(ok, true, 'gql result validation by avro')
+ok, fs = avro.compile(ash)
+assert(ok)
+ok, r = fs.flatten(result)
+assert(ok)
+ok, r = fs.unflatten(r)
+assert(ok)
+test:is_deeply(r, result_expected, 'res = unflatten(flatten(res))')
+
+os.exit(test:check() == true and 0 or 1)
diff --git a/test/extra/to_avro_nullable.test.lua b/test/extra/to_avro_nullable.test.lua
new file mode 100755
index 0000000..31ccca8
--- /dev/null
+++ b/test/extra/to_avro_nullable.test.lua
@@ -0,0 +1,99 @@
+#!/usr/bin/env tarantool
+local fio = require('fio')
+local yaml = require('yaml')
+local avro = require('avro_schema')
+local testdata = require('nullable_index_testdata')
+local test = require('tap').test('to avro schema')
+-- require in-repo version of graphql/ sources despite current working directory
+package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)")
+    :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' ..
+    package.path
+
+local graphql = require('graphql')
+
+box.cfg{wal_mode="none"}
+test:plan(4)
+
+testdata.init_spaces()
+testdata.fill_test_data()
+local meta = testdata.get_test_metadata()
+
+local accessor = graphql.accessor_space.new({
+    schemas = meta.schemas,
+    collections = meta.collections,
+    service_fields = meta.service_fields,
+    indexes = meta.indexes,
+})
+
+local gql_wrapper = graphql.new({
+    schemas = meta.schemas,
+    collections = meta.collections,
+    accessor = accessor,
+})
+
+-- We do not select `customer_balances` and `favorite_holidays` because they
+-- are of `Map` type, which is not supported.
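+-- (A minimal sketch, assuming a map of integers — this comment is
+-- illustrative and not from the original file: such a field would be
+-- declared as
+--     { "name": "customer_balances", "type": { "type": "map", "values": "int" } }
+-- and has no GraphQL counterpart here yet.)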
+local query = [[ + query get_foo($id: String) { + bar(id: $id) { + id + id_or_null_1 + id_or_null_2 + id_or_null_3 + } + } +]] +local expected_avro_schema = [[ +type: record +name: Query +fields: +- name: bar + type: + type: array + items: + type: record + fields: + - name: id_or_null_1 + type: string* + - name: id_or_null_3 + type: string* + - name: id_or_null_2 + type: string* + - name: id + type: string + name: bar + namespace: Query +]] +expected_avro_schema = yaml.decode(expected_avro_schema) +local gql_query = gql_wrapper:compile(query) +local variables = { + id = '101', +} + +local avros = gql_query:avro_schema() + +test:is_deeply(avros, expected_avro_schema, "generated avro schema") +local result_expected = [[ +bar: +- id_or_null_3: '101' + id_or_null_2: '101' + id: '101' +]] +result_expected = yaml.decode(result_expected) +local result = gql_query:execute(variables) +test:is_deeply(result, result_expected, 'graphql qury exec result') +local ok, ash, r, fs, _ +ok, ash = avro.create(avros) +assert(ok) +ok, _ = avro.validate(ash, result) +assert(ok) +test:is(ok, true, 'gql result validation by avro') +ok, fs = avro.compile(ash) +assert(ok) +ok, r = fs.flatten(result) +assert(ok) +ok, r = fs.unflatten(r) +assert(ok) +test:is_deeply(r, result_expected, 'res = unflatten(flatten(res))') + +os.exit(test:check() == true and 0 or 1) From c2dd3190f181ae087963e5384f175b7eee9fad02 Mon Sep 17 00:00:00 2001 From: Alexander Turenko Date: Tue, 13 Mar 2018 22:23:21 +0300 Subject: [PATCH 07/13] Refactor query_to_avro a bit The changes are debatable, but I'm trying to improve code readability as stated in the comments to [1]. [1]: https://github.com/tarantool/graphql/pull/58 --- README.md | 17 +++- graphql/query_to_avro.lua | 138 ++++++++++++++------------- test/extra/suite.ini | 5 +- test/extra/to_avro_nullable.test.lua | 23 +++-- 4 files changed, 101 insertions(+), 82 deletions(-) diff --git a/README.md b/README.md index e1f2922..dca5477 100644 --- a/README.md +++ b/README.md @@ -95,11 +95,18 @@ make test ## Requirements -* For use: tarantool, lulpeg, >=tarantool/shard-1.1-91-gfa88bf8 (optional), - tarantool/avro-schema. -* For test (additionally to 'for use'): python 2.7, virtualenv, luacheck, - >=tarantool/shard-1.1-92-gec1a27e. -* For building apidoc (additionally to 'for use'): ldoc. +* For use: + * tarantool, + * lulpeg, + * >=tarantool/avro-schema-2.0-71-gfea0ead, + * >=tarantool/shard-1.1-91-gfa88bf8 (optional). +* For test (additionally to 'for use'): + * python 2.7, + * virtualenv, + * luacheck, + * >=tarantool/shard-1.1-92-gec1a27e. +* For building apidoc (additionally to 'for use'): + * ldoc. ## License diff --git a/graphql/query_to_avro.lua b/graphql/query_to_avro.lua index ae1f718..8e9f1a8 100755 --- a/graphql/query_to_avro.lua +++ b/graphql/query_to_avro.lua @@ -2,7 +2,7 @@ --- --- Random notes: --- ---- * The best way to use this module is to just call `avro_schema` methon on +--- * The best way to use this module is to just call `avro_schema` method on --- compiled query object. local path = "graphql.core" local introspection = require(path .. '.introspection') @@ -11,6 +11,9 @@ local query_util = require(path .. '.query_util') -- module functions local query_to_avro = {} +-- forward declaration +local object_to_avro + local gql_scalar_to_avro_index = { String = "string", Int = "int", @@ -28,79 +31,84 @@ local function gql_scalar_to_avro(fieldType) return result end --- The function converts avro type to nullable. 
--- In current tarantool/avro-schema implementation we simply add '*'
--- to the end of type name.
--- The function do not copy the resulting type but changes it in place.
---
--- @tparam table avro schema node to be converted to nullable
---
--- @tresult table schema node; basically it is the passed schema node,
--- however in nullable type implementation through unions it can be different
--- node
+--- The function converts avro type to the corresponding nullable type in
+--- place and returns the result.
+---
+--- We make changes in place in case of table input (`avro`) because of
+--- performance reasons, but we return the result because an input (`avro`)
+--- can be a string. Strings in Lua are immutable.
+---
+--- In the current tarantool/avro-schema implementation we simply add '*' to
+--- the end of a type name.
+---
+--- If the type is already nullable the function leaves it as is.
+---
+--- @tparam table avro avro schema node to be converted to nullable one
+---
+--- @treturn `result` (string or table) nullable avro type
 local function make_avro_type_nullable(avro)
-    assert(avro.type ~= nil, "Avro `type` field is necessary")
-    local type_type = type(avro.type)
-    if type_type == "string" then
-        assert(avro.type:endswith("*") == false,
-            "Avro type should not be nullable already")
-        avro.type = avro.type .. '*'
-        return avro
-    end
-    if type_type == "table" then
-        avro.type = make_avro_type_nullable(avro.type)
-        return avro
+    assert(avro ~= nil, "avro must not be nil")
+
+    local value_type = type(avro)
+
+    if value_type == "string" then
+        return avro:endswith("*") and avro or (avro .. '*')
+    elseif value_type == "table" then
+        return make_avro_type_nullable(avro.type)
     end
-    error("Avro type should be a string or table, got :" .. type_type)
-end
-local object_to_avro
+    error("avro should be a string or a table, got " .. value_type)
+end
-local function complete_field_to_avro(fieldType, result, subSelections, context,
-        NonNull)
+--- Convert GraphQL type to avro-schema with selecting fields.
+---
+--- @tparam table fieldType GraphQL type
+---
+--- @tparam table subSelections fields to select from resulting avro-schema
+--- (internal graphql-lua format)
+---
+--- @tparam table context current traversal context, here it is just passed
+--- through to the called functions (internal graphql-lua format)
+---
+--- @treturn table `result` is the resulting avro-schema
+local function gql_type_to_avro(fieldType, subSelections, context)
     local fieldTypeName = fieldType.__type
-    if fieldTypeName == 'NonNull' then
-        -- In case the field is NonNull, the real type is in ofType attribute.
+    local isNonNull = false
+
+    -- In case the field is NonNull, the real type is in ofType attribute.
+    while fieldTypeName == 'NonNull' do
         fieldType = fieldType.ofType
         fieldTypeName = fieldType.__type
-    elseif NonNull ~= true then
-        -- Call complete_field second time and make result nullable.
-        result = complete_field_to_avro(fieldType, result, subSelections,
-            context, true)
-        result = make_avro_type_nullable(result)
-        return result
+        isNonNull = true
     end
 
+    local result
+
     if fieldTypeName == 'List' then
         local innerType = fieldType.ofType
-        -- Steal type from virtual object.
-        -- This is necessary because in case of arrays type should be
-        -- "completed" into results `items` field, but in other cases (Object,
-        -- Scalar) it should be completed into `type` field.
- local items = complete_field_to_avro(innerType, {}, subSelections, - context).type - result.type = { + local innerTypeAvro = gql_type_to_avro(innerType, subSelections, + context) + result = { type = "array", - items = items + items = innerTypeAvro, } - return result - end - - if fieldTypeName == 'Scalar' then - result.type = gql_scalar_to_avro(fieldType) - return result - end - - if fieldTypeName == 'Object' then - result.type = object_to_avro(fieldType, subSelections, context) - return result + elseif fieldTypeName == 'Scalar' then + result = gql_scalar_to_avro(fieldType) + elseif fieldTypeName == 'Object' then + result = object_to_avro(fieldType, subSelections, context) elseif fieldTypeName == 'Interface' or fieldTypeName == 'Union' then error('Interfaces and Unions are not supported yet') + else + error(string.format('Unknown type "%s"', tostring(fieldTypeName))) end - error(string.format('Unknown type "%s"', fieldTypeName)) + + if not isNonNull then + result = make_avro_type_nullable(result) + end + return result end ---- The function converts a single Object field to avro format +--- The function converts a single Object field to avro format. local function field_to_avro(object_type, fields, context) local firstField = fields[1] assert(#fields == 1, "The aliases are not considered yet") @@ -109,11 +117,13 @@ local function field_to_avro(object_type, fields, context) object_type.fields[fieldName] assert(fieldType ~= nil) local subSelections = query_util.mergeSelectionSets(fields) - local result = {} - result.name = fieldName - result = complete_field_to_avro(fieldType.kind, result, subSelections, + + local fieldTypeAvro = gql_type_to_avro(fieldType.kind, subSelections, context) - return result + return { + name = fieldName, + type = fieldTypeAvro, + } end --- Convert GraphQL object to avro record. @@ -127,7 +137,7 @@ end --- of the fields is `namespace_parts` -- table of names of records from the --- root to the current object --- ---- @treturn table corresponding Avro schema +--- @treturn table `result` is the corresponding Avro schema object_to_avro = function(object_type, selections, context) local groupedFieldSet = query_util.collectFields(object_type, selections, {}, {}, context) @@ -152,11 +162,11 @@ end --- --- @tparam table query object which avro schema should be created for --- ---- @treturn table `avro_schema` avro schema for any `query:execute()` result. +--- @treturn table `avro_schema` avro schema for any `query:execute()` result function query_to_avro.convert(query) assert(type(query) == "table", - 'query should be a table, got: ' .. type(table) - .. '; hint: use ":" instead of "."') + ('query should be a table, got: %s; ' .. 
+ 'hint: use ":" instead of "."'):format(type(table))) local state = query.state local context = query_util.buildContext(state.schema, query.ast, {}, {}, query.operation_name) diff --git a/test/extra/suite.ini b/test/extra/suite.ini index 777dd47..d4d1955 100644 --- a/test/extra/suite.ini +++ b/test/extra/suite.ini @@ -1,5 +1,8 @@ [default] core = app description = tests on features which are not related to specific executor -lua_libs = ../common/lua/test_data_user_order.lua ../testdata/array_and_map_testdata.lua \ +lua_libs = + ../common/lua/test_data_user_order.lua \ + ../common/lua/test_data_nested_record.lua \ + ../testdata/array_and_map_testdata.lua \ ../testdata/nullable_index_testdata.lua diff --git a/test/extra/to_avro_nullable.test.lua b/test/extra/to_avro_nullable.test.lua index 31ccca8..d54ce11 100755 --- a/test/extra/to_avro_nullable.test.lua +++ b/test/extra/to_avro_nullable.test.lua @@ -81,19 +81,18 @@ bar: ]] result_expected = yaml.decode(result_expected) local result = gql_query:execute(variables) -test:is_deeply(result, result_expected, 'graphql qury exec result') -local ok, ash, r, fs, _ -ok, ash = avro.create(avros) -assert(ok) -ok, _ = avro.validate(ash, result) -assert(ok) +test:is_deeply(result, result_expected, 'graphql query exec result') +local ok, ash = avro.create(avros) +assert(ok, tostring(ash)) +local ok, err = avro.validate(ash, result) +assert(ok, tostring(err)) test:is(ok, true, 'gql result validation by avro') -ok, fs = avro.compile(ash) -assert(ok) -ok, r = fs.flatten(result) -assert(ok) -ok, r = fs.unflatten(r) -assert(ok) +local ok, fs = avro.compile(ash) +assert(ok, tostring(fs)) +local ok, r = fs.flatten(result) +assert(ok, tostring(r)) +local ok, r = fs.unflatten(r) +assert(ok, tostring(r)) test:is_deeply(r, result_expected, 'res = unflatten(flatten(res))') os.exit(test:check() == true and 0 or 1) From 5f4e8eb919cb8e20b9128e62133f796b4afffa30 Mon Sep 17 00:00:00 2001 From: Alexander Turenko Date: Tue, 13 Mar 2018 03:43:50 +0300 Subject: [PATCH 08/13] Process immediate InnerObject correctly The criteria of correctness is that the following queries should produce the same results. ``` local gql = ... local query = [[ query foo_list($offset: foo_offset) { foo(offset: $offset) { ... } } ]] local variables = { offset = { bar = ..., baz = ..., } } print(yaml.encode(gql:compile(query):execute(variables))) ``` ``` local gql = ... local query = [[ query foo_list($bar: String, $baz: Long) { foo(offset: {bar: $bar, baz: $baz}) { ... } } ]] local variables = { bar = ..., baz = ..., } print(yaml.encode(gql:compile(query):execute(variables))) ``` --- graphql/core/util.lua | 9 ++- graphql/core/validate.lua | 12 ++++ test/local/space_compound_index.result | 65 +++++++++++++++++++ .../shard_compound_index.result | 65 +++++++++++++++++++ .../shard_compound_index.result | 65 +++++++++++++++++++ test/testdata/compound_index_testdata.lua | 28 ++++++++ 6 files changed, 241 insertions(+), 3 deletions(-) diff --git a/graphql/core/util.lua b/graphql/core/util.lua index 45aa3b1..5a9438d 100644 --- a/graphql/core/util.lua +++ b/graphql/core/util.lua @@ -75,13 +75,16 @@ function util.coerceValue(node, schemaType, variables) error('Expected an input object') end - return util.map(node.values, function(field) + local inputObjectValue = {} + for _, field in pairs(node.values) do if not schemaType.fields[field.name] then error('Unknown input object field "' .. field.name .. 
'"') end - return util.coerceValue(field.value, schemaType.fields[field.name].kind, variables) - end) + inputObjectValue[field.name] = util.coerceValue( + field.value, schemaType.fields[field.name].kind, variables) + end + return inputObjectValue end if schemaType.__type == 'Enum' then diff --git a/graphql/core/validate.lua b/graphql/core/validate.lua index af255d4..de685ab 100644 --- a/graphql/core/validate.lua +++ b/graphql/core/validate.lua @@ -259,9 +259,21 @@ local visitors = { end end, + children = function(node) + return util.map(node.value.values or {}, function(value) + return value.value + end) + end, + rules = { rules.uniqueInputObjectFields } }, + variable = { + enter = function(node, context) + context.variableReferences[node.name.value] = true + end + }, + directive = { children = function(node, context) return node.arguments diff --git a/test/local/space_compound_index.result b/test/local/space_compound_index.result index 3fad85b..4b1246b 100644 --- a/test/local/space_compound_index.result +++ b/test/local/space_compound_index.result @@ -1154,3 +1154,68 @@ err: 'offset by a partial key is forbidden: expected "order_num" field' }}} +RUN 6 {{{ +QUERY + query users($limit: Int, $user_str: String, $user_num: Long) { + user_collection(limit: $limit, offset: {user_str: $user_str, + user_num: $user_num}) { + user_str + user_num + last_name + first_name + } + } +VARIABLES +--- +user_str: user_str_b +limit: 10 +user_num: 12 +... + +RESULT +--- +user_collection: +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 13 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 14 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 15 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 16 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 17 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 18 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 19 +- last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 20 +- last_name: last name c + user_str: user_str_c + first_name: first name c + user_num: 1 +- last_name: last name c + user_str: user_str_c + first_name: first name c + user_num: 2 +... + +}}} + diff --git a/test/shard_no_redundancy/shard_compound_index.result b/test/shard_no_redundancy/shard_compound_index.result index 7c84e8a..a6fc4c8 100644 --- a/test/shard_no_redundancy/shard_compound_index.result +++ b/test/shard_no_redundancy/shard_compound_index.result @@ -1270,6 +1270,71 @@ testdata.run_queries(gql_wrapper) }}} + RUN 6 {{{ + QUERY + query users($limit: Int, $user_str: String, $user_num: Long) { + user_collection(limit: $limit, offset: {user_str: $user_str, + user_num: $user_num}) { + user_str + user_num + last_name + first_name + } + } + VARIABLES + --- + user_str: user_str_b + limit: 10 + user_num: 12 + ... 
+ + RESULT + --- + user_collection: + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 13 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 14 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 15 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 16 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 17 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 18 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 19 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 20 + - last_name: last name c + user_str: user_str_c + first_name: first name c + user_num: 1 + - last_name: last name c + user_str: user_str_c + first_name: first name c + user_num: 2 + ... + + }}} + ... -- clean up -- -------- diff --git a/test/shard_redundancy/shard_compound_index.result b/test/shard_redundancy/shard_compound_index.result index 92a11ae..5f9ab1b 100644 --- a/test/shard_redundancy/shard_compound_index.result +++ b/test/shard_redundancy/shard_compound_index.result @@ -1288,6 +1288,71 @@ testdata.run_queries(gql_wrapper) }}} + RUN 6 {{{ + QUERY + query users($limit: Int, $user_str: String, $user_num: Long) { + user_collection(limit: $limit, offset: {user_str: $user_str, + user_num: $user_num}) { + user_str + user_num + last_name + first_name + } + } + VARIABLES + --- + user_str: user_str_b + limit: 10 + user_num: 12 + ... + + RESULT + --- + user_collection: + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 13 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 14 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 15 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 16 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 17 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 18 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 19 + - last_name: last name b + user_str: user_str_b + first_name: first name b + user_num: 20 + - last_name: last name c + user_str: user_str_c + first_name: first name c + user_num: 1 + - last_name: last name c + user_str: user_str_c + first_name: first name c + user_num: 2 + ... + + }}} + ... -- clean up -- -------- diff --git a/test/testdata/compound_index_testdata.lua b/test/testdata/compound_index_testdata.lua index 0117931..75ed408 100644 --- a/test/testdata/compound_index_testdata.lua +++ b/test/testdata/compound_index_testdata.lua @@ -459,6 +459,34 @@ function compound_index_testdata.run_queries(gql_wrapper) results = results .. 
print_and_return(format_result( '5_2', query_5, variables_5_2, result)) + -- compound offset argument constructed from separate variables (top-level + -- collection, full primary key) + -- ----------------------------------------------------------------------- + + local query_6 = [[ + query users($limit: Int, $user_str: String, $user_num: Long) { + user_collection(limit: $limit, offset: {user_str: $user_str, + user_num: $user_num}) { + user_str + user_num + last_name + first_name + } + } + ]] + + utils.show_trace(function() + local gql_query_6 = gql_wrapper:compile(query_6) + local variables_6 = { + limit = 10, + user_str = 'user_str_b', + user_num = 12, + } + local result = gql_query_6:execute(variables_6) + results = results .. print_and_return(format_result( + '6', query_6, variables_6, result)) + end) + return results end From e04aef6494bc24a62de4fe030e1182a1a994414b Mon Sep 17 00:00:00 2001 From: Alexander Turenko Date: Tue, 13 Mar 2018 03:45:42 +0300 Subject: [PATCH 09/13] PCRE matching of string fields Related to #73. --- README.md | 3 +- graphql/accessor_general.lua | 357 +++++++++++++++++------------- graphql/tarantool_graphql.lua | 3 +- graphql/utils.lua | 32 +-- test/local/space_pcre.result | 53 +++++ test/local/space_pcre.test.lua | 117 ++++++++++ test/testdata/common_testdata.lua | 11 +- 7 files changed, 402 insertions(+), 174 deletions(-) create mode 100644 test/local/space_pcre.result create mode 100755 test/local/space_pcre.test.lua diff --git a/README.md b/README.md index dca5477..d4ec14d 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,8 @@ make test * tarantool, * lulpeg, * >=tarantool/avro-schema-2.0-71-gfea0ead, - * >=tarantool/shard-1.1-91-gfa88bf8 (optional). + * >=tarantool/shard-1.1-91-gfa88bf8 (optional), + * lrexlib-pcre (optional). * For test (additionally to 'for use'): * python 2.7, * virtualenv, diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua index f8b996d..4b19f34 100644 --- a/graphql/accessor_general.lua +++ b/graphql/accessor_general.lua @@ -8,6 +8,12 @@ local json = require('json') local avro_schema = require('avro_schema') local utils = require('graphql.utils') local clock = require('clock') +local rex = utils.optional_require('rex_pcre') + +-- XXX: consider using [1] when it will be mature enough; +-- look into [2] for the status. +-- [1]: https://github.com/igormunkin/lua-re +-- [2]: https://github.com/tarantool/tarantool/issues/2764 local accessor_general = {} local DEF_RESULTING_OBJECT_CNT_MAX = 10000 @@ -553,61 +559,6 @@ local function build_index_parts_tree(indexes) return roots end -local function set_connection_index(c, c_name, c_type, collection_name, - indexes, connection_indexes) - assert(type(c.index_name) == 'string', - 'index_name must be a string, got ' .. type(c.index_name)) - - -- validate index_name against 'indexes' - local index_meta = indexes[c.destination_collection] - assert(type(index_meta) == 'table', - 'index_meta must be a table, got ' .. type(index_meta)) - - assert(type(collection_name) == 'string', 'collection_name expected to ' .. - 'be string, got ' .. type(collection_name)) - - -- validate connection parts are match or being prefix of index - -- fields - local i = 1 - local index_fields = index_meta[c.index_name].fields - for _, part in ipairs(c.parts) do - assert(type(part.source_field) == 'string', - 'part.source_field must be a string, got ' .. 
- type(part.source_field)) - assert(type(part.destination_field) == 'string', - 'part.destination_field must be a string, got ' .. - type(part.destination_field)) - assert(part.destination_field == index_fields[i], - ('connection "%s" of collection "%s" ' .. - 'has destination parts that is not prefix of the index ' .. - '"%s" parts (destination collection - "%s")'):format(c_name, collection_name, - c.index_name, c.destination_collection)) - i = i + 1 - end - local parts_cnt = i - 1 - - -- partial index of an unique index is not guaranteed to being - -- unique - assert(c_type == '1:N' or parts_cnt == #index_fields, - ('1:1 connection "%s" of collection "%s" ' .. - 'has less fields than the index "%s" has (destination collection - "%s")' .. - '(cannot prove uniqueness of the partial index)'):format(c_name, - collection_name, c.index_name, c.destination_collection)) - - -- validate connection type against index uniqueness (if provided) - if index_meta.unique ~= nil then - assert(c_type == '1:N' or index_meta.unique == true, - ('1:1 connection ("%s") cannot be implemented ' .. - 'on top of non-unique index ("%s")'):format( - c_name, c.index_name)) - end - - return { - index_name = c.index_name, - connection_type = c_type, - } -end - --- Build `connection_indexes` table (part of `index_cache`) to use in the --- @{get_index_name} function. --- @@ -630,28 +581,60 @@ local function build_connection_indexes(indexes, collections) assert(type(collections) == 'table', 'collections must be a table, got ' .. type(collections)) local connection_indexes = {} - for collection_name, collection in pairs(collections) do + for _, collection in pairs(collections) do for _, c in ipairs(collection.connections) do - if c.destination_collection ~= nil then - if connection_indexes[c.destination_collection] == nil then - connection_indexes[c.destination_collection] = {} - end - - connection_indexes[c.destination_collection][c.name] = - set_connection_index(c, c.name, c.type, collection_name, - indexes, connection_indexes) + if connection_indexes[c.destination_collection] == nil then + connection_indexes[c.destination_collection] = {} end + local index_name = c.index_name + assert(type(index_name) == 'string', + 'index_name must be a string, got ' .. type(index_name)) - if c.variants ~= nil then - for _, v in ipairs(c.variants) do - if connection_indexes[v.destination_collection] == nil then - connection_indexes[v.destination_collection] = {} - end - connection_indexes[v.destination_collection][c.name] = - set_connection_index(v, c.name, c.type, collection_name, - indexes, connection_indexes) - end + -- validate index_name against 'indexes' + local index_meta = indexes[c.destination_collection] + assert(type(index_meta) == 'table', + 'index_meta must be a table, got ' .. type(index_meta)) + + -- validate connection parts are match or being prefix of index + -- fields + local i = 1 + local index_fields = index_meta[c.index_name].fields + for _, part in ipairs(c.parts) do + assert(type(part.source_field) == 'string', + 'part.source_field must be a string, got ' .. + type(part.source_field)) + assert(type(part.destination_field) == 'string', + 'part.destination_field must be a string, got ' .. + type(part.destination_field)) + assert(part.destination_field == index_fields[i], + ('connection "%s" of collection "%s" ' .. + 'has destination parts that is not prefix of the index ' .. 
+ '"%s" parts'):format(c.name, c.destination_collection, + c.index_name)) + i = i + 1 end + local parts_cnt = i - 1 + + -- partial index of an unique index is not guaranteed to being + -- unique + assert(c.type == '1:N' or parts_cnt == #index_fields, + ('1:1 connection "%s" of collection "%s" ' .. + 'has less fields than the index "%s" has (cannot prove ' .. + 'uniqueness of the partial index)'):format(c.name, + c.destination_collection, c.index_name)) + + -- validate connection type against index uniqueness (if provided) + if index_meta.unique ~= nil then + assert(c.type == '1:N' or index_meta.unique == true, + ('1:1 connection ("%s") cannot be implemented ' .. + 'on top of non-unique index ("%s")'):format( + c.name, index_name)) + end + + connection_indexes[c.destination_collection][c.name] = { + index_name = index_name, + connection_type = c.type, + } end end return connection_indexes @@ -702,50 +685,52 @@ local function validate_collections(collections, schemas) type(connections)) for _, connection in ipairs(connections) do assert(type(connection) == 'table', - 'connection must be a table, got ' .. type(connection)) + 'connection must be a table, got ' .. type(connection)) assert(type(connection.name) == 'string', - 'connection.name must be a string, got ' .. - type(connection.name)) - assert(type(connection.type) == 'string', 'connection.type must' .. - 'be a string, got ' .. type(connection.type)) - assert(connection.type == '1:1' or connection.type == '1:N', - 'connection.type must be \'1:1\' or \'1:N\', got ' .. - connection.type) - if connection.destination_collection then - assert(type(connection.destination_collection) == 'string', + 'connection.name must be a string, got ' .. + type(connection.name)) + assert(type(connection.destination_collection) == 'string', 'connection.destination_collection must be a string, got ' .. type(connection.destination_collection)) - assert(type(connection.parts) == 'table', - 'connection.parts must be a table, got ' .. + assert(type(connection.parts) == 'table', + 'connection.parts must be a string, got ' .. type(connection.parts)) - assert(type(connection.index_name) == 'string', + assert(type(connection.index_name) == 'string', 'connection.index_name must be a string, got ' .. type(connection.index_name)) - return - end - if connection.variants then - for _, v in pairs(connection.variants) do - assert(type(v.determinant) == 'table', 'variant\'s ' .. - 'determinant must be a table, got ' .. - type(v.determinant)) - assert(type(v.destination_collection) == 'string', - 'variant.destination_collection must be a string, ' .. - 'got ' .. type(v.destination_collection)) - assert(type(v.parts) == 'table', - 'variant.parts must be a table, got ' .. type(v.parts)) - assert(type(v.index_name) == 'string', - 'variant.index_name must be a string, got ' .. - type(v.index_name)) - end - return - else - assert(false, ('collection doesn\'t have neither destination' .. - 'collection nor variants field')) - end end end end +--- Whether an object match set of PCRE. +--- +--- @tparam table obj an object to check +--- +--- @tparam table pcre map with PCRE as values; names are correspond to field +--- names of the `obj` to match +--- +--- @treturn boolean `res` whether the `obj` object match `pcre` set of +--- regexps. 
+local function match_using_re(obj, pcre)
+    if pcre == nil then return true end
+
+    for field_name, re in pairs(pcre) do
+        -- skip an object with null in a string* field
+        if obj[field_name] == nil then
+            return false
+        end
+        assert(rex ~= nil, 'we should not pass over :compile() ' ..
+            'with a query that contains PCRE matching when there is ' ..
+            'no lrexlib-pcre (rex_pcre) module present')
+        -- XXX: compile re once
+        local re = rex.new(re)
+        if not re:match(obj[field_name]) then
+            return false
+        end
+    end
+
+    return true
+end
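+
+-- A minimal sketch of the expected behaviour (illustrative comment, not from
+-- the original change; it assumes lrexlib-pcre is present):
+--
+--     match_using_re({first_name = 'Ivan'}, {first_name = '^I'})    --> true
+--     match_using_re({first_name = 'Vasiliy'}, {first_name = '^I'}) --> false
+--     match_using_re({}, {first_name = '^I'})                       --> false
+--
+-- The last call returns false because an object with a null (nil) value in
+-- a matched field never matches.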
--- Perform unflatten, skipping, filtering, limiting of objects. This is the
--- core of the `select_internal` function.
@@ -792,9 +777,11 @@ local function process_tuple(state, tuple, opts)
         qstats.fetched_object_cnt, fetched_object_cnt_max))
     assert(qcontext.deadline_clock > clock.monotonic64(),
         'query execution timeout exceeded, use `timeout_ms` to increase it')
+    local collection_name = opts.collection_name
+    local pcre = opts.pcre
 
     -- convert tuple -> object
-    local obj = opts.unflatten_tuple(opts.collection_name, tuple,
+    local obj = opts.unflatten_tuple(collection_name, tuple,
         opts.default_unflatten_tuple)
 
     -- skip all items before pivot (the item pointed by offset)
@@ -806,7 +793,8 @@
     end
 
     -- filter out non-matching objects
-    local match = utils.is_subtable(obj, filter)
+    local match = utils.is_subtable(obj, filter) and
+        match_using_re(obj, pcre)
     if do_filter then
         if not match then return true end
     else
@@ -879,6 +867,8 @@ local function select_internal(self, collection_name, from, filter, args, extra)
     -- XXX: save type at parsing and check here
     --assert(args.offset == nil or type(args.offset) == 'number',
     --    'args.offset must be a number of nil, got ' .. type(args.offset))
+    assert(args.pcre == nil or type(args.pcre) == 'table',
+        'args.pcre must be nil or a table, got ' .. type(args.pcre))
 
     local collection = self.collections[collection_name]
     assert(collection ~= nil,
@@ -927,6 +917,7 @@ local function select_internal(self, collection_name, from, filter, args, extra)
         collection_name = collection_name,
         unflatten_tuple = self.funcs.unflatten_tuple,
         default_unflatten_tuple = default_unflatten_tuple,
+        pcre = args.pcre,
     }
 
     if index == nil then
@@ -1027,6 +1018,107 @@ local function init_qcontext(accessor, qcontext)
         settings.timeout_ms * 1000 * 1000
 end
 
+--- Get an avro-schema for a primary key by a collection name.
+---
+--- @tparam table self accessor_general instance
+---
+--- @tparam string collection_name name of a collection
+---
+--- @treturn string `offset_type` is just a string in case of scalar primary
+--- key (and, then, offset) type
+---
+--- @treturn table `offset_type` is a record in case of compound (multi-part)
+--- primary key
+local function get_primary_key_type(self, collection_name)
+    -- get name of field of primary key
+    local _, index_meta = get_primary_index_meta(
+        self, collection_name)
+
+    local collection = self.collections[collection_name]
+    local schema = self.schemas[collection.schema_name]
+
+    local offset_fields = {}
+
+    for _, field_name in ipairs(index_meta.fields) do
+        local field_type
+        for _, field in ipairs(schema.fields) do
+            if field.name == field_name then
+                field_type = field.type
+            end
+        end
+        assert(field_type ~= nil,
+            ('cannot find type for primary index field "%s" ' ..
+            'for collection "%s"'):format(field_name,
+            collection_name))
+        assert(type(field_type) == 'string',
+            'field type must be a string, got ' ..
+            type(field_type))
+        offset_fields[#offset_fields + 1] = {
+            name = field_name,
+            type = field_type,
+        }
+    end
+
+    local offset_type
+    assert(#offset_fields > 0,
+        'offset must contain at least one field')
+    if #offset_fields == 1 then
+        -- use a scalar type
+        offset_type = offset_fields[1].type
+    else
+        -- construct an input type
+        offset_type = {
+            name = collection_name .. '_offset',
+            type = 'record',
+            fields = offset_fields,
+        }
+    end
+
+    return offset_type
+end
+
+-- XXX: add string fields of a nested record / 1:1 connection to
+-- get_pcre_argument_type
+
+--- Get an avro-schema for a pcre argument by a collection name.
+---
+--- Note: it is called from `list_args`, so applicable only for lists:
+--- top-level objects and 1:N connections.
+---
+--- @tparam table self accessor_general instance
+---
+--- @tparam string collection_name name of a collection
+---
+--- @treturn table `pcre_type` is a record with fields per string/string* field
+--- of an object of the collection
+local function get_pcre_argument_type(self, collection_name)
+    local collection = self.collections[collection_name]
+    assert(collection ~= nil, 'cannot find collection ' ..
+        tostring(collection_name))
+    local schema = self.schemas[collection.schema_name]
+    assert(schema ~= nil, 'cannot find schema ' ..
+        tostring(collection.schema_name))
+
+    assert(schema.type == 'record',
+        'top-level object expected to be a record, got ' ..
+        tostring(schema.type))
+
+    local string_fields = {}
+
+    for _, field in ipairs(schema.fields) do
+        if field.type == 'string' or field.type == 'string*' then
+            string_fields[#string_fields + 1] = table.copy(field)
+        end
+    end
+
+    local pcre_type = {
+        name = collection_name .. '_pcre',
+        type = 'record',
+        fields = string_fields,
+    }
+    return pcre_type
+end
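+
+-- A minimal sketch of the result (illustrative comment, not from the
+-- original change; for the user_collection from
+-- test/testdata/common_testdata.lua, field order may differ):
+--
+--     {
+--         name = 'user_collection_pcre',
+--         type = 'record',
+--         fields = {
+--             {name = 'user_id', type = 'string'},
+--             {name = 'first_name', type = 'string'},
+--             {name = 'middle_name', type = 'string*'},
+--             {name = 'last_name', type = 'string'},
+--         },
+--     }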
+
 --- Create a new data accessor.
 ---
 --- Provided `funcs` argument determines certain functions for retrieving
@@ -1165,53 +1257,20 @@ function accessor_general.new(opts, funcs)
                 args, extra)
         end,
         list_args = function(self, collection_name)
-            -- get name of field of primary key
-            local _, index_meta = get_primary_index_meta(
-                self, collection_name)
-
-            local offset_fields = {}
-
-            for _, field_name in ipairs(index_meta.fields) do
-                local field_type
-                local collection = self.collections[collection_name]
-                local schema = self.schemas[collection.schema_name]
-                for _, field in ipairs(schema.fields) do
-                    if field.name == field_name then
-                        field_type = field.type
-                    end
-                end
-                assert(field_type ~= nil,
-                    ('cannot find type for primary index field "%s" ' ..
-                    'for collection "%s"'):format(field_name,
-                    collection_name))
-                assert(type(field_type) == 'string',
-                    'field type must be a string, got ' ..
-                    type(field_type))
-                offset_fields[#offset_fields + 1] = {
-                    name = field_name,
-                    type = field_type,
-                }
-            end
-
-            local offset_type
-            assert(#offset_fields > 0,
-                'offset must contain at least one field')
-            if #offset_fields == 1 then
-                -- use a scalar type
-                offset_type = offset_fields[1].type
-            else
-                -- construct an input type
-                offset_type = {
-                    name = collection_name .. '_offset',
-                    type = 'record',
-                    fields = offset_fields,
-                }
+            local offset_type = get_primary_key_type(self, collection_name)
 
+            -- add `pcre` argument only if lrexlib-pcre was found
+            local pcre_field
+            if rex ~= nil then
+                local pcre_type = get_pcre_argument_type(self, collection_name)
+                pcre_field = {name = 'pcre', type = pcre_type}
             end
 
             return {
                 {name = 'limit', type = 'int'},
                 {name = 'offset', type = offset_type},
                 -- {name = 'filter', type = ...},
+                pcre_field,
             }
         end,
     }
diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index db16799..af719ea 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -729,7 +729,8 @@ end
 ---         list_args = function(self, collection_name)
 ---             return {
 ---                 {name = 'limit', type = 'int'},
----                 {name = 'offset', type = <...>}, -- type of primary key
+---                 {name = 'offset', type = <...>}, -- type of a primary key
+---                 {name = 'pcre', type = <...>},
 ---             }
 ---         end,
 ---     }
diff --git a/graphql/utils.lua b/graphql/utils.lua
index 18be4b4..8b8ba8a 100644
--- a/graphql/utils.lua
+++ b/graphql/utils.lua
@@ -132,27 +132,19 @@ function utils.gen_booking_table(data)
     })
 end
 
---- @return `table` with all keys of the given table
-function utils.get_keys(table)
-    local keys = {}
-    for k, _ in pairs(table) do
-        keys[#keys + 1] = k
-    end
-    return keys
-end
-
---- Check if passed table has passed keys with non-nil values.
---- @tparam table table to check
---- @tparam table keys array of keys to check
---- @return[1] `true` if passed table has passed keys
---- @return[2] `false` otherwise
-function utils.do_have_keys(table, keys)
-    for _, k in pairs(keys) do
-        if table[k] == nil then
-            return false
-        end
+--- Catch an error at module require and return nil in that case.
+---
+--- @tparam string module_name name of a module to require
+---
+--- @return `module` or `nil`
+function utils.optional_require(module_name)
+    assert(type(module_name) == 'string',
+        'module_name must be a string, got ' .. type(module_name))
+    local ok, module = pcall(require, module_name)
+    if not ok then
+        log.warn('optional_require: no module ' .. module_name)
     end
-    return true
+    return ok and module or nil
 end
 
 return utils
diff --git a/test/local/space_pcre.result b/test/local/space_pcre.result
new file mode 100644
index 0000000..bdc5963
--- /dev/null
+++ b/test/local/space_pcre.result
@@ -0,0 +1,53 @@
+RUN 1_1 {{{
+QUERY
+    query users($offset: String, $first_name_re: String,
+            $middle_name_re: String) {
+        user_collection(pcre: {first_name: $first_name_re,
+                middle_name: $middle_name_re}, offset: $offset) {
+            first_name
+            middle_name
+            last_name
+        }
+    }
+VARIABLES
+---
+middle_name_re: ich$
+first_name_re: ^I
+...
+
+RESULT
+---
+user_collection:
+- last_name: Ivanov
+  first_name: Ivan
+  middle_name: Ivanovich
+...
+
+}}}
+
+RUN 1_2 {{{
+QUERY
+    query users($offset: String, $first_name_re: String,
+            $middle_name_re: String) {
+        user_collection(pcre: {first_name: $first_name_re,
+                middle_name: $middle_name_re}, offset: $offset) {
+            first_name
+            middle_name
+            last_name
+        }
+    }
+VARIABLES
+---
+user_id: user_id_1
+first_name_re: ^V
+...
+
+RESULT
+---
+user_collection:
+- last_name: Pupkin
+  first_name: Vasiliy
+...
+ +}}} + diff --git a/test/local/space_pcre.test.lua b/test/local/space_pcre.test.lua new file mode 100755 index 0000000..433d715 --- /dev/null +++ b/test/local/space_pcre.test.lua @@ -0,0 +1,117 @@ +#!/usr/bin/env tarantool + +local fio = require('fio') + +-- require in-repo version of graphql/ sources despite current working directory +package.path = fio.abspath(debug.getinfo(1).source:match("@?(.*/)") + :gsub('/./', '/'):gsub('/+$', '')) .. '/../../?.lua' .. ';' .. + package.path + +local yaml = require('yaml') +local utils = require('graphql.utils') +local graphql = require('graphql') +local testdata = require('test.testdata.common_testdata') + +-- helpers +-- ------- + +local function print_and_return(...) + print(...) + return table.concat({...}, ' ') .. '\n' +end + +local function format_result(name, query, variables, result) + return ('RUN %s {{{\nQUERY\n%s\nVARIABLES\n%s\nRESULT\n%s\n}}}\n'):format( + name, query:rstrip(), yaml.encode(variables), yaml.encode(result)) +end + +-- init box, upload test data and acquire metadata +-- ----------------------------------------------- + +-- init box and data schema +box.cfg{background = false} +testdata.init_spaces() + +-- upload test data +testdata.fill_test_data() + +-- acquire metadata +local metadata = testdata.get_test_metadata() +local schemas = metadata.schemas +local collections = metadata.collections +local service_fields = metadata.service_fields +local indexes = metadata.indexes + +-- build accessor and graphql schemas +-- ---------------------------------- + +local accessor = graphql.accessor_space.new({ + schemas = schemas, + collections = collections, + service_fields = service_fields, + indexes = indexes, +}) + +local gql_wrapper = graphql.new({ + schemas = schemas, + collections = collections, + accessor = accessor, +}) + +-- run queries +-- ----------- + +local function run_queries(gql_wrapper) + local results = '' + + local query_1 = [[ + query users($offset: String, $first_name_re: String, + $middle_name_re: String) { + user_collection(pcre: {first_name: $first_name_re, + middle_name: $middle_name_re}, offset: $offset) { + first_name + middle_name + last_name + } + } + ]] + + local gql_query_1 = gql_wrapper:compile(query_1) + + -- regexp match + -- ------------ + + utils.show_trace(function() + local variables_1_1 = { + first_name_re = '^I', + middle_name_re = 'ich$', + } + local result = gql_query_1:execute(variables_1_1) + results = results .. print_and_return(format_result( + '1_1', query_1, variables_1_1, result)) + end) + + -- offset + regexp match + -- --------------------- + + utils.show_trace(function() + local variables_1_2 = { + user_id = 'user_id_1', + first_name_re = '^V', + } + local result = gql_query_1:execute(variables_1_2) + results = results .. 
print_and_return(format_result(
+            '1_2', query_1, variables_1_2, result))
+    end)
+
+    return results
+end
+
+run_queries(gql_wrapper)
+
+-- clean up
+-- --------
+
+testdata.drop_spaces()
+
+os.exit()
diff --git a/test/testdata/common_testdata.lua b/test/testdata/common_testdata.lua
index 5818c2b..5b061eb 100644
--- a/test/testdata/common_testdata.lua
+++ b/test/testdata/common_testdata.lua
@@ -17,6 +17,7 @@ function common_testdata.get_test_metadata()
             "fields": [
                 { "name": "user_id", "type": "string" },
                 { "name": "first_name", "type": "string" },
+                { "name": "middle_name", "type": "string*" },
                 { "name": "last_name", "type": "string" }
             ]
         },
@@ -137,10 +138,13 @@ end
 function common_testdata.fill_test_data(shard)
     local shard = shard or box.space
 
+    local NULL_T = 0
+    local STRING_T = 1
+
     shard.user_collection:replace(
-        {1827767717, 'user_id_1', 'Ivan', 'Ivanov'})
+        {1827767717, 'user_id_1', 'Ivan', STRING_T, 'Ivanovich', 'Ivanov'})
     shard.user_collection:replace(
-        {1827767717, 'user_id_2', 'Vasiliy', 'Pupkin'})
+        {1827767717, 'user_id_2', 'Vasiliy', NULL_T, box.NULL, 'Pupkin'})
     shard.order_collection:replace(
         {'order_id_1', 'user_id_1', 'first order of Ivan'})
     shard.order_collection:replace(
@@ -151,7 +155,8 @@ function common_testdata.fill_test_data(shard)
     for i = 3, 100 do
         local s = tostring(i)
         shard.user_collection:replace(
-            {1827767717, 'user_id_' .. s, 'first name ' .. s, 'last name ' .. s})
+            {1827767717, 'user_id_' .. s, 'first name ' .. s, NULL_T, box.NULL,
+             'last name ' .. s})
         for j = (4 + (i - 3) * 40), (4 + (i - 2) * 40) - 1 do
             local t = tostring(j)
             shard.order_collection:replace(

From cd5146d5a39ca33e19fb137db4a2edb1199a1cbb Mon Sep 17 00:00:00 2001
From: Alexander Turenko
Date: Tue, 13 Mar 2018 17:11:24 +0300
Subject: [PATCH 10/13] tarantool_graphql::nullable for nesting NonNulls

This makes the function general, so we do not need to fear corner
cases. It will loop infinitely for x = { ofType = x }, but such an
input is incorrect anyway.
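A minimal sketch of the difference (not from the original message; it
assumes the `types` module from graphql.core and the `nullable` helper
changed below):

```
local gql_class = types.nonNull(types.nonNull(types.int))
-- before the change: nullable() returned the inner NonNull wrapper;
-- after the change: it recurses and returns the plain Int type
assert(nullable(gql_class).__type ~= 'NonNull')
```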
---
 graphql/tarantool_graphql.lua | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index af719ea..3026f52 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -58,7 +58,6 @@ local function avro_type(avro_schema)
     error('unrecognized avro-schema type: ' .. json.encode(avro_schema))
 end
 
--- XXX: recursive skip several NonNull's?
 local function nullable(gql_class)
     assert(type(gql_class) == 'table', 'gql_class must be a table, got ' ..
         type(gql_class))
@@ -66,7 +65,7 @@ local function nullable(gql_class)
     if gql_class.__type ~= 'NonNull' then return gql_class end
 
     assert(gql_class.ofType ~= nil, 'gql_class.ofType must not be nil')
-    return gql_class.ofType
+    return nullable(gql_class.ofType)
 end
 
 local types_long = types.scalar({

From b73c0e119311009c3a475d0482f73a7bb535cf8a Mon Sep 17 00:00:00 2001
From: Alexander Turenko
Date: Wed, 14 Mar 2018 00:43:29 +0300
Subject: [PATCH 11/13] Check the corner case in an avro-schema generation

This follows up the PR #58 (the work in the scope of the issue #7).
---
 graphql/query_to_avro.lua | 30 +++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/graphql/query_to_avro.lua b/graphql/query_to_avro.lua
index 8e9f1a8..f7ebb2b 100755
--- a/graphql/query_to_avro.lua
+++ b/graphql/query_to_avro.lua
@@ -4,6 +4,8 @@
 ---
 --- * The best way to use this module is to just call `avro_schema` method on
 ---   compiled query object.
+
+local json = require('json')
 local path = "graphql.core"
 local introspection = require(path .. '.introspection')
 local query_util = require(path .. '.query_util')
@@ -41,20 +43,38 @@ end
 --- In the current tarantool/avro-schema implementation we simply add '*' to
 --- the end of a type name.
 ---
---- If the type is already nullable the function leaves it as is.
+--- If the type is already nullable the function leaves it as is if
+--- `opts.raise_on_nullable` is false or omitted. If `opts.raise_on_nullable`
+--- is true the function will raise an error.
 ---
 --- @tparam table avro avro schema node to be converted to nullable one
 ---
+--- @tparam[opt] table opts the following options:
+---
+--- * `raise_on_nullable` (boolean) raise an error on nullable type
+---
 --- @treturn `result` (string or table) nullable avro type
-local function make_avro_type_nullable(avro)
+local function make_avro_type_nullable(avro, opts)
     assert(avro ~= nil, "avro must not be nil")
 
+    local opts = opts or {}
+    assert(type(opts) == 'table',
+        'opts must be nil or a table, got ' .. type(opts))
+    local raise_on_nullable = opts.raise_on_nullable or false
+    assert(type(raise_on_nullable) == 'boolean',
+        'opts.raise_on_nullable must be nil or a boolean, got ' ..
+        type(raise_on_nullable))
 
     local value_type = type(avro)
 
     if value_type == "string" then
-        return avro:endswith("*") and avro or (avro .. '*')
+        local is_nullable = avro:endswith("*")
+        if raise_on_nullable and is_nullable then
+            error('expected non-null type, got the nullable one: ' ..
+                json.encode(avro))
+        end
+        return is_nullable and avro or (avro .. '*')
     elseif value_type == "table" then
-        return make_avro_type_nullable(avro.type)
+        return make_avro_type_nullable(avro.type, opts)
     end
 
     error("avro should be a string or a table, got " .. value_type)
@@ -103,7 +123,7 @@ local function gql_type_to_avro(fieldType, subSelections, context)
     end
 
     if not isNonNull then
-        result = make_avro_type_nullable(result)
+        result = make_avro_type_nullable(result, {raise_on_nullable = true})
     end
     return result
 end

From 94277f989b4603f66d1b60fa329a7c2557c8ab80 Mon Sep 17 00:00:00 2001
From: SudoBobo
Date: Wed, 14 Mar 2018 13:30:08 +0300
Subject: [PATCH 12/13] add support for union connections
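A minimal configuration sketch (not from the original message; the
collection and field names below are hypothetical, only the structure
follows the validation code in this patch). A union connection replaces
`destination_collection` with a list of `variants`, each carrying a
`determinant` plus the usual destination fields:

```
connections = {
    {
        name = 'owner',
        type = '1:1',
        variants = {
            {
                determinant = {owner_kind = 'user'},
                destination_collection = 'user_collection',
                parts = {
                    {source_field = 'owner_id', destination_field = 'user_id'},
                },
                index_name = 'user_id_index',
            },
            {
                determinant = {owner_kind = 'organization'},
                destination_collection = 'organization_collection',
                parts = {
                    {source_field = 'owner_id', destination_field = 'organization_id'},
                },
                index_name = 'organization_id_index',
            },
        },
    },
}
```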
---
 graphql/accessor_general.lua  | 145 +++++----
 graphql/tarantool_graphql.lua | 455 +++++++++++++++++++++++-----------
 graphql/utils.lua             |  23 ++
 3 files changed, 433 insertions(+), 190 deletions(-)

diff --git a/graphql/accessor_general.lua b/graphql/accessor_general.lua
index 4b19f34..a039400 100644
--- a/graphql/accessor_general.lua
+++ b/graphql/accessor_general.lua
@@ -559,6 +559,61 @@ local function build_index_parts_tree(indexes)
     return roots
 end
 
+local function set_connection_index(c, c_name, c_type, collection_name,
+        indexes, connection_indexes)
+    assert(type(c.index_name) == 'string',
+        'index_name must be a string, got ' .. type(c.index_name))
+
+    -- validate index_name against 'indexes'
+    local index_meta = indexes[c.destination_collection]
+    assert(type(index_meta) == 'table',
+        'index_meta must be a table, got ' .. type(index_meta))
+
+    assert(type(collection_name) == 'string', 'collection_name expected to ' ..
+        'be string, got ' .. type(collection_name))
+
+    -- validate connection parts are match or being prefix of index
+    -- fields
+    local i = 1
+    local index_fields = index_meta[c.index_name].fields
+    for _, part in ipairs(c.parts) do
+        assert(type(part.source_field) == 'string',
+            'part.source_field must be a string, got ' ..
+            type(part.source_field))
+        assert(type(part.destination_field) == 'string',
+            'part.destination_field must be a string, got ' ..
+            type(part.destination_field))
+        assert(part.destination_field == index_fields[i],
+            ('connection "%s" of collection "%s" ' ..
+            'has destination parts that is not prefix of the index ' ..
+            '"%s" parts (destination collection - "%s")'):format(c_name, collection_name,
+            c.index_name, c.destination_collection))
+        i = i + 1
+    end
+    local parts_cnt = i - 1
+
+    -- partial index of an unique index is not guaranteed to being
+    -- unique
+    assert(c_type == '1:N' or parts_cnt == #index_fields,
+        ('1:1 connection "%s" of collection "%s" ' ..
+        'has less fields than the index "%s" has (destination collection - "%s")' ..
+        '(cannot prove uniqueness of the partial index)'):format(c_name,
+        collection_name, c.index_name, c.destination_collection))
+
+    -- validate connection type against index uniqueness (if provided)
+    if index_meta.unique ~= nil then
+        assert(c_type == '1:N' or index_meta.unique == true,
+            ('1:1 connection ("%s") cannot be implemented ' ..
+            'on top of non-unique index ("%s")'):format(
+            c_name, c.index_name))
+    end
+
+    return {
+        index_name = c.index_name,
+        connection_type = c_type,
+    }
+end
+
 --- Build `connection_indexes` table (part of `index_cache`) to use in the
 --- @{get_index_name} function.
 ---
@@ -581,60 +636,28 @@ local function build_connection_indexes(indexes, collections)
     assert(type(collections) == 'table', 'collections must be a table, got ' ..
        type(collections))
     local connection_indexes = {}
-    for _, collection in pairs(collections) do
+    for collection_name, collection in pairs(collections) do
         for _, c in ipairs(collection.connections) do
-            if connection_indexes[c.destination_collection] == nil then
-                connection_indexes[c.destination_collection] = {}
-            end
-            local index_name = c.index_name
-            assert(type(index_name) == 'string',
-                'index_name must be a string, got ' .. type(index_name))
+            if c.destination_collection ~= nil then
+                if connection_indexes[c.destination_collection] == nil then
+                    connection_indexes[c.destination_collection] = {}
+                end
 
-            -- validate index_name against 'indexes'
-            local index_meta = indexes[c.destination_collection]
-            assert(type(index_meta) == 'table',
-                'index_meta must be a table, got ' .. type(index_meta))
-
-            -- validate connection parts are match or being prefix of index
-            -- fields
-            local i = 1
-            local index_fields = index_meta[c.index_name].fields
-            for _, part in ipairs(c.parts) do
-                assert(type(part.source_field) == 'string',
-                    'part.source_field must be a string, got ' ..
-                    type(part.source_field))
-                assert(type(part.destination_field) == 'string',
-                    'part.destination_field must be a string, got ' ..
-                    type(part.destination_field))
-                assert(part.destination_field == index_fields[i],
-                    ('connection "%s" of collection "%s" ' ..
-                    'has destination parts that is not prefix of the index ' ..
-                    '"%s" parts'):format(c.name, c.destination_collection,
-                    c.index_name))
-                i = i + 1
-            end
-            local parts_cnt = i - 1
-
-            -- partial index of an unique index is not guaranteed to being
-            -- unique
-            assert(c.type == '1:N' or parts_cnt == #index_fields,
-                ('1:1 connection "%s" of collection "%s" ' ..
-                'has less fields than the index "%s" has (cannot prove ' ..
@@ -581,60 +636,28 @@ local function build_connection_indexes(indexes, collections)
     assert(type(collections) == 'table', 'collections must be a table, got ' ..
         type(collections))
     local connection_indexes = {}
-    for _, collection in pairs(collections) do
+    for collection_name, collection in pairs(collections) do
         for _, c in ipairs(collection.connections) do
-            if connection_indexes[c.destination_collection] == nil then
-                connection_indexes[c.destination_collection] = {}
-            end
-            local index_name = c.index_name
-            assert(type(index_name) == 'string',
-                'index_name must be a string, got ' .. type(index_name))
+            if c.destination_collection ~= nil then
+                if connection_indexes[c.destination_collection] == nil then
+                    connection_indexes[c.destination_collection] = {}
+                end
 
-            -- validate index_name against 'indexes'
-            local index_meta = indexes[c.destination_collection]
-            assert(type(index_meta) == 'table',
-                'index_meta must be a table, got ' .. type(index_meta))
-
-            -- validate connection parts are match or being prefix of index
-            -- fields
-            local i = 1
-            local index_fields = index_meta[c.index_name].fields
-            for _, part in ipairs(c.parts) do
-                assert(type(part.source_field) == 'string',
-                    'part.source_field must be a string, got ' ..
-                    type(part.source_field))
-                assert(type(part.destination_field) == 'string',
-                    'part.destination_field must be a string, got ' ..
-                    type(part.destination_field))
-                assert(part.destination_field == index_fields[i],
-                    ('connection "%s" of collection "%s" ' ..
-                    'has destination parts that is not prefix of the index ' ..
-                    '"%s" parts'):format(c.name, c.destination_collection,
-                    c.index_name))
-                i = i + 1
-            end
-            local parts_cnt = i - 1
-
-            -- partial index of an unique index is not guaranteed to being
-            -- unique
-            assert(c.type == '1:N' or parts_cnt == #index_fields,
-                ('1:1 connection "%s" of collection "%s" ' ..
-                'has less fields than the index "%s" has (cannot prove ' ..
-                'uniqueness of the partial index)'):format(c.name,
-                c.destination_collection, c.index_name))
-
-            -- validate connection type against index uniqueness (if provided)
-            if index_meta.unique ~= nil then
-                assert(c.type == '1:N' or index_meta.unique == true,
-                    ('1:1 connection ("%s") cannot be implemented ' ..
-                    'on top of non-unique index ("%s")'):format(
-                    c.name, index_name))
+                connection_indexes[c.destination_collection][c.name] =
+                    set_connection_index(c, c.name, c.type, collection_name,
+                        indexes, connection_indexes)
             end
 
-            connection_indexes[c.destination_collection][c.name] = {
-                index_name = index_name,
-                connection_type = c.type,
-            }
+            if c.variants ~= nil then
+                for _, v in ipairs(c.variants) do
+                    if connection_indexes[v.destination_collection] == nil then
+                        connection_indexes[v.destination_collection] = {}
+                    end
+                    connection_indexes[v.destination_collection][c.name] =
+                        set_connection_index(v, c.name, c.type, collection_name,
+                            indexes, connection_indexes)
+                end
+            end
         end
     end
     return connection_indexes
@@ -698,10 +721,32 @@ local function validate_collections(collections, schemas)
             assert(type(connection.index_name) == 'string',
                 'connection.index_name must be a string, got ' ..
                 type(connection.index_name))
+            if connection.destination_collection then
+                return
+            end
+
+            if connection.variants then
+                for _, v in pairs(connection.variants) do
+                    assert(type(v.determinant) == 'table', 'variant\'s ' ..
+                        'determinant must be a table, got ' ..
+                        type(v.determinant))
+                    assert(type(v.destination_collection) == 'string',
+                        'variant.destination_collection must be a string, ' ..
+                        'got ' .. type(v.destination_collection))
+                    assert(type(v.parts) == 'table',
+                        'variant.parts must be a table, got ' .. type(v.parts))
+                    assert(type(v.index_name) == 'string',
+                        'variant.index_name must be a string, got ' ..
+                        type(v.index_name))
+                end
+                return
+            else
+                assert(false, 'connection has neither ' ..
+                    'destination_collection nor variants field')
+            end
         end
     end
 end
 
+
 --- Whether an object match set of PCRE.
 ---
 --- @tparam table obj an object to check
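-- A union connection that passes the validation above might look like this
-- (collection, field and index names here are hypothetical):
--
--   {
--       name = 'user_or_org',
--       type = '1:1',
--       variants = {
--           {
--               determinant = {entity_type = 'user'},
--               destination_collection = 'user_collection',
--               parts = {{source_field = 'entity_id',
--                         destination_field = 'user_id'}},
--               index_name = 'user_id_index',
--           },
--           {
--               determinant = {entity_type = 'org'},
--               destination_collection = 'org_collection',
--               parts = {{source_field = 'entity_id',
--                         destination_field = 'org_id'}},
--               index_name = 'org_id_index',
--           },
--       },
--   }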
diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index 3026f52..4adae9f 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -8,6 +8,7 @@
 --- passed to an accessor function in the filter argument.
 
 local json = require('json')
+local yaml = require('yaml')
 
 local parse = require('graphql.core.parse')
 local schema = require('graphql.core.schema')
@@ -250,6 +251,7 @@ local function convert_record_fields(state, fields)
     return res
 end
 
+--- The function converts passed simple connection to a field of GraphQL type.
 --- The function converts passed avro-schema to a GraphQL type.
 ---
 --- @tparam table state for read state.accessor and previously filled
@@ -258,6 +260,289 @@ end
 --- @tparam[opt] table collection table with schema_name, connections fields
 --- described a collection (e.g. tarantool's spaces)
 ---
+--- @tparam table state for collection types
+--- @tparam table c simple connection to create field on
+--- @tparam string collection_name name of the collection which has the given
+--- connection
+local convert_simple_connection = function(state, c, collection_name)
+    assert(type(c.destination_collection) == 'string',
+        'connection.destination_collection must be a string, got ' ..
+        type(c.destination_collection))
+    assert(type(c.parts) == 'table',
+        'connection.parts must be a table, got ' .. type(c.parts))
+
+    -- gql type of connection field
+    local destination_type = state.types[c.destination_collection]
+    assert(destination_type ~= nil,
+        ('destination_type (named %s) must not be nil'):format(
+        c.destination_collection))
+
+    local c_args
+    if c.type == '1:1' then
+        c_args = state.object_arguments[c.destination_collection]
+    elseif c.type == '1:N' then
+        destination_type = types.nonNull(types.list(destination_type))
+        c_args = state.all_arguments[c.destination_collection]
+    else
+        error('unknown connection type: ' .. tostring(c.type))
+    end
+
+    local c_list_args = state.list_arguments[c.destination_collection]
+
+    local field = {
+        name = c.name,
+        kind = destination_type,
+        arguments = c_args,
+        resolve = function(parent, args_instance, info)
+            local destination_args_names = {}
+            local destination_args_values = {}
+
+            for _, part in ipairs(c.parts) do
+                assert(type(part.source_field) == 'string',
+                    'part.source_field must be a string, got ' ..
+                    type(part.source_field))
+                assert(type(part.destination_field) == 'string',
+                    'part.destination_field must be a string, got ' ..
+                    type(part.destination_field))
+
+                destination_args_names[#destination_args_names + 1] =
+                    part.destination_field
+                destination_args_values[#destination_args_values + 1] =
+                    parent[part.source_field]
+            end
+
+            local from = {
+                collection_name = collection_name,
+                connection_name = c.name,
+                destination_args_names = destination_args_names,
+                destination_args_values = destination_args_values,
+            }
+            local extra = {
+                qcontext = info.qcontext
+            }
+            local object_args_instance = {} -- passed to 'filter'
+            local list_args_instance = {} -- passed to 'args'
+            for k, v in pairs(args_instance) do
+                if c_list_args[k] ~= nil then
+                    list_args_instance[k] = v
+                elseif c_args[k] ~= nil then
+                    object_args_instance[k] = v
+                else
+                    error(('cannot find "%s" field ("%s" value) ' ..
+                        'within allowed fields'):format(tostring(k),
+                        tostring(v)))
+                end
+            end
+            local objs = state.accessor:select(parent,
+                c.destination_collection, from,
+                object_args_instance, list_args_instance, extra)
+            assert(type(objs) == 'table',
+                'objs list received from an accessor ' ..
+                'must be a table, got ' .. type(objs))
+            if c.type == '1:1' then
+                assert(#objs == 1,
+                    'expect one matching object, got ' ..
+                    tostring(#objs))
+                return objs[1]
+            else -- c.type == '1:N'
+                return objs
+            end
+        end,
+    }
+
+    return field
+end
+
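-- A sketch of what convert_simple_connection produces for a hypothetical
-- 1:N connection (all names below are made up):
--
--   c = {type = '1:N', name = 'user_orders',
--        destination_collection = 'order',
--        parts = {{source_field = 'user_id',
--                  destination_field = 'user_id'}}}
--
-- gives a field whose kind is types.nonNull(types.list(state.types['order']))
-- and whose resolve() selects the orders whose user_id equals parent.user_id.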
+--- The function converts passed union connection to a field of GraphQL type.
+--- It builds connections between the union collection and destination
+--- collections (destination collections are 'types' of a 'Union' in GraphQL).
+---
+--- @tparam table state for collection types
+--- @tparam table c union connection to create field on
+--- @tparam string collection_name name of the collection which has the given
+--- connection
+local convert_union_connection = function(state, c, collection_name)
+    local union_types = {}
+    local collection_to_arguments = {}
+    local collection_to_list_arguments = {}
+
+    local determinant_keys = utils.get_keys(c.variants[1].determinant)
+    local determinant_to_variant = {}
+
+    for _, v in ipairs(c.variants) do
+        assert(v.determinant, 'each variant should have a determinant')
+        assert(type(v.determinant) == 'table', 'variant\'s determinant must ' ..
+            'be a table, got ' .. type(v.determinant))
+        assert(type(v.destination_collection) == 'string',
+            'variant.destination_collection must be a string, got ' ..
+            type(v.destination_collection))
+        assert(type(v.parts) == 'table',
+            'variant.parts must be a table, got ' .. type(v.parts))
+        local destination_type = state.types[v.destination_collection]
+        assert(destination_type ~= nil,
+            ('destination_type (named %s) must not be nil'):format(
+            v.destination_collection))
+
+        determinant_to_variant[v.determinant] = v
+
+        local v_args
+        if c.type == '1:1' then
+            v_args = state.object_arguments[v.destination_collection]
+        elseif c.type == '1:N' then
+            destination_type = types.nonNull(types.list(destination_type))
+            v_args = state.all_arguments[v.destination_collection]
+        end
+
+        local v_list_args = state.list_arguments[v.destination_collection]
+
+        union_types[#union_types + 1] = destination_type
+
+        collection_to_arguments[v.destination_collection] = v_args
+        collection_to_list_arguments[v.destination_collection] = v_list_args
+    end
+
+    local resolveType = function (result)
+        for _, v in pairs(c.variants) do
+            local dest_collection = state.types[v.destination_collection]
+            if utils.do_have_keys(result, utils.get_keys(dest_collection.fields)) then
+                return dest_collection
+            end
+        end
+    end
+
+    local resolve_variant = function (parent)
+        assert(utils.do_have_keys(parent, determinant_keys),
+            ('Parent object of union object doesn\'t have determinant ' ..
+            'fields which are necessary to determine which resolving ' ..
+            'variant should be used. Union parent object:\n"%s"\n' ..
+            'Determinant keys:\n"%s"'):
+            format(yaml.encode(parent), yaml.encode(determinant_keys)))
+
+        local resulting_variant
+        for determinant, variant in pairs(determinant_to_variant) do
+            local is_match = true
+            for determinant_key, determinant_value in pairs(determinant) do
+                if parent[determinant_key] ~= determinant_value then
+                    is_match = false
+                    break
+                end
+            end
+
+            if is_match then
+                resulting_variant = variant
+                break
+            end
+        end
+
+        assert(resulting_variant, ('Variant resolving failed. ' ..
+            'Parent object: "%s"\n'):format(yaml.encode(parent)))
+        return resulting_variant
+    end
+
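-- Determinant matching in resolve_variant, sketched with hypothetical fields:
--
--   parent = {entity_type = 'user', entity_id = 42}
--
-- a variant with determinant {entity_type = 'user'} matches, because every
-- determinant key has an equal value in the parent object; a variant with
-- determinant {entity_type = 'org'} does not match and is skipped.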
+    local field = {
+        name = c.name,
+        kind = types.union({name = c.name, types = union_types,
+            resolveType = resolveType}),
+        arguments = nil,
+        resolve = function(parent, args_instance, info)
+            local v = resolve_variant(parent)
+            local destination_collection = state.types[v.destination_collection]
+            local destination_args_names = {}
+            local destination_args_values = {}
+
+            for _, part in ipairs(v.parts) do
+                assert(type(part.source_field) == 'string',
+                    'part.source_field must be a string, got ' ..
+                    type(part.source_field))
+                assert(type(part.destination_field) == 'string',
+                    'part.destination_field must be a string, got ' ..
+                    type(part.destination_field))
+
+                destination_args_names[#destination_args_names + 1] =
+                    part.destination_field
+                destination_args_values[#destination_args_values + 1] =
+                    parent[part.source_field]
+            end
+
+            local from = {
+                collection_name = collection_name,
+                connection_name = c.name,
+                destination_args_names = destination_args_names,
+                destination_args_values = destination_args_values,
+            }
+            local extra = {
+                qcontext = info.qcontext
+            }
+            local object_args_instance = {} -- passed to 'filter'
+            local list_args_instance = {} -- passed to 'args'
+
+            local c_args = collection_to_arguments[v.destination_collection]
+            local c_list_args =
+                collection_to_list_arguments[v.destination_collection]
+
+            for k, v in pairs(args_instance) do
+                if c_list_args[k] ~= nil then
+                    list_args_instance[k] = v
+                elseif c_args[k] ~= nil then
+                    object_args_instance[k] = v
+                else
+                    error(('cannot find "%s" field ("%s" value) ' ..
+                        'within allowed fields'):format(tostring(k),
+                        tostring(v)))
+                end
+            end
+            local objs = state.accessor:select(parent,
+                v.destination_collection, from,
+                object_args_instance, list_args_instance, extra)
+            assert(type(objs) == 'table',
+                'objs list received from an accessor ' ..
+                'must be a table, got ' .. type(objs))
+            if c.type == '1:1' then
+                assert(#objs == 1,
+                    'expect one matching object, got ' ..
+                    tostring(#objs))
+                return objs[1]
+            else -- c.type == '1:N'
+                return objs
+            end
+        end
+    }
+    return field
+end
+
+--- The function converts passed connection to a field of GraphQL type.
+---
+--- @tparam table state for read state.accessor and previously filled
+--- state.types (state.types are gql types)
+--- @tparam table connection connection to create field on
+--- @tparam string collection_name name of the collection which has the given
+--- connection
+local convert_connection_to_field = function(state, connection, collection_name)
+    assert(type(connection.type) == 'string',
+        'connection.type must be a string, got ' .. type(connection.type))
+    assert(connection.type == '1:1' or connection.type == '1:N',
+        'connection.type must be 1:1 or 1:N, got ' .. connection.type)
+    assert(type(connection.name) == 'string',
+        'connection.name must be a string, got ' .. type(connection.name))
+    assert(connection.destination_collection or connection.variants,
+        'connection must have either destination_collection or variants field')
+
+    if connection.destination_collection then
+        return convert_simple_connection(state, connection, collection_name)
+    end
+
+    if connection.variants then
+        return convert_union_connection(state, connection, collection_name)
+    end
+end
+
+--- The function converts passed avro-schema to a GraphQL type.
+---
+--- @tparam table state for read state.accessor and previously filled
+--- state.types (state.types are gql types)
+--- @tparam table avro_schema input avro-schema
+--- @tparam[opt] table collection table with schema_name, connections fields
+--- described a collection (e.g. tarantool's spaces)
+---
 --- If collection is passed, two things are changed within this function:
 ---
 --- 1. Connections from the collection will be taken into account to
@@ -303,145 +588,8 @@ gql_type = function(state, avro_schema, collection, collection_name)
 
     local fields = convert_record_fields(state, avro_schema.fields)
 
-    -- if collection param is passed then go over all connections
     for _, c in ipairs((collection or {}).connections or {}) do
-        assert(type(c.type) == 'string',
-            'connection.type must be a string, got ' ..
type(c.type)) - assert(c.type == '1:1' or c.type == '1:1*' or c.type == '1:N', - 'connection.type must be 1:1, 1:1* or 1:N, got ' .. c.type) - assert(type(c.name) == 'string', - 'connection.name must be a string, got ' .. type(c.name)) - assert(type(c.destination_collection) == 'string', - 'connection.destination_collection must be a string, got ' .. - type(c.destination_collection)) - assert(type(c.parts) == 'table', - 'connection.parts must be a string, got ' .. type(c.parts)) - - -- gql type of connection field - local destination_type = - state.nullable_collection_types[c.destination_collection] - assert(destination_type ~= nil, - ('destination_type (named %s) must not be nil'):format( - c.destination_collection)) - - local c_args - if c.type == '1:1' then - destination_type = types.nonNull(destination_type) - c_args = state.object_arguments[c.destination_collection] - elseif c.type == '1:1*' then - c_args = state.object_arguments[c.destination_collection] - elseif c.type == '1:N' then - destination_type = types.nonNull(types.list(types.nonNull( - destination_type))) - c_args = state.all_arguments[c.destination_collection] - else - error('unknown connection type: ' .. tostring(c.type)) - end - - local c_list_args = state.list_arguments[c.destination_collection] - - fields[c.name] = { - name = c.name, - kind = destination_type, - arguments = c_args, - resolve = function(parent, args_instance, info) - local destination_args_names = {} - local destination_args_values = {} - local are_all_parts_non_null = true - local are_all_parts_null = true - - for _, part in ipairs(c.parts) do - assert(type(part.source_field) == 'string', - 'part.source_field must be a string, got ' .. - type(part.destination_field)) - assert(type(part.destination_field) == 'string', - 'part.destination_field must be a string, got ' .. - type(part.destination_field)) - - destination_args_names[#destination_args_names + 1] = - part.destination_field - - local value = parent[part.source_field] - destination_args_values[#destination_args_values + 1] = - value - - if value ~= nil then -- nil or box.NULL - are_all_parts_null = false - else - are_all_parts_non_null = false - end - end - - -- Check FULL match constraint before request of - -- destination object(s). Note that connection key parts - -- can be prefix of index key parts. Zero parts count - -- considered as ok by this check. - local ok = are_all_parts_null or are_all_parts_non_null - if not ok then -- avoid extra json.encode() - assert(ok, - 'FULL MATCH constraint was failed: connection ' .. - 'key parts must be all non-nulls or all nulls; ' .. - 'object: ' .. json.encode(parent)) - end - - -- Avoid non-needed index lookup on a destination - -- collection when all connection parts are null: - -- * return null for 1:1* connection; - -- * return {} for 1:N connection (except the case when - -- source collection is the Query pseudo-collection). - if collection_name ~= 'Query' and are_all_parts_null then - if c.type ~= '1:1*' and c.type ~= '1:N' then - -- `if` is to avoid extra json.encode - assert(c.type == '1:1*' or c.type == '1:N', - ('only 1:1* or 1:N connections can have ' .. - 'all key parts null; parent is %s from ' .. 
-                        'collection "%s"'):format(json.encode(parent),
-                        tostring(collection_name)))
-                end
-                return c.type == '1:N' and {} or nil
-            end
-
-            local from = {
-                collection_name = collection_name,
-                connection_name = c.name,
-                destination_args_names = destination_args_names,
-                destination_args_values = destination_args_values,
-            }
-            local extra = {
-                qcontext = info.qcontext
-            }
-            local object_args_instance = {} -- passed to 'filter'
-            local list_args_instance = {} -- passed to 'args'
-            for k, v in pairs(args_instance) do
-                if c_list_args[k] ~= nil then
-                    list_args_instance[k] = v
-                elseif c_args[k] ~= nil then
-                    object_args_instance[k] = v
-                else
-                    error(('cannot found "%s" field ("%s" value) ' ..
-                        'within allowed fields'):format(tostring(k),
-                        tostring(v)))
-                end
-            end
-            local objs = accessor:select(parent,
-                c.destination_collection, from,
-                object_args_instance, list_args_instance, extra)
-            assert(type(objs) == 'table',
-                'objs list received from an accessor ' ..
-                'must be a table, got ' .. type(objs))
-            if c.type == '1:1' or c.type == '1:1*' then
-                -- we expect here exactly one object even for 1:1*
-                -- connections because we processed all-parts-are-null
-                -- situation above
-                assert(#objs == 1,
-                    'expect one matching object, got ' ..
-                    tostring(#objs))
-                return objs[1]
-            else -- c.type == '1:N'
-                return objs
-            end
-        end,
-    }
+        fields[c.name] = convert_connection_to_field(state, c, collection_name)
     end
 
     -- create gql type
@@ -687,7 +835,8 @@ end
 ---            schema_name = 'schema_name_foo',
 ---            connections = { // the optional field
 ---                {
----                    name = 'connection_name_bar',
+---                    type = '1:1' or '1:N',
+---                    name = 'simple_connection_name',
 ---                    destination_collection = 'collection_baz',
 ---                    parts = {
 ---                        {
@@ -700,7 +849,17 @@ end
 ---                    -- ignored in the graphql
 ---                    -- part
 ---                },
----                ...
+---                {
+---                    name = 'union_connection_name',
+---                    type = '1:1' or '1:N',
+---                    variants = {
+---                        {
+---                            see variant format below
+---                        },
+---                        ...
+---                    }
+---                },
+---                ...
 ---            },
 ---        },
 ---        ...
@@ -735,6 +894,22 @@ end
 ---        }
 ---    }),
 --- })
+---
+--- variant format
+--- {
+---    Source collection must have all fields that are keys in the determinant
+---    table. Based on the values of these fields the right destination
+---    collection is determined.
+---    determinant = {field_or_source: 'destination_1_value', ...},
+---    destination_collection = 'collection_name',
+---    parts = {
+---        {
+---            source_field = 'field_name_source',
+---            destination_field = 'field_name_destination'
+---        }
+---    },
+---    index_name = 'index_name'
+--- }
 function tarantool_graphql.new(cfg)
     local state = parse_cfg(cfg)
     return setmetatable(state, {
diff --git a/graphql/utils.lua b/graphql/utils.lua
index 8b8ba8a..f779ff7 100644
--- a/graphql/utils.lua
+++ b/graphql/utils.lua
@@ -132,6 +132,29 @@ function utils.gen_booking_table(data)
     })
 end
 
+--- @return `table` with all keys of the given table
+function utils.get_keys(table)
+    local keys = {}
+    for k, _ in pairs(table) do
+        keys[#keys + 1] = k
+    end
+    return keys
+end
+
+--- Check if passed table has passed keys with non-nil values.
+--- @tparam table table to check
+--- @tparam table keys array of keys to check
+--- @return[1] `true` if passed table has passed keys
+--- @return[2] `false` otherwise
+function utils.do_have_keys(table, keys)
+    for _, k in pairs(keys) do
+        if table[k] == nil then
+            return false
+        end
+    end
+    return true
+end
+
 --- Catch error at module require and return nil in the case.
 ---
 --- @tparam string module_name name of a module to require
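Both utils helpers back the union connection machinery of this patch; in
isolation they behave as follows (a minimal sketch):

    local utils = require('graphql.utils')

    local keys = utils.get_keys({a = 1, b = 2})
    -- keys contains 'a' and 'b' in unspecified order (pairs() order is
    -- not fixed)

    assert(utils.do_have_keys({a = 1, b = 2}, {'a'}) == true)
    assert(utils.do_have_keys({a = 1}, {'a', 'b'}) == false)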
From b585527e3aea4a6f504218ff3ccb7e23dcf75d6a Mon Sep 17 00:00:00 2001
From: SudoBobo
Date: Wed, 14 Mar 2018 15:12:58 +0300
Subject: [PATCH 13/13] prepare for rebase

---
 graphql/tarantool_graphql.lua | 134 +++++++++++++++++++++++++++++-----
 1 file changed, 116 insertions(+), 18 deletions(-)

diff --git a/graphql/tarantool_graphql.lua b/graphql/tarantool_graphql.lua
index 4adae9f..910fe6a 100644
--- a/graphql/tarantool_graphql.lua
+++ b/graphql/tarantool_graphql.lua
@@ -272,16 +272,22 @@ local convert_simple_connection = function(state, c, collection_name)
         'connection.parts must be a table, got ' .. type(c.parts))
 
     -- gql type of connection field
-    local destination_type = state.types[c.destination_collection]
+    local destination_type =
+        state.nullable_collection_types[c.destination_collection]
+
     assert(destination_type ~= nil,
         ('destination_type (named %s) must not be nil'):format(
         c.destination_collection))
 
     local c_args
     if c.type == '1:1' then
+        destination_type = types.nonNull(destination_type)
+        c_args = state.object_arguments[c.destination_collection]
+    elseif c.type == '1:1*' then
         c_args = state.object_arguments[c.destination_collection]
     elseif c.type == '1:N' then
-        destination_type = types.nonNull(types.list(destination_type))
+        destination_type = types.nonNull(types.list(types.nonNull(
+            destination_type)))
         c_args = state.all_arguments[c.destination_collection]
     else
         error('unknown connection type: ' .. tostring(c.type))
@@ -296,6 +302,8 @@ local convert_simple_connection = function(state, c, collection_name)
         resolve = function(parent, args_instance, info)
             local destination_args_names = {}
            local destination_args_values = {}
+            local are_all_parts_non_null = true
+            local are_all_parts_null = true
 
             for _, part in ipairs(c.parts) do
                 assert(type(part.source_field) == 'string',
@@ -307,8 +315,44 @@ local convert_simple_connection = function(state, c, collection_name)
                 destination_args_names[#destination_args_names + 1] =
                     part.destination_field
 
+                local value = parent[part.source_field]
                 destination_args_values[#destination_args_values + 1] =
-                    parent[part.source_field]
+                    value
+
+                if value ~= nil then -- nil or box.NULL
+                    are_all_parts_null = false
+                else
+                    are_all_parts_non_null = false
+                end
+            end
+
+            -- Check FULL match constraint before request of
+            -- destination object(s). Note that connection key parts
+            -- can be prefix of index key parts. Zero parts count
+            -- considered as ok by this check.
+            local ok = are_all_parts_null or are_all_parts_non_null
+            if not ok then -- avoid extra json.encode()
+                assert(ok,
+                    'FULL MATCH constraint was failed: connection ' ..
+                    'key parts must be all non-nulls or all nulls; ' ..
+                    'object: ' .. json.encode(parent))
+            end
+
+            -- Avoid non-needed index lookup on a destination
+            -- collection when all connection parts are null:
+            -- * return null for 1:1* connection;
+            -- * return {} for 1:N connection (except the case when
+            --   source collection is the Query pseudo-collection).
+            if collection_name ~= 'Query' and are_all_parts_null then
+                if c.type ~= '1:1*' and c.type ~= '1:N' then
+                    -- `if` is to avoid extra json.encode
+                    assert(c.type == '1:1*' or c.type == '1:N',
+                        ('only 1:1* or 1:N connections can have ' ..
+                        'all key parts null; parent is %s from ' ..
+                        'collection "%s"'):format(json.encode(parent),
+                        tostring(collection_name)))
+                end
+                return c.type == '1:N' and {} or nil
             end
 
             local from = {
@@ -337,9 +381,12 @@ local convert_simple_connection = function(state, c, collection_name)
                 c.destination_collection, from,
                 object_args_instance, list_args_instance, extra)
             assert(type(objs) == 'table',
-                'objs list received from an accessor ' ..
-                'must be a table, got ' .. type(objs))
-            if c.type == '1:1' then
+                'objs list received from an accessor ' ..
+                'must be a table, got ' .. type(objs))
+            if c.type == '1:1' or c.type == '1:1*' then
+                -- we expect here exactly one object even for 1:1*
+                -- connections because we processed all-parts-are-null
+                -- situation above
                 assert(#objs == 1,
                     'expect one matching object, got ' ..
                     tostring(#objs))
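-- The resulting GraphQL kinds per connection type after this patch, for a
-- hypothetical destination type T = nullable_collection_types[name]:
--
--   '1:1'  -> types.nonNull(T)                        (object must exist)
--   '1:1*' -> T                                       (object may be absent)
--   '1:N'  -> types.nonNull(types.list(types.nonNull(T)))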
@@ -372,25 +419,33 @@ local convert_union_connection = function(state, c, collection_name)
     for _, v in ipairs(c.variants) do
         assert(v.determinant, 'each variant should have a determinant')
         assert(type(v.determinant) == 'table', 'variant\'s determinant must ' ..
-            'be a table, got ' .. type(v.determinant))
+                'be a table, got ' .. type(v.determinant))
         assert(type(v.destination_collection) == 'string',
-            'variant.destination_collection must be a string, got ' ..
-            type(v.destination_collection))
+                'variant.destination_collection must be a string, got ' ..
+                type(v.destination_collection))
         assert(type(v.parts) == 'table',
-            'variant.parts must be a table, got ' .. type(v.parts))
-        local destination_type = state.types[v.destination_collection]
+                'variant.parts must be a table, got ' .. type(v.parts))
+
+        local destination_type =
+            state.nullable_collection_types[v.destination_collection]
         assert(destination_type ~= nil,
-            ('destination_type (named %s) must not be nil'):format(
-            v.destination_collection))
+                ('destination_type (named %s) must not be nil'):format(
+                v.destination_collection))
 
         determinant_to_variant[v.determinant] = v
 
         local v_args
         if c.type == '1:1' then
+            destination_type = types.nonNull(destination_type)
+            v_args = state.object_arguments[v.destination_collection]
+        elseif c.type == '1:1*' then
             v_args = state.object_arguments[v.destination_collection]
         elseif c.type == '1:N' then
-            destination_type = types.nonNull(types.list(destination_type))
+            destination_type = types.nonNull(types.list(types.nonNull(
+                destination_type)))
             v_args = state.all_arguments[v.destination_collection]
+        else
+            error('unknown connection type: ' .. tostring(c.type))
         end
 
         local v_list_args = state.list_arguments[v.destination_collection]
@@ -449,6 +504,9 @@ local convert_union_connection = function(state, c, collection_name)
             local v = resolve_variant(parent)
             local destination_collection = state.types[v.destination_collection]
             local destination_args_names = {}
             local destination_args_values = {}
+            local are_all_parts_non_null = true
+            local are_all_parts_null = true
+
 
             for _, part in ipairs(v.parts) do
                 assert(type(part.source_field) == 'string',
@@ -460,8 +518,44 @@ local convert_union_connection = function(state, c, collection_name)
                 destination_args_names[#destination_args_names + 1] =
                     part.destination_field
 
+                local value = parent[part.source_field]
                 destination_args_values[#destination_args_values + 1] =
-                    parent[part.source_field]
+                    value
+
+                if value ~= nil then -- nil or box.NULL
+                    are_all_parts_null = false
+                else
+                    are_all_parts_non_null = false
+                end
+            end
+
+            -- Check FULL match constraint before request of
+            -- destination object(s). Note that connection key parts
+            -- can be prefix of index key parts. Zero parts count
+            -- considered as ok by this check.
+            local ok = are_all_parts_null or are_all_parts_non_null
+            if not ok then -- avoid extra json.encode()
+                assert(ok,
+                    'FULL MATCH constraint was failed: connection ' ..
+                    'key parts must be all non-nulls or all nulls; ' ..
+                    'object: ' .. json.encode(parent))
+            end
+
+            -- Avoid non-needed index lookup on a destination
+            -- collection when all connection parts are null:
+            -- * return null for 1:1* connection;
+            -- * return {} for 1:N connection (except the case when
+            --   source collection is the Query pseudo-collection).
+            if collection_name ~= 'Query' and are_all_parts_null then
+                if c.type ~= '1:1*' and c.type ~= '1:N' then
+                    -- `if` is to avoid extra json.encode
+                    assert(c.type == '1:1*' or c.type == '1:N',
+                        ('only 1:1* or 1:N connections can have ' ..
+                        'all key parts null; parent is %s from ' ..
+                        'collection "%s"'):format(json.encode(parent),
+                        tostring(collection_name)))
+                end
+                return c.type == '1:N' and {} or nil
             end
 
             local from = {
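-- The FULL MATCH check above, sketched for a hypothetical two-part
-- connection key {user_id, shop_id}:
--
--   parent = {user_id = 1, shop_id = 2}        -- all non-null: lookup runs
--   parent = {user_id = nil, shop_id = nil}    -- all null: nil or {} returned
--   parent = {user_id = 1, shop_id = nil}      -- mixed: the assertion fires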
@@ -496,7 +590,10 @@ local convert_union_connection = function(state, c, collection_name)
             assert(type(objs) == 'table',
                 'objs list received from an accessor ' ..
                 'must be a table, got ' .. type(objs))
-            if c.type == '1:1' then
+            if c.type == '1:1' or c.type == '1:1*' then
+                -- we expect here exactly one object even for 1:1*
+                -- connections because we processed all-parts-are-null
+                -- situation above
                 assert(#objs == 1,
                     'expect one matching object, got ' ..
                     tostring(#objs))
@@ -519,8 +616,9 @@ end
 local convert_connection_to_field = function(state, connection, collection_name)
     assert(type(connection.type) == 'string',
         'connection.type must be a string, got ' .. type(connection.type))
-    assert(connection.type == '1:1' or connection.type == '1:N',
-        'connection.type must be 1:1 or 1:N, got ' .. connection.type)
+    assert(connection.type == '1:1' or connection.type == '1:1*' or
+        connection.type == '1:N', 'connection.type must be 1:1, 1:1* or ' ..
+        '1:N, got ' .. connection.type)
     assert(type(connection.name) == 'string',
         'connection.name must be a string, got ' .. type(connection.name))
     assert(connection.destination_collection or connection.variants,