This repository was archived by the owner on Apr 14, 2022. It is now read-only.

Enhance validation and introduce error codes #180

Merged · 20 commits · merged Jul 2, 2018

Changes from all commits
3 changes: 3 additions & 0 deletions Makefile
@@ -9,6 +9,9 @@ default:
.PHONY: lint
lint:
luacheck graphql/*.lua \
+		graphql/core/execute.lua \
+		graphql/core/rules.lua \
+		graphql/core/validate_variables.lua \
graphql/convert_schema/*.lua \
graphql/server/*.lua \
test/bench/*.lua \
21 changes: 11 additions & 10 deletions README.md
@@ -141,23 +141,23 @@ be consistent.
Mutations are disabled in the resharding state of a shard cluster.

There are three types of modifications: insert, update and delete. Several
-modifications are allowed in an one GraphQL request, but they will be processed
-in non-transactional way.
+modifications are allowed in one GraphQL request, but they will be processed in
+a non-transactional way.

In the case of shard accessor the following constraints can guarantee that data
-will be changed in atomic way or, in other words, in an one shard request (but
+will be changed in an atomic way or, in other words, in one shard request (but
foregoing and upcoming selects can see other data):

* One insert / update / delete argument over the entire GraphQL request.
* For update / delete: either the argument is for 1:1 connection or `limit: 1`
-is used for a collection (a topmost field) or 1:N connection (a nested
+is used for a collection (an upmost field) or 1:N connection (a nested
field).
* No update of the first field of a **tuple** (the shard key is calculated
from it). It is the first field of the upmost record in the schema for a
collection when there are no service fields. If there are service fields, the
first field of a tuple cannot be changed by a mutation GraphQL request.

-Data can be changed between shard requests which are part of the one GraphQL
+Data can be changed between shard requests which are part of one GraphQL
request, so the result can observe an inconsistent state. We won't show all
possible cases, but give an idea of what is going on in the following paragraph.
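
To make these constraints concrete, here is an illustrative request that stays
within the one-shard-request guarantees: a single `update` argument, applied
through a 1:N connection with `limit: 1`. The `user_collection` and
`order_connection` names follow the user/order examples used elsewhere in this
README; the `gql_wrapper:compile()`/`execute()` API shape and the update
argument type name are assumptions, not taken from this diff.

```
-- Illustrative sketch only: one `update` argument in the whole request,
-- applied through a 1:N connection with `limit: 1`, so at most one object
-- is changed in one shard request.
local query = [[
    mutation update_order($user_id: String, $xorder: order_collection_update) {
        user_collection(user_id: $user_id) {
            user_id
            order_connection(limit: 1, update: $xorder) {
                order_id
                description
            }
        }
    }
]]
local gql_query = gql_wrapper:compile(query)
local result = gql_query:execute({
    user_id = 'user_id_1',
    xorder = {description = 'changed description'},
})
```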

@@ -229,7 +229,7 @@ Consider the following details:
#### Update

Example with an update statement passed from a variable. Note that here we
-update an object given by a connection (inside an one of nested fields of a
+update an object given by a connection (inside one of the nested fields of a
request):

```
@@ -300,9 +300,10 @@ Consider the following details:
`update` argument, then connected objects are selected.
* The `limit` and `offset` arguments are applied before the update, so a user
can use `limit: 1` to update only the first match.
-* Objects traversed in deep-first up-first order as it written in a mutation
-request. So an `update` argument potentially changes those fields that are
-follows the updated object in this order.
+* Objects are traversed in a pre-order depth-first way; an object's fields are
+traversed in the order they are written in a mutation request. So an `update`
+argument potentially changes those fields that follow the updated object in
+this order.
* Filters by connected objects are performed before the update. Resulting
connected objects are given after the update (it matters when a field of the
parent object by which the connection is made is subject to change).
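
As a sketch of the traversal-order bullet above: the parent object's `update`
is applied before its nested connection is selected, so the connected objects
already observe the updated parent. Names are again the illustrative
user/order ones; the behavior description paraphrases the bullets above.

```
-- Pre-order depth-first traversal in a mutation request, as a sketch:
-- (1) is visited and updated first; (2) is selected afterwards, so the
-- connected objects are given after the update, as described above.
local query = [[
    mutation {
        user_collection(user_id: "user_id_1",
                update: {last_name: "Smith"}) {   # (1) updated first
            user_id
            last_name
            order_connection(limit: 1) {          # (2) selected after (1)
                order_id
            }
        }
    }
]]
```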
@@ -336,7 +337,7 @@ Consider the following details:
* The `delete` argument is forbidden with `insert` or `update` arguments.
* The `delete` argument is forbidden in `query` requests.
* The same fields traversal order and 'select -> change -> select connected'
-order of operations for an one field are applied likewise for the `update`
+order of operations for one field are applied likewise for the `update`
argument.
* The `limit` argument can be used to define how many objects are subject to
deletion and `offset` can help with adjusting the start point of multi-object
55 changes: 44 additions & 11 deletions graphql/accessor_general.lua
@@ -13,8 +13,10 @@ local bit = require('bit')
local rex, is_pcre2 = utils.optional_require_rex()
local avro_helpers = require('graphql.avro_helpers')
local db_schema_helpers = require('graphql.db_schema_helpers')
+local error_codes = require('graphql.error_codes')

local check = utils.check
+local e = error_codes

-- XXX: consider using [1] when it will be mature enough;
-- look into [2] for the status.
@@ -856,12 +858,17 @@ local function process_tuple(self, state, tuple, opts)
local resulting_object_cnt_max = opts.resulting_object_cnt_max
local fetched_object_cnt_max = opts.fetched_object_cnt_max
qstats.fetched_object_cnt = qstats.fetched_object_cnt + 1
-    assert(qstats.fetched_object_cnt <= fetched_object_cnt_max,
-        ('fetched object count[%d] exceeds limit[%d] ' ..
-        '(`fetched_object_cnt_max` in accessor)'):format(
-        qstats.fetched_object_cnt, fetched_object_cnt_max))
-    assert(qcontext.deadline_clock > clock.monotonic64(),
-        'query execution timeout exceeded, use `timeout_ms` to increase it')
+    if qstats.fetched_object_cnt > fetched_object_cnt_max then
+        error(e.fetched_objects_limit_exceeded(
+            ('fetched objects count (%d) exceeds fetched_object_cnt_max ' ..
+            'limit (%d)'):format(qstats.fetched_object_cnt,
+            fetched_object_cnt_max)))
+    end
+    if clock.monotonic64() > qcontext.deadline_clock then
+        error(e.timeout_exceeded((
+            'query execution timeout exceeded timeout_ms limit (%s ms)'):format(
+            tostring(self.settings.timeout_ms))))
+    end
local collection_name = opts.collection_name
local pcre = opts.pcre
local resolveField = opts.resolveField
@@ -878,6 +885,12 @@ local function process_tuple(self, state, tuple, opts)
return true -- skip pivot item too
end

+    -- Don't count subrequest resulting objects (needed for filtering) into
+    -- the count of objects we show to a user as a result.
+    -- XXX: it would be better to have an option to control whether selected
+    -- objects are counted as resulting ones.
+    local saved_resulting_object_cnt = qstats.resulting_object_cnt

-- make subrequests if needed
for k, v in pairs(filter) do
if obj[k] == nil then
Expand All @@ -891,6 +904,8 @@ local function process_tuple(self, state, tuple, opts)
end
end

+    qstats.resulting_object_cnt = saved_resulting_object_cnt

-- filter out non-matching objects
local match = utils.is_subtable(obj, filter) and
match_using_re(obj, pcre)
@@ -905,10 +920,12 @@
state.objs[#state.objs + 1] = obj
state.count = state.count + 1
qstats.resulting_object_cnt = qstats.resulting_object_cnt + 1
-    assert(qstats.resulting_object_cnt <= resulting_object_cnt_max,
-        ('returning object count[%d] exceeds limit[%d] ' ..
-        '(`resulting_object_cnt_max` in accessor)'):format(
-        qstats.resulting_object_cnt, resulting_object_cnt_max))
+    if qstats.resulting_object_cnt > resulting_object_cnt_max then
+        error(e.resulting_objects_limit_exceeded(
+            ('resulting objects count (%d) exceeds resulting_object_cnt_max ' ..
+            'limit (%d)'):format(qstats.resulting_object_cnt,
+            resulting_object_cnt_max)))
+    end
if limit ~= nil and state.count >= limit then
return false
end
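
Here `e` aliases the new `graphql/error_codes.lua` module, which is not among
the files loaded in this diff. Below is a minimal sketch of what such a module
could look like, assuming errors are tables that carry a machine-readable code
next to the message; the constant values and the table shape are assumptions,
not the PR's actual definitions.

```
-- A hypothetical sketch of graphql/error_codes.lua; the real module is not
-- shown in this diff, so the constants and the error shape are assumed.
local error_codes = {}

error_codes.TIMEOUT_EXCEEDED = 1
error_codes.FETCHED_OBJECTS_LIMIT_EXCEEDED = 2
error_codes.RESULTING_OBJECTS_LIMIT_EXCEEDED = 3
error_codes.WRONG_VALUE = 4

-- Build an error object carrying a machine-readable code alongside the
-- human-readable message; error() accepts arbitrary values in Lua.
local function error_object(code, message)
    return {
        message = message,
        extensions = {error_code = code},
    }
end

function error_codes.timeout_exceeded(message)
    return error_object(error_codes.TIMEOUT_EXCEEDED, message)
end

function error_codes.fetched_objects_limit_exceeded(message)
    return error_object(error_codes.FETCHED_OBJECTS_LIMIT_EXCEEDED, message)
end

function error_codes.resulting_objects_limit_exceeded(message)
    return error_object(error_codes.RESULTING_OBJECTS_LIMIT_EXCEEDED, message)
end

function error_codes.wrong_value(message)
    return error_object(error_codes.WRONG_VALUE, message)
end

return error_codes
```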
@@ -1060,6 +1077,13 @@ local function select_internal(self, collection_name, from, filter, args, extra)
-- fullscan
local primary_index = self.funcs.get_primary_index(self,
collection_name)

+            -- count full scan select request
+            extra.qcontext.statistics.select_requests_cnt =
+                extra.qcontext.statistics.select_requests_cnt + 1
+            extra.qcontext.statistics.full_scan_select_requests_cnt =
+                extra.qcontext.statistics.full_scan_select_requests_cnt + 1

for _, tuple in primary_index:pairs() do
assert(pivot == nil,
'offset for top-level objects must use a primary index')
@@ -1102,6 +1126,12 @@ local function select_internal(self, collection_name, from, filter, args, extra)

local tuple_count = 0

+    -- count index select request
+    extra.qcontext.statistics.select_requests_cnt =
+        extra.qcontext.statistics.select_requests_cnt + 1
+    extra.qcontext.statistics.index_select_requests_cnt =
+        extra.qcontext.statistics.index_select_requests_cnt + 1

for _, tuple in index:pairs(index_value, iterator_opts) do
tuple_count = tuple_count + 1
-- check full match constraint
@@ -1287,7 +1317,10 @@ local function init_qcontext(accessor, qcontext)
local settings = accessor.settings
qcontext.statistics = {
resulting_object_cnt = 0,
-        fetched_object_cnt = 0
+        fetched_object_cnt = 0,
+        select_requests_cnt = 0,
+        full_scan_select_requests_cnt = 0,
+        index_select_requests_cnt = 0,
}
qcontext.deadline_clock = clock.monotonic64() +
settings.timeout_ms * 1000 * 1000
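
Together with the `opts.qcontext` hook added to `graphql/core/execute.lua`
later in this diff, the new counters let a caller observe how a query touched
storage. A hedged usage sketch follows; how a caller shares the `qcontext`
with the executor (and the require path for the core executor) are
assumptions, not shown by this diff.

```
-- Hypothetical caller-side view: share a qcontext with the core executor,
-- then read the statistics accumulated by the accessor during execution.
local execute = require('graphql.core.execute')
local qcontext = {}
local result = execute(schema, tree, rootValue, variables, operationName,
    {qcontext = qcontext})
local stats = qcontext.statistics
print(('selects: %d (full scan: %d, index: %d), fetched: %d, resulting: %d')
    :format(stats.select_requests_cnt, stats.full_scan_select_requests_cnt,
        stats.index_select_requests_cnt, stats.fetched_object_cnt,
        stats.resulting_object_cnt))
```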
35 changes: 24 additions & 11 deletions graphql/convert_schema/union.lua
@@ -2,9 +2,11 @@ local yaml = require('yaml')
local core_types = require('graphql.core.types')
local avro_helpers = require('graphql.avro_helpers')
local helpers = require('graphql.convert_schema.helpers')

+local error_codes = require('graphql.error_codes')
local utils = require('graphql.utils')

local check = utils.check
+local e = error_codes

local union = {}

@@ -98,8 +100,10 @@ local function create_union_types(avro_schema, opts)
if type == 'null' then
is_nullable = true
else
-            local variant_type = convert(type, {context = context})
            local box_field_name = type.name or avro_helpers.avro_type(type)
+            table.insert(context.path, box_field_name)
+            local variant_type = convert(type, {context = context})
+            table.remove(context.path, #context.path)
union_types[#union_types + 1] = box_type(variant_type,
box_field_name, {
gen_argument = gen_argument,
@@ -301,25 +305,34 @@ function union.convert(avro_schema, opts)
types = union_types,
name = helpers.full_name(union_name, context),
resolveType = function(result)
+            if type(result) ~= 'table' then
+                error(e.wrong_value('union value must be a map with one ' ..
+                    'field, got ' .. type(result)))
+            end
+            if next(result) == nil or next(result, next(result)) ~= nil then
+                error(e.wrong_value('union value must have only one field'))
+            end
for determinant, type in pairs(determinant_to_type) do
if result[determinant] ~= nil then
return type
end
end
-            error(('result object has no determinant field matching ' ..
-                'determinants for this union\nresult object:\n%s' ..
-                'determinants:\n%s'):format(yaml.encode(result),
-                yaml.encode(determinant_to_type)))
+            local field_name = tostring(next(result))
+            error(e.wrong_value(('unexpected union value field: %s'):format(
+                field_name)))
end,
resolveNodeType = function(node)
-            assert(#node.values == 1,
-                ('box object with more then one field: %d'):format(
-                #node.values))
+            if #node.values ~= 1 then
+                error(e.wrong_value(('box object with more than one field: %d')
+                    :format(#node.values)))
+            end
local determinant = node.values[1].name
check(determinant, 'determinant', 'string')
local res = determinant_to_type[determinant]
-            assert(determinant ~= nil,
-                ('the union has no "%s" field'):format(determinant))
+            if res == nil then
+                error(e.wrong_value(('the union has no "%s" field'):format(
+                    determinant)))
+            end
return res
end,
})
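
The stricter `resolveType` above rejects malformed union values early instead
of failing with an opaque message. Illustrative inputs for an avro-schema
union `['string', 'int']`, where a value is boxed as a one-field map keyed by
the variant name (the variable names are just for illustration):

```
-- Hypothetical values arriving at resolveType for a ['string', 'int'] union;
-- the error messages correspond to the new checks above.
local ok_value  = {string = 'foo'}        -- exactly one known field: accepted
local bad_type  = 42                      -- not a table: 'union value must be a map ...'
local bad_empty = {}                      -- no fields: 'union value must have only one field'
local bad_two   = {string = 'a', int = 1} -- two fields: same 'only one field' error
local bad_field = {unknown = 'x'}         -- 'unexpected union value field: unknown'
```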
41 changes: 27 additions & 14 deletions graphql/core/execute.lua
@@ -1,22 +1,30 @@
local path = (...):gsub('%.[^%.]+$', '')
local types = require(path .. '.types')
local util = require(path .. '.util')
local introspection = require(path .. '.introspection')
local query_util = require(path .. '.query_util')
+local validate_variables = require(path .. '.validate_variables')

local function defaultResolver(object, arguments, info)
return object[info.fieldASTs[1].name.value]
end

local evaluateSelections

--- @param[opt] resolvedType a type to be used instead of one returned by
--- `fieldType.resolveType(result)` in case when the `fieldType` is Interface or
--- Union; that is needed to increase flexibility of an union type resolving
--- (e.g. resolving by a parent object instead of a current object) via
--- returning it from the `fieldType.resolve` function, which called before
--- `resolvedType` and may need to determine the type itself for its needs
-local function completeValue(fieldType, result, subSelections, context, resolvedType)
+-- @tparam[opt] table opts the following options:
+--
+-- * fieldName (string; optional)
+--
+-- * resolvedType (table; optional) a type to be used instead of one returned
+-- by `fieldType.resolveType(result)` in case when the `fieldType` is
+-- Interface or Union; that is needed to increase flexibility of a union type
+-- resolving (e.g. resolving by a parent object instead of a current object)
+-- via returning it from the `fieldType.resolve` function, which is called
+-- before `resolvedType` and may need to determine the type itself for its
+-- needs
+local function completeValue(fieldType, result, subSelections, context, opts)
+    local opts = opts or {}
+    local resolvedType = opts.resolvedType
+    local fieldName = opts.fieldName or '???'
local fieldTypeName = fieldType.__type

if fieldTypeName == 'NonNull' then
@@ -65,13 +73,13 @@ local function completeValue(fieldType, result, subSelections, context, resolved
return evaluateSelections(objectType, result, subSelections, context)
end

-    error('Unknown type "' .. fieldTypeName .. '" for field "' .. field.name .. '"')
+    error('Unknown type "' .. fieldTypeName .. '" for field "' .. fieldName .. '"')
end

local function getFieldEntry(objectType, object, fields, context)
local firstField = fields[1]
local fieldName = firstField.name.value
-    local responseKey = query_util.getFieldResponseKey(firstField)
+    -- local responseKey = query_util.getFieldResponseKey(firstField)
local fieldType = introspection.fieldMap[fieldName] or objectType.fields[fieldName]

if fieldType == nil then
@@ -85,7 +93,8 @@ local function getFieldEntry(objectType, object, fields, context)

local arguments = util.map(fieldType.arguments or {}, function(argument, name)
local supplied = argumentMap[name] and argumentMap[name].value
-        supplied = supplied and util.coerceValue(supplied, argument, context.variables)
+        supplied = util.coerceValue(supplied, argument, context.variables,
+            {strict_non_null = true})
if supplied ~= nil then
return supplied
else
@@ -110,7 +119,8 @@ local function getFieldEntry(objectType, object, fields, context)
local resolvedObject, resolvedType = (fieldType.resolve or defaultResolver)(object, arguments, info)
local subSelections = query_util.mergeSelectionSets(fields)

-    return completeValue(fieldType.kind, resolvedObject, subSelections, context, resolvedType)
+    return completeValue(fieldType.kind, resolvedObject, subSelections, context,
+        {resolvedType = resolvedType})
end

evaluateSelections = function(objectType, object, selections, context)
@@ -125,16 +135,19 @@ evaluateSelections = function(objectType, object, selections, context)
return result
end

-return function(schema, tree, rootValue, variables, operationName)
+return function(schema, tree, rootValue, variables, operationName, opts)
+    local opts = opts or {}
local context = query_util.buildContext(schema, tree, rootValue, variables, operationName)
-- The field is passed to resolve function within info attribute.
-- Can be used to store any data within one query.
-    context.qcontext = {}
+    context.qcontext = opts.qcontext or {}
local rootType = schema[context.operation.operation]

if not rootType then
error('Unsupported operation "' .. context.operation.operation .. '"')
end

+    validate_variables.validate_variables(context)

return evaluateSelections(rootType, rootValue, context.operation.selectionSet.selections, context)
end
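
With `validate_variables.validate_variables(context)` wired in before
selection evaluation, a request whose variables do not satisfy their declared
types fails up front rather than partway through data fetching. A hedged
behavior sketch follows; the `gql_wrapper` API and the exact error text are
assumptions.

```
-- Hypothetical behavior: a non-null variable that is not supplied should now
-- be rejected before any data is fetched; the error wording is an assumption.
local query = [[
    query get_user($user_id: String!) {
        user_collection(user_id: $user_id) {
            user_id
        }
    }
]]
local gql_query = gql_wrapper:compile(query)
local ok, err = pcall(function()
    return gql_query:execute({}) -- $user_id is missing
end)
assert(not ok, 'expected a variable validation error')
```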
13 changes: 10 additions & 3 deletions graphql/core/query_util.lua
@@ -7,10 +7,10 @@ local query_util = {}
function query_util.typeFromAST(node, schema)
local innerType
if node.kind == 'listType' then
-        innerType = query_util.typeFromAST(node.type)
+        innerType = query_util.typeFromAST(node.type, schema)
return innerType and types.list(innerType)
elseif node.kind == 'nonNullType' then
-        innerType = query_util.typeFromAST(node.type)
+        innerType = query_util.typeFromAST(node.type, schema)
return innerType and types.nonNull(innerType)
else
assert(node.kind == 'namedType', 'Variable must be a named type')
@@ -111,7 +111,8 @@ function query_util.buildContext(schema, tree, rootValue, variables, operationNa
rootValue = rootValue,
variables = variables,
operation = nil,
-        fragmentMap = {}
+        fragmentMap = {},
+        variableTypes = {},
}

for _, definition in ipairs(tree.definitions) do
Expand All @@ -136,6 +137,12 @@ function query_util.buildContext(schema, tree, rootValue, variables, operationNa
end
end

+    -- Save variableTypes for the operation.
+    for _, definition in ipairs(context.operation.variableDefinitions or {}) do
+        context.variableTypes[definition.variable.name.value] =
+            query_util.typeFromAST(definition.type, context.schema)
+    end

return context
end
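
For a concrete picture of what this loop produces: given an operation declared
as `query q($user_id: String!, $limit: Int) { ... }`, `context.variableTypes`
ends up mapping variable names to core type objects, roughly as below (the
variable names are illustrative; `types` is the core `types` module).

```
-- Roughly what buildContext stores for
--   query q($user_id: String!, $limit: Int) { ... }
-- where types is graphql/core/types.lua:
context.variableTypes = {
    user_id = types.nonNull(types.string),
    limit = types.int,
}
```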
