Release 0.10.0 #106

Merged
19 commits merged on Mar 31, 2023
Commits
918dd3d
Merge pull request #97 from casework/release-0.9.0
kchason Dec 8, 2022
27f8ac2
Schedule recurring CI cron job
ajnelson-nist Dec 19, 2022
04e890b
Update Python support to reflect coming 3.7 EOL
ajnelson-nist Dec 19, 2022
df7da5b
Merge pull request #99 from casework/bump_python_to_3_8
kchason Dec 19, 2022
88788a0
Merge branch 'develop' into schedule_regular_ci
ajnelson-nist Dec 19, 2022
f5ac7e5
Document second step in non-random UUID generation
ajnelson-nist Dec 19, 2022
9bae915
Merge pull request #98 from casework/schedule_regular_ci
kchason Dec 19, 2022
bb18283
Merge pull request #100 from casework/update_local_uuid_documentation
kchason Dec 19, 2022
5435187
Update SPARQL query result types
ajnelson-nist Mar 17, 2023
655cdbf
Run pre-commit autoupdate
ajnelson-nist Mar 17, 2023
faac091
Reformat per pre-commit
ajnelson-nist Mar 17, 2023
6b9e77b
Merge branch 'HotFix-run_precommit_autoupdate' into HotFix-sparql-que…
ajnelson-nist Mar 17, 2023
5cd80fc
Merge pull request #102 from casework/HotFix-sparql-query-result-types
kchason Mar 20, 2023
1e95e4e
Build CASE 1.2.0 monolithic .ttl files
ajnelson-nist Mar 28, 2023
d297361
Update CASE ontology pointer to version 1.2.0 (prerelease state)
ajnelson-nist Mar 28, 2023
24e763b
Regenerate Make-managed files.
ajnelson-nist Mar 28, 2023
11ffbd0
Bump CASE pointer to 1.2.0 release
ajnelson-nist Mar 29, 2023
646e978
Merge pull request #105 from casework/build_case_1.2.0
kchason Mar 29, 2023
3deb63d
Bump versions
ajnelson-nist Mar 30, 2023
Diff view
4 changes: 3 additions & 1 deletion .github/workflows/cicd.yml
@@ -23,6 +23,8 @@ on:
   release:
     types:
       - published
+  schedule:
+    - cron: '15 5 * * TUE'

 jobs:
   build:
@@ -31,7 +33,7 @@ jobs:
     strategy:
       matrix:
         python-version:
-          - '3.7'
+          - '3.8'
           - '3.11'

     steps:
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -1,14 +1,14 @@
 repos:
   - repo: https://github.com/psf/black
-    rev: 22.10.0
+    rev: 23.1.0
     hooks:
       - id: black
   - repo: https://github.com/pycqa/flake8
-    rev: 5.0.4
+    rev: 6.0.0
     hooks:
       - id: flake8
   - repo: https://github.com/pycqa/isort
-    rev: 5.10.1
+    rev: 5.12.0
     hooks:
       - id: isort
         name: isort (python)
2 changes: 1 addition & 1 deletion case_utils/__init__.py
@@ -11,6 +11,6 @@
 #
 # We would appreciate acknowledgement if the software is used.

-__version__ = "0.9.0"
+__version__ = "0.10.0"

 from . import local_uuid  # noqa: F401
1 change: 1 addition & 0 deletions case_utils/case_file/__init__.py
@@ -39,6 +39,7 @@

 DEFAULT_PREFIX = "http://example.org/kb/"

+
 # Shortcut syntax for defining an immutable named tuple is noted here:
 # https://docs.python.org/3/library/typing.html#typing.NamedTuple
 # via the "See also" box here: https://docs.python.org/3/library/collections.html#collections.namedtuple
7 changes: 4 additions & 3 deletions case_utils/case_sparql_construct/__init__.py
@@ -15,7 +15,7 @@
 This script executes a SPARQL CONSTRUCT query, returning a graph of the generated triples.
 """

-__version__ = "0.2.4"
+__version__ = "0.2.5"

 import argparse
 import logging
@@ -98,10 +98,11 @@ def main() -> None:
     construct_query_result = in_graph.query(construct_query_object)
     _logger.debug("type(construct_query_result) = %r." % type(construct_query_result))
     _logger.debug("len(construct_query_result) = %d." % len(construct_query_result))
-    for (row_no, row) in enumerate(construct_query_result):
+    for row_no, row in enumerate(construct_query_result):
+        assert isinstance(row, tuple)
         if row_no == 0:
             _logger.debug("row[0] = %r." % (row,))
-        out_graph.add(row)
+        out_graph.add((row[0], row[1], row[2]))

     output_format = None
     if args.output_format is None:
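For context on the out_graph.add() change above: iterating an rdflib CONSTRUCT result yields triples, and re-packing each row as an explicit (subject, predicate, object) tuple is the shape Graph.add() expects, which also keeps the argument's type explicit. A minimal, self-contained sketch of the pattern, assuming rdflib is installed and using a made-up input graph and a hypothetical CONSTRUCT query that are not part of this PR:

import rdflib

# Hypothetical two-triple input, for illustration only.
in_graph = rdflib.Graph()
in_graph.parse(
    data="""
@prefix ex: <http://example.org/> .
ex:a ex:knows ex:b .
ex:b ex:knows ex:c .
""",
    format="turtle",
)

# Hypothetical CONSTRUCT query, for illustration only.
construct_query_text = """
CONSTRUCT { ?b <http://example.org/knownBy> ?a . }
WHERE { ?a <http://example.org/knows> ?b . }
"""

out_graph = rdflib.Graph()
for row in in_graph.query(construct_query_text):
    # Each row of a CONSTRUCT result is a triple.
    assert isinstance(row, tuple)
    # Re-pack as an explicit 3-tuple, mirroring the hunk above.
    out_graph.add((row[0], row[1], row[2]))

print(out_graph.serialize(format="turtle"))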
25 changes: 13 additions & 12 deletions case_utils/case_sparql_select/__init__.py
@@ -26,7 +26,7 @@
 Should a more complex query be necessary, an outer, wrapping SELECT query would let this script continue to function.
 """

-__version__ = "0.5.0"
+__version__ = "0.5.1"

 import argparse
 import binascii
@@ -86,26 +86,27 @@ def graph_and_query_to_data_frame(
     select_query_object = rdflib.plugins.sparql.processor.prepareQuery(
         select_query_text, initNs=nsdict
     )
-    for (row_no, row) in enumerate(_graph.query(select_query_object)):
+    for row_no, row in enumerate(_graph.query(select_query_object)):
+        assert isinstance(row, rdflib.query.ResultRow)
         tally = row_no + 1
         record = []
-        for (column_no, column) in enumerate(row):
+        for column_no, column in enumerate(row):
             if column is None:
                 column_value = ""
-            elif (
-                isinstance(column, rdflib.term.Literal)
-                and column.datatype == NS_XSD.hexBinary
-            ):
-                # Use hexlify to convert xsd:hexBinary to ASCII.
-                # The render to ASCII is in support of this script rendering results for website viewing.
-                # .decode() is because hexlify returns bytes.
-                column_value = binascii.hexlify(column.toPython()).decode()
+            elif isinstance(column, rdflib.term.Literal):
+                if column.datatype == NS_XSD.hexBinary:
+                    # Use hexlify to convert xsd:hexBinary to ASCII.
+                    # The render to ASCII is in support of this script rendering results for website viewing.
+                    # .decode() is because hexlify returns bytes.
+                    column_value = binascii.hexlify(column.toPython()).decode()
+                else:
+                    column_value = column.toPython()
             elif isinstance(column, rdflib.URIRef):
                 if use_prefixes:
                     column_value = graph.namespace_manager.qname(column.toPython())
                 else:
                     column_value = column.toPython()
-            else:
+            elif isinstance(column, rdflib.BNode):
                 column_value = column.toPython()
             if row_no == 0:
                 _logger.debug("row[0]column[%d] = %r." % (column_no, column_value))
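The restructured branches above dispatch first on the rdflib term class and only then on the literal datatype, adding an explicit rdflib.BNode case. A standalone sketch of the same dispatch follows, assuming rdflib is installed and that rdflib maps xsd:hexBinary literals to bytes; the term_to_cell() helper and the "ex" prefix binding are illustrative and not part of this PR:

import binascii

import rdflib

graph = rdflib.Graph()
graph.bind("ex", "http://example.org/")


def term_to_cell(column, use_prefixes=True):
    # Mirrors the branch structure in the hunk above.
    if column is None:
        return ""
    elif isinstance(column, rdflib.term.Literal):
        if column.datatype == rdflib.XSD.hexBinary:
            # hexlify returns bytes; .decode() yields an ASCII string.
            return binascii.hexlify(column.toPython()).decode()
        else:
            return column.toPython()
    elif isinstance(column, rdflib.URIRef):
        if use_prefixes:
            return graph.namespace_manager.qname(str(column))
        else:
            return column.toPython()
    elif isinstance(column, rdflib.BNode):
        return column.toPython()
    # Fallback for term types the hunk does not handle; not in the original code.
    return str(column)


print(term_to_cell(rdflib.Literal(b"\x01\xff", datatype=rdflib.XSD.hexBinary)))  # 01ff
print(term_to_cell(rdflib.URIRef("http://example.org/thing")))  # ex:thing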
10 changes: 8 additions & 2 deletions case_utils/local_uuid.py
@@ -15,7 +15,7 @@
 This library is a wrapper for uuid, provided to generate repeatable UUIDs if requested.
 """

-__version__ = "0.3.1"
+__version__ = "0.3.2"

 import logging
 import os
@@ -33,6 +33,9 @@


 def configure() -> None:
+    """
+    This function is part of setting up demo_uuid() to generate non-random UUIDs. See demo_uuid() documentation for further setup notes.
+    """
     global DEMO_UUID_BASE

     if os.getenv("DEMO_UUID_REQUESTING_NONRANDOM") == "NONRANDOM_REQUESTED":
@@ -112,7 +115,10 @@ def demo_uuid() -> str:

     WARNING: This function was developed for use ONLY for reducing (but not eliminating) version-control edits to identifiers when generating sample data. It creates UUIDs that are decidedly NOT random, and should remain consistent on repeated calls to the importing script.

-    To prevent accidental non-random UUID usage, an environment variable, CASE_DEMO_NONRANDOM_UUID_BASE, must be set to a string provided by the caller. The variable's required value is the path to some directory. The variable's recommended value is the equivalent of the Make variable "top_srcdir" - that is, the root directory of the containing Git repository, some parent of the current process's current working directory.
+    To prevent accidental non-random UUID usage, two setup steps need to be done before calling this function:
+
+    * An environment variable, CASE_DEMO_NONRANDOM_UUID_BASE, must be set to a string provided by the caller. The variable's required value is the path to some directory. The variable's recommended value is the equivalent of the Make variable "top_srcdir" - that is, the root directory of the containing Git repository, some parent of the current process's current working directory.
+    * The configure() function in this module must be called.
     """
     global DEMO_UUID_BASE
     global DEMO_UUID_COUNTER
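Putting the two documented setup steps together, a hedged usage sketch; the directory path is a placeholder, and per the WARNING above demo_uuid() is intended only for generating sample data:

import os

from case_utils import local_uuid

# Step 1: set the environment variable to a directory path.
# Placeholder value; the docstring recommends the repository root
# (the equivalent of the Make variable "top_srcdir").
os.environ["CASE_DEMO_NONRANDOM_UUID_BASE"] = "/path/to/repository/root"

# Step 2: call configure() before requesting any demo UUIDs.
local_uuid.configure()

# Repeated runs of the same importing script should now yield
# consistent, decidedly non-random UUIDs.
print(local_uuid.demo_uuid())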
21 changes: 11 additions & 10 deletions case_utils/ontology/Makefile
@@ -29,8 +29,16 @@ all: \
 .PRECIOUS: \
   case-$(case_version).ttl

+$(case_srcdir)/.venv.done.log: \
+  $(top_srcdir)/.git_submodule_init.done.log
+	$(MAKE) \
+	  --directory $(case_srcdir) \
+	  .venv.done.log
+	touch -c $@
+	test -r $@
+
 case-$(case_version).ttl: \
-  $(top_srcdir)/.git_submodule_init.done.log \
+  $(case_srcdir)/.venv.done.log \
   $(RDF_TOOLKIT_JAR)
 	$(MAKE) \
 	  --directory $(case_srcdir)/tests \
@@ -47,10 +55,7 @@ case-$(case_version)-subclasses.ttl: \
 	# release is being made, that step will have been skipped.
 	# This recursive Make call guarantees the virtual environment is
 	# set up.
-	$(MAKE) \
-	  --directory $(case_srcdir)/tests \
-	  .venv.done.log
-	source $(case_srcdir)/tests/venv/bin/activate \
+	source $(case_srcdir)/venv/bin/activate \
 	  && python3 src/subclasses_ttl.py \
 	  __$@ \
 	  $<
@@ -70,11 +75,7 @@ clean:
 ontology_and_version_iris.txt: \
   src/ontology_and_version_iris.py \
   case-$(case_version)-subclasses.ttl
-	# Guarantee venv is built. (Same rationale as in the subclasses.ttl recipe.)
-	$(MAKE) \
-	  --directory $(case_srcdir)/tests \
-	  .venv.done.log
-	source $(case_srcdir)/tests/venv/bin/activate \
+	source $(case_srcdir)/venv/bin/activate \
 	  && python3 src/ontology_and_version_iris.py \
 	  _$@ \
 	  case-*.ttl