Skip to content

Commit

Permalink
Merge pull request #432 from skalish/new-record-tests
Browse files Browse the repository at this point in the history
TC: Add ndjson and request payload checking support to fake.json + update tests for tc.record
  • Loading branch information
pcattori committed Aug 20, 2020
2 parents d8755d1 + 1f7ee65 commit 146c883
Show file tree
Hide file tree
Showing 19 changed files with 318 additions and 95 deletions.
96 changes: 7 additions & 89 deletions tests/tamr_client/dataset/test_record.py
Original file line number Diff line number Diff line change
@@ -1,65 +1,34 @@
from functools import partial
import json
from typing import Dict

import pytest
import responses

import tamr_client as tc
from tests.tamr_client import fake, utils
from tests.tamr_client import fake


@responses.activate
@fake.json
def test_update():
s = fake.session()
dataset = fake.dataset()

url = tc.URL(path="datasets/1:updateRecords")
updates = [
tc.record._create_command(record, primary_key_name="primary_key")
for record in _records_json
]
snoop: Dict = {}
responses.add_callback(
responses.POST,
str(url),
partial(
utils.capture_payload, snoop=snoop, status=200, response_json=_response_json
),
)

response = tc.record._update(s, dataset, updates)
assert response == _response_json
assert snoop["payload"] == utils.stringify(updates)


@responses.activate
@fake.json
def test_upsert():
s = fake.session()
dataset = fake.dataset()

url = tc.URL(path="datasets/1:updateRecords")
updates = [
tc.record._create_command(record, primary_key_name="primary_key")
for record in _records_json
]
snoop: Dict = {}
responses.add_callback(
responses.POST,
str(url),
partial(
utils.capture_payload, snoop=snoop, status=200, response_json=_response_json
),
)

response = tc.record.upsert(
s, dataset, _records_json, primary_key_name="primary_key"
)
assert response == _response_json
assert snoop["payload"] == utils.stringify(updates)


@responses.activate
def test_upsert_primary_key_not_found():
s = fake.session()
dataset = fake.dataset()
Expand All @@ -70,57 +39,26 @@ def test_upsert_primary_key_not_found():
)


@responses.activate
@fake.json
def test_upsert_infer_primary_key():
s = fake.session()
dataset = fake.dataset()

url = tc.URL(path="datasets/1:updateRecords")
updates = [
tc.record._create_command(record, primary_key_name="primary_key")
for record in _records_json
]
snoop: Dict = {}
responses.add_callback(
responses.POST,
str(url),
partial(
utils.capture_payload, snoop=snoop, status=200, response_json=_response_json
),
)

response = tc.record.upsert(s, dataset, _records_json)
assert response == _response_json
assert snoop["payload"] == utils.stringify(updates)


@responses.activate
@fake.json
def test_delete():
s = fake.session()
dataset = fake.dataset()

url = tc.URL(path="datasets/1:updateRecords")
deletes = [
tc.record._delete_command(record, primary_key_name="primary_key")
for record in _records_json
]
snoop: Dict = {}
responses.add_callback(
responses.POST,
str(url),
partial(
utils.capture_payload, snoop=snoop, status=200, response_json=_response_json
),
)

response = tc.record.delete(
s, dataset, _records_json, primary_key_name="primary_key"
)
assert response == _response_json
assert snoop["payload"] == utils.stringify(deletes)


@responses.activate
def test_delete_primary_key_not_found():
s = fake.session()
dataset = fake.dataset()
Expand All @@ -131,40 +69,20 @@ def test_delete_primary_key_not_found():
)


@responses.activate
@fake.json
def test_delete_infer_primary_key():
s = fake.session()
dataset = fake.dataset()

url = tc.URL(path="datasets/1:updateRecords")
deletes = [
tc.record._delete_command(record, primary_key_name="primary_key")
for record in _records_json
]
snoop: Dict = {}
responses.add_callback(
responses.POST,
str(url),
partial(
utils.capture_payload, snoop=snoop, status=200, response_json=_response_json
),
)

response = tc.record.delete(s, dataset, _records_json)
assert response == _response_json
assert snoop["payload"] == utils.stringify(deletes)


@responses.activate
@fake.json
def test_stream():
s = fake.session()
dataset = fake.dataset()

url = tc.URL(path="datasets/1/records")
responses.add(
responses.GET, str(url), body="\n".join(json.dumps(x) for x in _records_json)
)

records = tc.record.stream(s, dataset)
assert list(records) == _records_json

Expand Down
89 changes: 84 additions & 5 deletions tests/tamr_client/fake.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,79 @@
For more, see "How to write tests" in the Contributor guide.
"""

from functools import wraps
from functools import partial, wraps
from inspect import getfile
from json import load
from json import dumps, load, loads
from pathlib import Path
from typing import Dict, Tuple

import responses

import tamr_client as tc
from tamr_client._types import JsonDict


tests_tc_dir = (Path(__file__) / "..").resolve()
fake_json_dir = tests_tc_dir / "fake_json"


def _to_kwargs(fake):
class WrongRequestBody(Exception):
    """Signals that an intercepted request carried a body different from the
    one the test expected.
    """


def _check_request_body(request, expected_body: JsonDict):
"""Checks that the body of a caught request matches the expected content
The body is decoded and loaded as a JSON object so the comparison is not sensitive to the
order of dictionary contents. The comparison is sensitive to the order of a newline-delimited
JSON request body.
Args:
request: The caught request
expected_body: The expected request body as a dictionary (for JSON contents) or a list of
dictionaries (for newline-delimited JSON contents)
"""
if isinstance(expected_body, list):
actual_body = [loads(x.decode("utf-8")) for x in request.body]
if actual_body != expected_body:
raise WrongRequestBody(actual_body)
elif expected_body is not None:
actual_body = loads(request.body.decode("utf-8"))
if actual_body != expected_body:
raise WrongRequestBody(actual_body)


def _callback(
    request, expected_body: JsonDict, status: int, response_json: str
) -> Tuple[int, Dict, str]:
    """Validate an intercepted API request, then emit the canned response.

    Conforms to the callback interface of
    `~responses.RequestsMock.add_callback`.

    Args:
        request: The caught request
        expected_body: Expected request body — a dictionary for JSON contents
            or a list of dictionaries for newline-delimited JSON contents
        status: HTTP status of the response to be emitted
        response_json: JSON body of the response to be emitted

    Returns:
        Response status, headers, and JSON body

    Raises:
        WrongRequestBody: If the request body does not match `expected_body`
    """
    _check_request_body(request, expected_body)
    headers: Dict = {}
    return status, headers, response_json


def add_response(rsps, fake: JsonDict):
"""Adds a mock response to intercept API requests and respond with fake JSON data
Args:
fake: The JSON dictionary containing the fake data defining what requests to intercept and
what responses to emit
"""
req = fake["request"]
resp = fake["response"]

Expand All @@ -27,7 +85,28 @@ def _to_kwargs(fake):
path = req.get("path")
url = "http://localhost/api/versioned/v1/" + path

return dict(method=req["method"], url=url, **resp)
# Get response body from either ndjson or json
if resp.get("ndjson") is not None:
resp["body"] = "\n".join((dumps(line) for line in resp["ndjson"]))
elif resp.get("json") is not None:
resp["body"] = dumps(resp["json"])

# Get expected request body from ndjson
if req.get("ndjson") is not None:
req["body"] = req["ndjson"]
elif req.get("json") is not None:
req["body"] = req["json"]

rsps.add_callback(
method=req["method"],
url=url,
callback=partial(
_callback,
expected_body=req.get("body"),
status=resp["status"],
response_json=resp.get("body"),
),
)


def json(test_fn):
Expand All @@ -47,7 +126,7 @@ def json(test_fn):
def wrapper(*args, **kwargs):
with responses.RequestsMock() as rsps:
for fake in fakes:
rsps.add(**_to_kwargs(fake))
add_response(rsps, fake)
test_fn(*args, **kwargs)

return wrapper
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,59 @@
{
"request": {
"method": "POST",
"path": "datasets/1/attributes"
"path": "datasets/1/attributes",
"json": {
"name": "attr",
"isNullable": false,
"type": {
"baseType": "RECORD",
"attributes": [
{
"name": "0",
"isNullable": true,
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "STRING"
}
}
},
{
"name": "1",
"isNullable": true,
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "STRING"
}
}
},
{
"name": "2",
"isNullable": true,
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "STRING"
}
}
},
{
"name": "3",
"isNullable": true,
"type": {
"baseType": "ARRAY",
"innerType": {
"baseType": "STRING"
}
}
}
]
}
}
},
"response": {
"status": 201,
"json": {
"name": "attr",
"isNullable": false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
"path": "datasets/1/attributes/attr"
},
"response": {
"status": 200,
"json": {
"name": "attr",
"isNullable": false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
"path": "datasets/1/attributes/RowNum"
},
"response": {
"status": 200,
"json": {
"name": "RowNum",
"description": "Synthetic row number updated",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
"path": "projects/2/unifiedDataset"
},
"response": {
"status": 200,
"json": {
"id": "unify://unified-data/v1/datasets/161",
"name": "Party_Categorization_Unified_Dataset",
Expand Down Expand Up @@ -38,6 +39,7 @@
"path": "datasets?filter=name==Party_Categorization_Unified_Dataset_manual_categorizations"
},
"response": {
"status": 200,
"json": [
{
"id": "unify://unified-data/v1/datasets/167",
Expand Down Expand Up @@ -71,6 +73,7 @@
"path": "datasets/167"
},
"response": {
"status": 200,
"json": {
"id": "unify://unified-data/v1/datasets/167",
"name": "Party_Categorization_Unified_Dataset_manual_categorizations",
Expand Down

0 comments on commit 146c883

Please sign in to comment.