code is now PEP8 compliant
shyal committed Dec 1, 2016
1 parent da40f8c commit a6801c8
Showing 14 changed files with 223 additions and 179 deletions.
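
The diff below consists almost entirely of whitespace and line-wrapping changes. The commit message does not say how the reformatting was produced, so purely as an illustrative sketch, this is the kind of cleanup an automatic formatter such as autopep8 applies:

```python
# Illustrative only -- the commit does not state whether a tool was used.
# autopep8 exposes fix_code(), which rewrites a source string to satisfy PEP 8.
import autopep8

messy = "x=       123\nprint( x , x )\n"
print(autopep8.fix_code(messy))
# expected: normalised whitespace, e.g. "x = 123" and "print(x, x)"
```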
6 changes: 4 additions & 2 deletions examples/basic/basic.py
@@ -7,10 +7,12 @@
# create our HoverPy object in capture mode
hp = HoverPy(capture=True)

# print the json from our get request. Hoverpy acted as a proxy: it made the request on our behalf, captured it, and returned it to us.
# print the json from our get request. Hoverpy acted as a proxy: it made
# the request on our behalf, captured it, and returned it to us.
print(requests.get("http://ip.jsontest.com/myip").json())

# switch HoverPy to simulate mode. HoverPy no longer acts as a proxy; all it does from now on is replay the captured data.
# switch HoverPy to simulate mode. HoverPy no longer acts as a proxy; all
# it does from now on is replay the captured data.
hp.simulate()

# print the json from our get request. This time the data comes from the store.
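
The tail of basic.py is collapsed in this view. Pieced together from the comments above, the full capture-then-simulate flow presumably looks like the following sketch (a reconstruction, not the verbatim file):

```python
# Reconstruction based on the comments shown above -- not the verbatim file.
import requests
from hoverpy import HoverPy

# capture mode: HoverPy proxies the request and records it
hp = HoverPy(capture=True)
print(requests.get("http://ip.jsontest.com/myip").json())

# simulate mode: no more proxying to the real service; responses are replayed
# from the capture store
hp.simulate()
print(requests.get("http://ip.jsontest.com/myip").json())
```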
33 changes: 20 additions & 13 deletions examples/delays/README.md
@@ -24,9 +24,16 @@ This function either generates an echo server url or an md5 url; it is seeded so that we get the exact same requests on capture as we do on simulate.

```python
def getServiceData():
    for i in range(10):
        random.seed(i)
        print(requests.get(random.choice(["http://echo.jsontest.com/i/%i"%i, "http://md5.jsontest.com/?text=%i"%i])).json())
    for i in range(10):
        random.seed(i)
        print(
            requests.get(
                random.choice(
                    [
                        "http://echo.jsontest.com/i/%i" %
                        i,
                        "http://md5.jsontest.com/?text=%i" %
                        i])).json())

```

@@ -38,17 +45,17 @@ getServiceData()

```

There are two ways to add delays. One is to call the delays method with the desired delay rules passed in as a json document
There are two ways to add delays. One is to call the delays method with the desired delay rules passed in as a json document

```python
print(hp.delays({"data":[
    {
        "urlPattern": "md5.jsontest.com",
        "delay": 1000
    }
]
}
))
print(hp.delays({"data": [
    {
        "urlPattern": "md5.jsontest.com",
        "delay": 1000
    }
]
}
))

```
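
The second way is collapsed in this view, but delays.py further down uses `addDelay(...)`, which expresses the same rule as keyword arguments; a minimal sketch:

```python
# Same 1000 ms delay rule as the JSON document above, expressed via addDelay()
# (delays.py later in this diff uses this call with echo.jsontest.com).
print(hp.addDelay(urlPattern="md5.jsontest.com", delay=1000))
```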

@@ -70,7 +77,7 @@ Make the requests. This time HoverFly adds the simulated delays. These requests would normally be run asynchronously, and we could deal gracefully with the dependency taking too long to respond.

```python
print("\nreplaying delayed responses from echo server\n")
getServiceData(
getServiceData()

```

38 changes: 24 additions & 14 deletions examples/delays/delays.py
@@ -9,26 +9,36 @@
hp = HoverPy(capture=True)

# this function either generates an echo server url, or an md5 url
# it is seeded so that we get the exact same requests on capture as we do on simulate
# it is seeded so that we get the exact same requests on capture as we do
# on simulate


def getServiceData():
    for i in range(10):
        random.seed(i)
        print(requests.get(random.choice(["http://echo.jsontest.com/i/%i"%i, "http://md5.jsontest.com/?text=%i"%i])).json())
    for i in range(10):
        random.seed(i)
        print(
            requests.get(
                random.choice(
                    [
                        "http://echo.jsontest.com/i/%i" %
                        i,
                        "http://md5.jsontest.com/?text=%i" %
                        i])).json())

# make the requests to the desired host dependencies
print("capturing responses from echo server\n")
getServiceData()

# There are two ways to add delays. One is to call the delays method
# with the desired delay rules passed in as a json document
print(hp.delays({"data":[
    {
        "urlPattern": "md5.jsontest.com",
        "delay": 1000
    }
]
}
))
# with the desired delay rules passed in as a json document
print(hp.delays({"data": [
    {
        "urlPattern": "md5.jsontest.com",
        "delay": 1000
    }
]
}
))

# the other more pythonic way is to call addDelay(...)
print(hp.addDelay(urlPattern="echo.jsontest.com", delay=3000))
@@ -40,4 +50,4 @@ def getServiceData():
# these requests would normally be run asynchronously, and we could deal
# gracefully with the dependency taking too long to respond
print("\nreplaying delayed responses from echo server\n")
getServiceData()
getServiceData()
18 changes: 12 additions & 6 deletions examples/modify/README.md
@@ -15,19 +15,25 @@ import requests
Create our HoverPy object with modify and middleware enabled. Please note this brings in `python examples/modify/modify_payload.py`, which will be run on every request.

```python
hoverpy = HoverPy(flags=["-modify", "-middleware", "python examples/modify/modify_payload.py"])
hoverpy = HoverPy(
    flags=[
        "-modify",
        "-middleware",
        "python examples/modify/modify_payload.py"])

```

Our middleware is designed to randomly return an empty body instead of what it's supposed to return (the current time). This is a good example of how to alter your dependencies, and adequately test and react based on their content.

```python
for i in range(30):
    r = requests.get("http://time.jsontest.com")
    if "time" in r.json().keys():
        print("response successfully modified, current date is "+r.json()["time"])
    else:
        print("something went wrong - deal with it gracefully")
    r = requests.get("http://time.jsontest.com")
    if "time" in r.json().keys():
        print(
            "response successfully modified, current date is " +
            r.json()["time"])
    else:
        print("something went wrong - deal with it gracefully")

```

18 changes: 12 additions & 6 deletions examples/modify/modify.py
@@ -7,14 +7,20 @@
# create our HoverPy object with modify and middleware enabled.
# please note this brings in ```python examples/modify/modify_payload.py```
# which will get run on every request
hoverpy = HoverPy(flags=["-modify", "-middleware", "python examples/modify/modify_payload.py"])
hoverpy = HoverPy(
    flags=[
        "-modify",
        "-middleware",
        "python examples/modify/modify_payload.py"])

# our middleware is designed to randomly return an empty body instead of what it's supposed
# to return (the current time). This is a good example of how to alter your dependencies,
# and adequately test and react based on their content
for i in range(30):
    r = requests.get("http://time.jsontest.com")
    if "time" in r.json().keys():
        print("response successfully modified, current date is "+r.json()["time"])
    else:
        print("something went wrong - deal with it gracefully")
    r = requests.get("http://time.jsontest.com")
    if "time" in r.json().keys():
        print(
            "response successfully modified, current date is " +
            r.json()["time"])
    else:
        print("something went wrong - deal with it gracefully")
7 changes: 4 additions & 3 deletions examples/modify/modify_payload.py
@@ -8,6 +8,7 @@
logging.basicConfig(filename='middleware.log', level=logging.DEBUG)
logging.debug('Middleware "modify_request" called')


def main():
    data = sys.stdin.readlines()
    # this is a json string in one line so we are interested in that one line
@@ -18,13 +19,13 @@ def main():

    payload_dict = json.loads(payload)

    payload_dict['response']['status'] = random.choice([200,201])
    payload_dict['response']['status'] = random.choice([200, 201])

    if random.choice([True, False]):
        payload_dict['response']['body'] = "{}"

    # returning new payload
    print(json.dumps(payload_dict))

if __name__ == "__main__":
    main()
    main()
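
Middleware of this kind reads the traffic payload as JSON on stdin and prints the (possibly modified) payload to stdout, which is what modify_payload.py does above. A hypothetical way to exercise the script on its own, assuming a minimal payload shape like the one it accesses:

```python
# Hypothetical standalone check (not part of the repo): feed the middleware a
# minimal payload on stdin and inspect what it prints on stdout.
import json
import subprocess

payload = {"response": {"status": 200, "body": '{"time": "01:00:00 PM"}'}}
result = subprocess.run(
    ["python", "examples/modify/modify_payload.py"],
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)
print(result.stdout)  # status becomes 200 or 201; body is kept or blanked to "{}"
```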
23 changes: 12 additions & 11 deletions examples/readthedocs/README.md
@@ -19,7 +19,8 @@ Setup argparse. If we call our app with --capture, it captures the requests. Else it simulates them.
from argparse import ArgumentParser
parser = ArgumentParser(description="Perform proxy testing/URL list creation")
parser.add_argument("--capture", help="capture the data", action="store_true")
parser.add_argument("--limit", default=50, help="number of links to capture / simulate")
parser.add_argument(
    "--limit", default=50, help="number of links to capture / simulate")
args = parser.parse_args()

```
@@ -28,16 +29,16 @@ This function requests articles from readthedocs.io.

```python
def getLinks(hp, limit):
    print("\nGetting links in %s mode!\n" % hp.mode())
    start = time.time()
    sites = requests.get("http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
    objects = sites.json()['objects']
    links = ["http://readthedocs.org" + x['resource_uri'] for x in objects]
    for link in links:
        response = requests.get(link)
        print("url: %s, status code: %s" % (link, response.status_code))

    print("Time taken: %f" % (time.time() - start))
    print("\nGetting links in %s mode!\n" % hp.mode())
    start = time.time()
    sites = requests.get(
        "http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
    objects = sites.json()['objects']
    links = ["http://readthedocs.org" + x['resource_uri'] for x in objects]
    for link in links:
        response = requests.get(link)
        print("url: %s, status code: %s" % (link, response.status_code))
    print("Time taken: %f" % (time.time() - start))

```

28 changes: 16 additions & 12 deletions examples/readthedocs/readthedocs.py
@@ -10,22 +10,26 @@
from argparse import ArgumentParser
parser = ArgumentParser(description="Perform proxy testing/URL list creation")
parser.add_argument("--capture", help="capture the data", action="store_true")
parser.add_argument("--limit", default=50, help="number of links to capture / simulate")
parser.add_argument(
    "--limit", default=50, help="number of links to capture / simulate")
args = parser.parse_args()

# this function requests articles from readthedocs.io.


def getLinks(hp, limit):
    print("\nGetting links in %s mode!\n" % hp.mode())
    start = time.time()
    sites = requests.get("http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
    objects = sites.json()['objects']
    links = ["http://readthedocs.org" + x['resource_uri'] for x in objects]

    for link in links:
        response = requests.get(link)
        print("url: %s, status code: %s" % (link, response.status_code))

    print("Time taken: %f" % (time.time() - start))
    print("\nGetting links in %s mode!\n" % hp.mode())
    start = time.time()
    sites = requests.get(
        "http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
    objects = sites.json()['objects']
    links = ["http://readthedocs.org" + x['resource_uri'] for x in objects]

    for link in links:
        response = requests.get(link)
        print("url: %s, status code: %s" % (link, response.status_code))

    print("Time taken: %f" % (time.time() - start))

# construct our HoverPy object in capture mode
hp = HoverPy(capture=args.capture)
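
The rest of readthedocs.py is collapsed here; following the capture/simulate pattern used in basic.py above, the continuation presumably just runs the fetch through the proxy, for example:

```python
# Hypothetical continuation -- the collapsed tail is not shown in this diff.
# int() guards against --limit arriving as a string from the command line.
getLinks(hp, int(args.limit))
```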
12 changes: 4 additions & 8 deletions examples/unittesting/README.md
@@ -9,16 +9,12 @@ Instead of inheriting off `unittest.TestCase` let's inherit off `hoverpy.TestCase`

```python
class TestRTD(hoverpy.TestCase):

```

In our test, we'll once again download a load of readthedocs pages

```python
    # in our test, we'll once again download a load of readthedocs pages
    def test_rtd_links(self):
        import requests
        limit = 50
        sites = requests.get("http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
        sites = requests.get(
            "http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
        objects = sites.json()['objects']
        links = ["http://readthedocs.org" + x['resource_uri'] for x in objects]
        self.assertTrue(len(links) == limit)
@@ -38,4 +34,4 @@ if __name__ == '__main__':

```

<hr> Now the correct way of launching this script the first time is: <br><br> `$ env HOVERPY_CAPTURE=true python examples/unittesting/unittesting.py`<br><br> which sets HoverPy in capture mode, and creates our all important `requests.db`. This process may take around 10 seconds depending on your internet speed. Now when we rerun our unit tests, we're always running against the data we captured in `requests.db`.<br><br> `$ python examples/unittesting/unittesting.py`<br><br> This time we are done in around 100ms
<hr> Now the correct way of launching this script the first time is: <br><br> `$ env HOVERPY_CAPTURE=true python examples/unittesting/unittesting.py`<br><br> which sets HoverPy in capture mode, and creates our all important `requests.db`. This process may take around 10 seconds depending on your internet speed. Now when we rerun our unit tests, we're always running against the data we captured in `requests.db`.<br><br> `$ python examples/unittesting/unittesting.py`<br><br> This time we are done in around 100ms! Not to mention: no more unnecessary breakages.
10 changes: 7 additions & 3 deletions examples/unittesting/unittesting.py
@@ -7,13 +7,16 @@

# Instead of inheriting off `unittest.TestCase` let's inherit off
# `hoverpy.TestCase`


class TestRTD(hoverpy.TestCase):

    # in our test, we'll once again download a load of readthedocs pages
    # in our test, we'll once again download a load of readthedocs pages
    def test_rtd_links(self):
        import requests
        limit = 50
        sites = requests.get("http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
        sites = requests.get(
            "http://readthedocs.org/api/v1/project/?limit=%d&offset=0&format=json" % limit)
        objects = sites.json()['objects']
        links = ["http://readthedocs.org" + x['resource_uri'] for x in objects]
        self.assertTrue(len(links) == limit)
@@ -35,4 +38,5 @@ def test_rtd_links(self):
# Now when we rerun our unit tests, we're always running against the
# data we captured in `requests.db`.<br><br>
# `$ python examples/unittesting/unittesting.py`<br><br>
# This time we are done in around 100ms! Not to mention: no more unnecessary breakages.
# This time we are done in around 100ms! Not to mention: no more
# unnecessary breakages.
