12 changes: 9 additions & 3 deletions applications/expensive_seq/expensive_seq.py
@@ -1,8 +1,14 @@
# Your code here


cache = {}

def expensive_seq(x, y, z):
    # Base case: when x is 0 or negative, the sequence value is just y + z
    if x <= 0:
        return y + z
    # Return the memoized result if this (x, y, z) has been computed before
    if (x, y, z) in cache:
        return cache[(x, y, z)]
    # Otherwise compute recursively, cache the result, and return it
    cache[(x, y, z)] = (expensive_seq(x - 1, y + 1, z)
                        + expensive_seq(x - 2, y + 2, z * 2)
                        + expensive_seq(x - 3, y + 3, z * 3))
    return cache[(x, y, z)]



10 changes: 4 additions & 6 deletions applications/histo/README.md
@@ -67,14 +67,12 @@ bow ######

## Hints

Items: `.vgrzf()` zrgubq ba n qvpgvbanel zvtug or hfrshy.
Items: `.items()` method on a dictionary might be useful.

Sorting: vg'f cbffvoyr sbe `.fbeg()` gb fbeg ba zhygvcyr xrlf ng bapr.
Sorting: it's possible for `.sort()` to sort on multiple keys at once.

Sorting: artngvirf zvtug uryc jurer `erirefr` jba'g.

Printing: lbh pna cevag n inevnoyr svryq jvqgu va na s-fgevat jvgu
arfgrq oenprf, yvxr fb `{k:{l}}`
Sorting: negatives might help where `reverse` won't.

Printing: you can print a variable field width in an f-string with nested braces, like so: `{x:{y}}`.
(The hints are encrypted with ROT13. Google for `rot13 decoder` to see
them.)
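Putting the decrypted hints together, a minimal sketch might look like this (the `histogram` dict below is a made-up example, not part of the starter code):

```
# Hypothetical word counts, only to illustrate the hints above
histogram = {"silver": 4, "bow": 6, "wood": 1}

# .items() yields (word, count) pairs; sorting on (-count, word) orders by
# count descending, then alphabetically, without needing reverse=True
entries = sorted(histogram.items(), key=lambda item: (-item[1], item[0]))

# Nested braces let a variable control the printed field width
width = max(len(word) for word in histogram) + 1
for word, count in entries:
    print(f"{word:{width}}{'#' * count}")
```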
22 changes: 21 additions & 1 deletion applications/histo/histo.py
@@ -1,2 +1,22 @@
# Your code here
# Open the file and read the whole text in one go
with open('robin.txt') as f:
    texts = f.read()
# Split the text into lowercase words
words = texts.lower().split()
# Dictionary mapping each word to a bar of '#' characters
histogram = {}
# Characters to strip out of each word
ignore_char = '":;,.-+=/\\|[]{}()*^&'
for word in words:
    for char in word:
        if char in ignore_char:
            word = word.replace(char, '')
    # Skip words that were nothing but ignored characters
    if word == '':
        continue
    if word not in histogram:
        histogram[word] = '#'
    else:
        histogram[word] += '#'
# Longest bar first; equal-length bars keep their insertion order
for key, value in sorted(histogram.items(), key=lambda item: item[1], reverse=True):
    print(f'{key} {value}')



16 changes: 13 additions & 3 deletions applications/lookup_table/lookup_table.py
@@ -1,5 +1,6 @@
# Your code here

import random
import math

def slowfun_too_slow(x, y):
    v = math.pow(x, y)
@@ -8,13 +9,22 @@ def slowfun_too_slow(x, y):
    v %= 982451653

    return v

lookup_table = {}

def slowfun(x, y):
    """
    Rewrite slowfun_too_slow() in here so that the program produces the same
    output, but completes quickly instead of taking ages to run.
    """
    # Cache results in a dictionary (hash table) keyed on the (x, y) pair
    if (x, y) in lookup_table:
        return lookup_table[(x, y)]
    else:
        lookup_table[(x, y)] = slowfun_too_slow(x, y)
        return lookup_table[(x, y)]
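
An alternative sketch, not part of this solution, is to let the standard library handle the memoization with `functools.lru_cache` (the name `slowfun_cached` is hypothetical):

```
from functools import lru_cache

@lru_cache(maxsize=None)
def slowfun_cached(x, y):
    # lru_cache memoizes on the (x, y) arguments, so the slow computation
    # runs at most once per distinct input pair
    return slowfun_too_slow(x, y)
```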







39 changes: 37 additions & 2 deletions applications/markov/markov.py
@@ -3,9 +3,44 @@
# Read in all the words in one go
with open("input.txt") as f:
words = f.read()


# Split the text into words, keeping original capitalization and punctuation
texts = words.split()

# Analyze which words can follow other words
word_dic = {}
for i in range(len(texts) - 1):
    if texts[i] not in word_dic:
        word_dic[texts[i]] = [texts[i + 1]]
    else:
        word_dic[texts[i]].append(texts[i + 1])

# A start word begins with a capital letter or a quote mark;
# a stop word ends with . ? or !, optionally followed by a closing quote
start_words = []
stop_words = []
for word in word_dic:
    if word[0].isupper() or word[0] == '"':
        start_words.append(word)
    if word[-1] in '.?!':
        stop_words.append(word)
    if len(word) > 2:
        if (word[-2] in '.?!') and word[-1] == '"':
            stop_words.append(word)

# Construct 5 random sentences: begin at a random start word and keep
# picking a random follower until a stop word is reached
for i in range(5):
    word = random.choice(start_words)
    sentence = [word]
    while word not in stop_words and word in word_dic:
        word = random.choice(word_dic[word])
        sentence.append(word)
    print(' '.join(sentence))




# TODO: construct 5 random sentences
14 changes: 13 additions & 1 deletion applications/no_dups/no_dups.py
@@ -1,5 +1,17 @@
def no_dups(s):
    '''
    str -> str
    Return s with duplicate words removed, keeping the first occurrence of each.
    '''
    # Use a list rather than a string so the membership test matches whole
    # words instead of substrings
    cache = []
    words = s.split()
    for word in words:
        if word not in cache:
            cache.append(word)
    return ' '.join(cache)







2 changes: 1 addition & 1 deletion applications/word_count/README.md
@@ -28,4 +28,4 @@ Ignore each of the following characters:
" : ; , . - + = / \ | [ ] { } ( ) * ^ &
```

If the input contains no ignored characters, return an empty dictionary.
If the input consists only of ignored characters, return an empty dictionary.
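
For example, with the case-insensitive counting the solution below implements:

```
word_count("Hello, hello. world")      # {'hello': 2, 'world': 1}
word_count('":;,.-+=/\\|[]{}()*^&')    # {} (input is only ignored characters)
```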
19 changes: 17 additions & 2 deletions applications/word_count/word_count.py
@@ -1,10 +1,25 @@
def word_count(s):
    word_dic = {}
    ignored_char = '":;,.-+=/\\|[]{}()*^&'
    # Split the string into lowercase words
    words = s.lower().split()

    for word in words:
        # Strip ignored characters out of the word
        for char in word:
            if char in ignored_char:
                word = word.replace(char, '')
        # Skip words that were nothing but ignored characters
        if word == '':
            continue
        if word not in word_dic:
            word_dic[word] = 1
        else:
            word_dic[word] += 1

    return word_dic

if __name__ == "__main__":
    print(word_count('":;,.-+=/\\|[]{}()*^&'))
    print(word_count(""))
    print(word_count("Hello"))
    print(word_count("Hello hello"))
    print(word_count('Hello, my cat. And my cat doesn\'t say "hello" back.'))
    print(word_count('This is a test of the emergency broadcast network. This is only a test.'))
127 changes: 111 additions & 16 deletions hashtable/hashtable.py
@@ -1,6 +1,9 @@
class HashTableEntry:
"""
Linked List hash table key/value pair
Node class to store key value pair of the link list
each node has string as key value pair and pointer
to the the next node in case there is collision
"""
    def __init__(self, key, value):
        self.key = key
@@ -20,9 +23,12 @@ class HashTable:
    Implement this.
    """

    def __init__(self, capacity):
    def __init__(self, capacity=8):
        self.capacity = capacity
        # The table is a list of buckets; each slot holds the head of a
        # linked list of HashTableEntry nodes (chaining for collisions)
        self.list_hashtable = [None for i in range(self.capacity)]
        # Number of stored entries, used to compute the load factor in O(1)
        self.count = 0

    def get_num_slots(self):
        """
@@ -34,7 +40,8 @@ def get_num_slots(self):

        Implement this.
        """
        return self.capacity  # equivalently, len(self.list_hashtable)


    def get_load_factor(self):
@@ -43,7 +50,18 @@ def get_load_factor(self):

        Implement this.
        """
        # Load factor = (number of items in the table) / (total number of slots);
        # self.count is maintained by put() and delete(), so this is O(1)
        return self.count / self.capacity


    def fnv1(self, key):
@@ -59,11 +77,15 @@ def fnv1(self, key):
    def djb2(self, key):
        """
        DJB2 hash, 32-bit

        Implement this, and/or FNV-1.
        Uses bit manipulation, a prime seed (5381), and a multiplier of 33
        to turn a string into a 32-bit hash value.
        """
        hash = 5381
        byte_array = key.encode('utf-8')
        for byte in byte_array:
            # The modulus keeps the hash 32-bit; Python ints don't overflow on their own
            hash = ((hash * 33) ^ byte) % 0x100000000
        return hash

    def hash_index(self, key):
        """
@@ -81,7 +103,35 @@ def put(self, key, value):

        Implement this.
        """
        # Figure out which bucket the key belongs in
        list_index = self.hash_index(key)
        # Create a node for the new key/value pair
        new_node = HashTableEntry(key, value)
        # Check whether a chain already exists at that index
        existing_node = self.list_hashtable[list_index]
        if existing_node:
            while existing_node:
                # If the key already exists, overwrite its value
                if existing_node.key == new_node.key:
                    existing_node.value = value
                    return
                last_node = existing_node
                existing_node = existing_node.next
            # Key not found in the bucket: append the new node to the chain
            last_node.next = new_node
            self.count += 1
        else:
            # Empty slot: the new node becomes the head of the bucket
            self.list_hashtable[list_index] = new_node
            self.count += 1
        # Grow the table when the load factor gets too high
        if self.get_load_factor() > 0.7:
            self.resize(self.capacity * 2)




    def delete(self, key):
@@ -92,7 +142,25 @@ def delete(self, key):

        Implement this.
        """
        # Figure out which bucket the key belongs in
        list_index = self.hash_index(key)
        existing_node = self.list_hashtable[list_index]
        prev_node = None
        # Walk the chain looking for the key
        while existing_node:
            if existing_node.key == key:
                # Unlink the node: repoint the previous node, or the slot
                # itself if we are deleting the head of the chain
                if prev_node:
                    prev_node.next = existing_node.next
                else:
                    self.list_hashtable[list_index] = existing_node.next
                self.count -= 1
                return
            prev_node = existing_node
            existing_node = existing_node.next
        # The key was not found anywhere in the bucket
        print(f'Warning: {key} not found')



    def get(self, key):
@@ -102,8 +170,20 @@ def get(self, key):
        Returns None if the key is not found.

        Implement this.
        """
        # Figure out which bucket the key belongs in
        list_index = self.hash_index(key)
        existing_node = self.list_hashtable[list_index]
        # Walk the chain at that index looking for the key
        while existing_node:
            if existing_node.key == key:
                return existing_node.value
            existing_node = existing_node.next
        # Empty slot, or the key is not in the chain
        return None



    def resize(self, new_capacity):
@@ -112,8 +192,20 @@ def resize(self, new_capacity):
        rehashes all key/value pairs.

        Implement this.
        """
        # Swap in a new, larger bucket list
        self.capacity = new_capacity
        old_list = self.list_hashtable
        self.list_hashtable = [None for i in range(new_capacity)]
        # Reset the count; put() re-increments it as entries are rehashed
        self.count = 0
        # Re-insert every node in every chain, not just the head of each bucket
        for node in old_list:
            while node:
                self.put(node.key, node.value)
                node = node.next








@@ -139,13 +231,16 @@ def resize(self, new_capacity):
    for i in range(1, 13):
        print(ht.get(f"line_{i}"))

    # Load factor before resizing
    load_factor = ht.get_load_factor()
    print(load_factor)

    # Test resizing
    old_capacity = ht.get_num_slots()
    ht.resize(ht.capacity * 2)
    new_capacity = ht.get_num_slots()

    # Load factor after resizing
    load_factor = ht.get_load_factor()
    print(f"\nResized from {old_capacity} to {new_capacity}.\n")
    print(load_factor)

    # Test if data intact after resizing
    for i in range(1, 13):
        print(ht.get(f"line_{i}"))