This repository was archived by the owner on May 25, 2022. It is now read-only.
Merged
34 changes: 34 additions & 0 deletions .github/workflows/python-app.yml
@@ -0,0 +1,34 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python application

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v2
    - name: Set up Python 3.8
      uses: actions/setup-python@v2
      with:
        python-version: 3.8
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install flake8
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
    - name: Lint with flake8
      run: |
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
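
The first flake8 invocation gates the build on hard errors only: E9 covers syntax and indentation errors, F63 invalid comparisons (including `is` against literals), F7 misplaced statements such as `break` outside a loop, and F82 undefined names. A minimal sketch of a file the gating pass would reject (the file name is illustrative; flake8 assumed installed, as in the workflow):

# lint_demo.py -- a file the gating flake8 pass rejects
total = 0
if total is 0:   # F632 (selected via F63): 'is' used with an int literal
    print(totl)  # F821 (selected via F82): undefined name 'totl'

Running `flake8 lint_demo.py --count --select=E9,F63,F7,F82 --show-source --statistics` locally reproduces the gating step; the second, `--exit-zero` invocation only reports style findings without failing the job.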

1 change: 1 addition & 0 deletions README.md
@@ -169,3 +169,4 @@ SR No | Project | Author
82 | [Time To Load Website](https://github.com/chavarera/python-mini-projects/tree/master/projects/Time%20to%20load%20Website)| [Aditya Jetely](https://github.com/AdityaJ7)
83 | [Customer Loan Repayment Prediction](https://github.com/chavarera/python-mini-projects/tree/master/Notebooks/Customer_loan_repayment_problem)| [ART](https://github.com/Tomyzon1728)
84 | [Generate Wordcloud from Wikipedia Article](https://github.com/chavarera/python-mini-projects/tree/master/projects/Wikipedia%20Search%20Wordcloud)| [Naman Shah](https://github.com/namanshah01)
85 | [Number Guessing Game](https://github.com/chavarera/python-mini-projects/tree/master/projects/Number%20guessing%20game)| [Javokhirbek](https://github.com/leader2one)
1 change: 1 addition & 0 deletions docs/README.md
@@ -89,3 +89,4 @@ SR No | Project | Author
82 | [Time To Load Website](https://github.com/chavarera/python-mini-projects/tree/master/projects/Time%20to%20load%20Website)| [Aditya Jetely](https://github.com/AdityaJ7)
83 | [Customer Loan Repayment Prediction](https://github.com/chavarera/python-mini-projects/tree/master/Notebooks/Customer_loan_repayment_problem)| [ART](https://github.com/Tomyzon1728)
84 | [Generate Wordcloud from Wikipedia Article](https://github.com/chavarera/python-mini-projects/tree/master/projects/Wikipedia%20Search%20Wordcloud)| [Naman Shah](https://github.com/namanshah01)
85 | [Number Guessing Game](https://github.com/chavarera/python-mini-projects/tree/master/projects/Number%20guessing%20game)| [Javokhirbek](https://github.com/leader2one)
14 changes: 14 additions & 0 deletions projects/Number guessing game/README.md
@@ -0,0 +1,14 @@
# Number Guessing Game

This game lets you test your luck and intuition :)
Your goal is to guess the number the computer has picked.

### Usage
Run `python main.py` from the command line after changing into the project directory.

### Sample
![Image](./image.png)

## *Author Name*

[Javokhir](https://github.com/leader2one/)
Binary file added projects/Number guessing game/image.png
54 changes: 54 additions & 0 deletions projects/Number guessing game/main.py
@@ -0,0 +1,54 @@
import random

print("Number guessing game")

# pick a random number between 1 and 9 (inclusive)
number = random.randint(1, 9)

# counter for how many guesses the user has taken
chances = 0

print("Guess a number (between 1 and 9):")

# keep asking until the user finds the number
while True:

    # read the user's guess
    guess = int(input())

    # count this attempt
    chances += 1

    # compare the guess with the generated number
    if guess == number:
        # correct guess: report the result with an f-string
        # and leave the loop with "break"
        print(
            f'CONGRATULATIONS! YOU HAVE GUESSED THE '
            f'NUMBER {number} IN {chances} ATTEMPTS!')
        break

    # the guess is smaller than the generated number
    elif guess < number:
        print("Your guess was too low: Guess a number higher than", guess)

    # the guess is greater than the generated number
    else:
        print("Your guess was too high: Guess a number lower than", guess)
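
The original comments mention giving the player five chances, but the merged loop runs until the number is found. A minimal sketch of a capped variant, assuming the same 1-to-9 range (`max_chances` is illustrative, not part of the submission):

import random

number = random.randint(1, 9)
max_chances = 5  # illustrative cap; the merged game has no limit

for chances in range(1, max_chances + 1):
    guess = int(input("Guess a number (between 1 and 9): "))
    if guess == number:
        print(f"Correct! You needed {chances} attempt(s).")
        break
    hint = "higher" if guess < number else "lower"
    print(f"Try a {hint} number.")
else:
    # for/else: this branch runs only if the loop never hit "break"
    print(f"Out of chances! The number was {number}.")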

4 changes: 2 additions & 2 deletions projects/web page summation/app.py
@@ -68,7 +68,7 @@ def writeCsv(data, LANGUAGE, SENTENCES_COUNT):
length = len(data)
position = data[0].index('website')
for i in range(1, length):
if i is 1:
if i == 1:
_data = data[0]
_data.append("summary")
newFileWriter.writerow(_data)
@@ -143,4 +143,4 @@ def main(argv=sys.argv):


if __name__ == '__main__':
main()
main()
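
The `is` → `==` fix is more than style: `is` tests object identity, and `i is 1` only worked because CPython caches small integers. CPython 3.8+ also emits a SyntaxWarning for it, and the flake8 gate added in this PR flags it as F632. A short sketch of the difference, with values built at runtime so constant folding cannot mask it:

a = int("1000")  # created at runtime
b = int("1000")
print(a == b)  # True: the values are equal
print(a is b)  # False in CPython: two distinct int objects
print(int("5") is int("5"))  # True, but only because CPython caches -5..256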
2 changes: 1 addition & 1 deletion projects/web page summation/utils/model.py
@@ -1,6 +1,6 @@
import tensorflow as tf
from tensorflow.contrib import rnn
#from utils import get_init_embedding
from utils import get_init_embedding


class Model(object):
30 changes: 1 addition & 29 deletions projects/web page summation/utils/prepare.py
@@ -1,19 +1,6 @@

import wget
import os
import tarfile
import gzip
import zipfile
import argparse


#parser = argparse.ArgumentParser()
#parser.add_argument("--glove", action="store_true")
#args = parser.parse_args()

# Extract data file
#with tarfile.open(default_path + "sumdata/train/summary.tar.gz", "r:gz") as tar:
# tar.extractall()
default_path = '.'

with gzip.open(default_path + "sumdata/train/train.article.txt.gz", "rb") as gz:
with open(default_path + "sumdata/train/train.article.txt", "wb") as out:
@@ -22,18 +9,3 @@
with gzip.open(default_path + "sumdata/train/train.title.txt.gz", "rb") as gz:
with open(default_path + "sumdata/train/train.title.txt", "wb") as out:
out.write(gz.read())


#if args.glove:
# glove_dir = "glove"
# glove_url = "https://nlp.stanford.edu/data/wordvecs/glove.42B.300d.zip"
#
# if not os.path.exists(glove_dir):
# os.mkdir(glove_dir)
#
# # Download glove vector
# wget.download(glove_url, out=glove_dir)
#
# # Extract glove file
# with zipfile.ZipFile(os.path.join("glove", "glove.42B.300d.zip"), "r") as z:
# z.extractall(glove_dir)
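
One thing this cleanup leaves untouched: with `default_path = '.'`, the plain string concatenation produces paths like `.sumdata/train/train.article.txt.gz` with no separator. A minimal sketch of the same extraction using `os.path.join`, which sidesteps that:

import gzip
import os

default_path = '.'


def gunzip(name):
    # decompress sumdata/train/<name>.gz into sumdata/train/<name>
    src = os.path.join(default_path, "sumdata", "train", name + ".gz")
    dst = os.path.join(default_path, "sumdata", "train", name)
    with gzip.open(src, "rb") as gz, open(dst, "wb") as out:
        out.write(gz.read())


for name in ("train.article.txt", "train.title.txt"):
    gunzip(name)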
37 changes: 15 additions & 22 deletions projects/web page summation/utils/summarize.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,6 @@
# -*- coding: utf-8 -*-
# load Dependancies

from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
@@ -13,25 +9,22 @@


def summarize(url=None, LANGUAGE='English', SENTENCES_COUNT=2):
parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
# or for plain text files
# parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)

summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
result = ''
for sentence in summarizer(parser.document, SENTENCES_COUNT):
parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
result = ''
for sentence in summarizer(parser.document, SENTENCES_COUNT):
result = result + ' ' + str(sentence)
try:
result = result + ' ' + str(sentence)
try:
result = result + ' ' + str(sentence)

except:
print(
except:
print(
'\n\n Invalid Entry!, please Ensure you enter a valid web link \n\n')
sys.stdout.flush()
return (
sys.stdout.flush()
return (
'\n\n Invalid Entry!, please Ensure you enter a valid web link \n\n')
print('\n\n'+str(url)+'\n\n'+str(result))
sys.stdout.flush()
return result
print('\n\n'+str(url)+'\n\n'+str(result))
sys.stdout.flush()
return result
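
Even re-indented, the merged function still repeats the `result` concatenation inside a bare `except` and leans on a `sys` import from context not shown here. A minimal sketch of the intended flow, assuming sumy's LexRank API as imported above (note sumy expects lowercase language names such as 'english'; the single try block is an assumption, not the author's code):

import sys

from sumy.parsers.html import HtmlParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words

ERROR_MSG = '\n\n Invalid Entry!, please Ensure you enter a valid web link \n\n'


def summarize(url=None, LANGUAGE='english', SENTENCES_COUNT=2):
    try:
        parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
        summarizer = Summarizer(Stemmer(LANGUAGE))
        summarizer.stop_words = get_stop_words(LANGUAGE)
        # join the top-ranked sentences into a single summary string
        result = ' '.join(
            str(sentence)
            for sentence in summarizer(parser.document, SENTENCES_COUNT))
    except Exception:
        print(ERROR_MSG)
        sys.stdout.flush()
        return ERROR_MSG
    print('\n\n' + str(url) + '\n\n' + result)
    sys.stdout.flush()
    return result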
15 changes: 6 additions & 9 deletions projects/web page summation/utils/test.py
@@ -1,13 +1,10 @@
import tensorflow as tf
import pickle
#from model import Model
#from utils import build_dict, build_dataset, batch_iter


# with open("args.pickle", "rb") as f:
# args = pickle.load(f)

from model import Model
from utils import build_dict, build_dataset, batch_iter, get_text_list
valid_article_path = '.'
valid_title_path = '.'
tf.reset_default_graph()
default_path = '.'


class args:
@@ -74,6 +71,6 @@ class args:
if word not in summary:
summary.append(word)
summary_array.append(" ".join(summary))
#print(" ".join(summary), file=f)
# print(" ".join(summary), file=f)

print('Summaries have been generated')
48 changes: 14 additions & 34 deletions projects/web page summation/utils/train.py
@@ -1,31 +1,10 @@
import os
import pickle
import argparse
import tensorflow as tf
import time
from model import Model
from utils import build_dict, build_dataset, batch_iter
start = time.perf_counter()
#from model import Model
#from utils import build_dict, build_dataset, batch_iter

# Uncomment next 2 lines to suppress error and Tensorflow info verbosity. Or change logging levels
# tf.logging.set_verbosity(tf.logging.FATAL)
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# def add_arguments(parser):
# parser.add_argument("--num_hidden", type=int, default=150, help="Network size.")
# parser.add_argument("--num_layers", type=int, default=2, help="Network depth.")
# parser.add_argument("--beam_width", type=int, default=10, help="Beam width for beam search decoder.")
# parser.add_argument("--glove", action="store_true", help="Use glove as initial word embedding.")
# parser.add_argument("--embedding_size", type=int, default=300, help="Word embedding size.")
#
# parser.add_argument("--learning_rate", type=float, default=1e-3, help="Learning rate.")
# parser.add_argument("--batch_size", type=int, default=64, help="Batch size.")
# parser.add_argument("--num_epochs", type=int, default=10, help="Number of epochs.")
# parser.add_argument("--keep_prob", type=float, default=0.8, help="Dropout keep prob.")
#
# parser.add_argument("--toy", action="store_true", help="Use only 50K samples of data")
#
# parser.add_argument("--with_model", action="store_true", help="Continue from previously saved model")
default_path = '.'


class args:
@@ -48,20 +27,16 @@ class args:
args.with_model = "store_true"


#parser = argparse.ArgumentParser()
# add_arguments(parser)
#args = parser.parse_args()
# with open("args.pickle", "wb") as f:
# pickle.dump(args, f)

if not os.path.exists(default_path + "saved_model"):
os.mkdir(default_path + "saved_model")
else:
# if args.with_model:
old_model_checkpoint_path = open(
default_path + 'saved_model/checkpoint', 'r')
old_model_checkpoint_path = "".join(
[default_path + "saved_model/", old_model_checkpoint_path.read().splitlines()[0].split('"')[1]])
[
default_path + "saved_model/",
old_model_checkpoint_path.read().splitlines()[0].split('"')[1]])


print("Building dictionary...")
@@ -98,9 +73,13 @@ class args:
map(lambda x: list(x) + [word_dict["</s>"]], batch_y))

batch_decoder_input = list(
map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_input))
map(
lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]],
batch_decoder_input))
batch_decoder_output = list(
map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_output))
map(
lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]],
batch_decoder_output))

train_feed_dict = {
model.batch_size: len(batch_x),
@@ -112,7 +91,8 @@ class args:
}

_, step, loss = sess.run(
[model.update, model.global_step, model.loss], feed_dict=train_feed_dict)
[model.update,
model.global_step, model.loss], feed_dict=train_feed_dict)

if step % 1000 == 0:
print("step {0}: loss = {1}".format(step, loss))
20 changes: 11 additions & 9 deletions projects/web page summation/utils/utils.py
@@ -2,11 +2,12 @@
import collections
import pickle
import numpy as np
from newspaper import Article
from nltk.tokenize import word_tokenize
from gensim.models.keyedvectors import KeyedVectors
from gensim.test.utils import get_tmpfile
from gensim.scripts.glove2word2vec import glove2word2vec
default_path = '.'
train_article_path = '.'
train_title_path = '.'
valid_article_path = '.'


def clean_str(sentence):
sentence = re.sub("[#.]+", "#", sentence)
@@ -55,7 +56,8 @@ def build_dict(step, toy=False):
return word_dict, reversed_dict, article_max_len, summary_max_len


def build_dataset(step, word_dict, article_max_len, summary_max_len, toy=False):
def build_dataset(
step, word_dict, article_max_len, summary_max_len, toy=False):
if step == "train":
article_list = get_text_list(train_article_path, toy)
title_list = get_text_list(train_title_path, toy)
@@ -91,11 +93,11 @@ def batch_iter(inputs, outputs, batch_size, num_epochs):


def get_init_embedding(reversed_dict, embedding_size):
#glove_file = default_path + "glove/glove.6B.300d.txt"
#word2vec_file = get_tmpfile(default_path + "word2vec_format.vec")
#glove2word2vec(glove_file, word2vec_file)
# glove_file = default_path + "glove/glove.6B.300d.txt"
# word2vec_file = get_tmpfile(default_path + "word2vec_format.vec")
# glove2word2vec(glove_file, word2vec_file)
print("Loading Glove vectors...")
#word_vectors = KeyedVectors.load_word2vec_format(word2vec_file)
# word_vectors = KeyedVectors.load_word2vec_format(word2vec_file)

with open(default_path + "glove/model_glove_300.pkl", 'rb') as handle:
word_vectors = pickle.load(handle)
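
The commented-out lines above hint at where `model_glove_300.pkl` comes from: converting a GloVe text file to word2vec format with gensim and loading it as KeyedVectors. A hedged sketch of producing that pickle under those assumptions (the paths are illustrative; this is not part of the PR):

import pickle

from gensim.models.keyedvectors import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec

# convert GloVe's text format to word2vec format (illustrative paths)
glove2word2vec("glove/glove.6B.300d.txt", "glove/word2vec_format.vec")
word_vectors = KeyedVectors.load_word2vec_format("glove/word2vec_format.vec")

# pickle the vectors so get_init_embedding() can load them in one call
with open("glove/model_glove_300.pkl", "wb") as handle:
    pickle.dump(word_vectors, handle)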