longformer-master.zip longformer-master.zip AR-Net-master.zip AR-Net-master.zip dockerfile.github.io-master (1).zip transformers-master.zip corpus = ["I Like Python because I can build AI applications", "I like Python because I can do data analytics","The cat sits on the ground","The cat walks on the sidewalk"] sentences = ['This framework generates embeddings for each input sentence','Sentence are passed as a list of string, ','The quick brown for jumps over the lazy dog.') tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") encoded input + tokenizer(sentences, padding=True, truncation=True, max_lengh=128, return_tensors='pt') with torch.no_grad{) : model_ouput = model(**encoded_input sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) util.pytorch_cos_sim(sentence_embeddings[1], sentence_embeddings[0],numpy()[0][0] model = SentenceTransformer('distilroberta-base-paraphrase-v1') embeddings = model.encode(sentences, convert_to_tensor=True) cosine_scores = util.pytorch_cos_sim(embeddings,embeddings) g = nx.from_numpy_matrix(cosine_scores.numpy()) centrality_scores = nx.degree_centrality(g) most_central_sentence_indices = np.argsort(centrality_scores) print("\n\nSummary:") with torch.no_grad(): model_output = model(**encoded_input) sentence1 = "I like Python because I can build AI applications" sentence2 = "I like Python because I can do data analytics" embedding1 = model.encode(sentence1, convert_to_tensor=True) embedding2 = model.encode(sentence2, convert_to_tensor=True) cosine_scores = utilpytorch_cos_sin(embedding1, embedding2) print("Sentence 1:", sentence1) print("Sentence 2:", sentence2) print("Similarity score:", cosine_scores.item()) tokenizer = AutoTokenizer.from_pretrained("sentence-transformers:bert-base-nli-mean-tokens") model = 
AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt') with torch.no_grad(): model_output = model(**encoded_input) sentence_embedding = mean_poolinhg(model-output, encoded-input['attention_mask']) util.pytorch_cos_sin(sentence_embedding[1], sentence_embedding[0]).numpy()0 0.71667016 model_output = model(encoded_input) sentence_embedding = mean_poolinhg(model-output, encoded-input['attention_mask']) util.pytorch_cos_sin(sentence_embedding[1], sentence_embedding[0]).numpy()0 0.71667016 import networkx as nx model = SentenceTransformer('distilroberta-base-paraphrase-v1') embedding = model.encode(sentences, convert-to-tensor=True) cosine_scores = util.pytorch_cos_sin(embeddings, embeddings) g = nx.from_numpy_matric(cosine_scores.numpy()) centrality_scores = nx.degree_centrality(g) most_central_sentence_indices = np.argsort(centrality_scores) print("\n\nSummary;") for idx in most_central_sentence_indices[0:4]: idx(sentences[idx].strip()) from sentence_transformers import SentenceTransformer, util import numpy as np from sentence_transformers import SentenceTransformer model = SentenceTransformer('distilroberta-base-parapharse-v1') embedding = model.encode(sentences, convert_to_tensor=True) cosine_scores = util.pytorch_cos_sin(embedding, embedding).numpy() print(cosine_scores) corpus_embeddings = model.encode(corpus, convert_to_tensor=True) sentence = "I like Javascript because I can build web applications" sentence_embedding = model.encode(sentence, convert_to_tensor=True) cos_scores = util.pytorch_cos_sim(sentence_embedding, corpus_embeddings)[0] top_results = np.argpartition(-cos_scores, range(top_k))[0:top_k] print("Sentence:",sentence, "\n") print("Top", top_k, "most similar sentences in corpus:") def gradient_descent(objective, derivative, solution = bounds[: , 0] + rand(len(bound for i in range(n_iter): gradient = derivative(solution) 
solution = solution - step_size * gr solution_eval = objective(solution) print('>%d f(%s) + %.5f' %(i, solut return [solution, solution_eval] objective function def objective(x) : return x2.0 corpus = ["I Like Python because I can build AI applications", "I like Python because I can do data analytics","The cat sits on the ground","The cat walks on the sidewalk"] sentences = ['This framework generates embeddings for each input sentence','Sentence are passed as a list of string, ','The quick brown for jumps over the lazy dog.') tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") encoded input + tokenizer(sentences, padding=True, truncation=True, max_lengh=128, return_tensors='pt') with torch.no_grad{) : model_ouput = model(**encoded_input sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) util.pytorch_cos_sim(sentence_embeddings[1], sentence_embeddings[0],numpy()[0][0] model = SentenceTransformer('distilroberta-base-paraphrase-v1') embeddings = model.encode(sentences, convert_to_tensor=True) cosine_scores = util.pytorch_cos_sim(embeddings,embeddings) g = nx.from_numpy_matrix(cosine_scores.numpy()) centrality_scores = nx.degree_centrality(g) most_central_sentence_indices = np.argsort(centrality_scores) print("\n\nSummary:") sentence 1 = "I Like Python because I can build AI applications" sentence 2 = "I Like Python because I can do data analytics" embedding1 = model.encode(sentence1, convert_to_tensor=True) embedding2 = model.encore(sentence2, convert_to_tensor=True) cosine scores = util.pytorch_cos_sim(embedding1, embedding2) print("Sentence 1:", sentence1) print("Sentence 2:", sentence2) print("Similarity score", cosine_scores.item()) sentence1 = ["I Like Python because I can build AI applications, "The cat sits on the ground"] sentence2 = [I Like Python because I can do data anaytics", "The cat walks on the 
sidewalk"] embedding1 = model.encode(sentence1, convert_to_tensor=True) embedding2 = model.encore(sentence2, convert_to_tensor=True) cosine scores= util.pytorch_cos_sim(embedding1, embedding2) for i in range(len(sentence1)) for j in range(len(sentence2)) print("Sentence 1:", sentences1[i]) print("Sentence 2:", sentences2[j]) print("similarity Score:", cosine_scores[i][j].item()) print()
- name: Perform Scan
  uses: ShiftLeftSecurity/scan-action@master
  env:
    WORKSPACE: ""
    # NOTE: secret name restored to the conventional GITHUB_TOKEN
    # (original text was corrupted by an unrelated pasted sentence).
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    SCAN_AUTO_BUILD: true
  with:
    output: reports
def gradient_descent(objective, derivative, bounds, n_iter, step_size):
    """Minimize `objective` with plain gradient descent.

    Args:
        objective: callable mapping a point (1-D array) to a scalar score.
        derivative: callable returning the gradient at a point.
        bounds: array of shape (n_dims, 2) holding [min, max] per dimension.
        n_iter: number of descent iterations to run.
        step_size: learning rate multiplying the gradient each step.

    Returns:
        [solution, solution_eval]: the final point and its objective value.

    NOTE(review): reconstructed — the original paste truncated the parameter
    list and several statements mid-line; confirm against the intended source.
    """
    # Random starting point drawn uniformly inside the bounds.
    solution = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
    # Evaluate up front so the return value is defined even when n_iter == 0.
    solution_eval = objective(solution)
    for i in range(n_iter):
        gradient = derivative(solution)
        # Step downhill against the gradient.
        solution = solution - step_size * gradient
        solution_eval = objective(solution)
        # fixed: original printed '... + %.5f'; `+` should be `=`
        print('>%d f(%s) = %.5f' % (i, solution, solution_eval))
    return [solution, solution_eval]


# objective function (fixed: `x2.0` was a syntax error for x ** 2.0)
def objective(x):
    """Quadratic test objective f(x) = x^2, element-wise on arrays."""
    return x ** 2.0
...
# Sample the objective over [r_min, r_max] in 0.1 steps and plot the curve.
r_min, r_max = -1.0, 1.0
inputs = arange(r_min, r_max+0.1, 0.1)
# fixed: the closing parenthesis was missing in the original
results = objective(inputs)
...
pyplot.plot(inputs, results)
pyplot.show()
from numpy import arange
from matplotlib import pyplot
def objective(x):
    """Return f(x) = x**2 (fixed: `x2.0` was a syntax error; body also lost its indent)."""
    return x ** 2.0
pyplot.plot(inputs, results)
pyplot.show()
from numpy import arange
from matplotlib import pyplot
def objective(x):
    """Return f(x) = x**2 (fixed: `x2.0` was a syntax error; body also lost its indent)."""
    return x ** 2.0
pyplot.plot(inputs, results)
# show the plot
pyplot.show()
from numpy import arange
from matplotlib import pyplot
def objective(x):
    """Return f(x) = x**2 (fixed: `x2.0` was a syntax error; body also lost its indent)."""
    return x ** 2.0
# Sample the objective on [-1, 1] in 0.1 steps and plot the curve.
r_min, r_max = -1.0, 1.0
inputs = arange(r_min, r_max+0.1, 0.1)
# `objective` is applied element-wise to the input array.
results = objective(inputs)
pyplot.plot(inputs, results)
# Render the figure.
pyplot.show()
def derivative(x):
    """Gradient of the quadratic objective: f'(x) = 2x."""
    return 2.0 * x
import numpy as np
# Report the nearest corpus sentences found by the semantic search above.
# fixed: `top_results(0:top_k]` was invalid syntax and the loop body was
# missing from the paste — reconstructed as the usual report loop.
for idx in top_results[0:top_k]:
    print(corpus[idx].strip())

# Paired sentence lists for pairwise cosine-similarity scoring.
sentence1 = ["I like Python because I can build AI applications", "The cat sits on the ground"]
sentence2 = ["I like Python because I can do data analytics", "The cat walks on the sidewalk"]

# Corpus for semantic search over sentence embeddings.
corpus = ["I like Python because I can build AI applications",
          "I like Python because I can do data analytics",
          "The cat walks on the sidewalk"]
sentences = ['This framework generates embedding for each input semtence', 'Sentences are passed as a list of string, ', 'The quick brown fox jumps over the lazy dog, '] sentences = ['This framework generates embeddings for each input sentence', 'Each embedding has a point in the semantic space', 'Sentences are passed as a list of string.] fastText-master (2).zip apps-main.zip human-eval-master.zip Polygames-main.zip simclr-master.zip lightly-master.zip gluon-ts-master.zip transformers-master (4).zip Uploading transformers-master (5).zip… longformer-master (1).zip Uploading transformers-master (4).zip… Uploading transformers-master (6).zip… pytorch-master.zip fastText-master (3).zip fmin_adam-master.zip longformer-master.zip longformer-master.zip AR-Net-master.zip AR-Net-master.zip dockerfile.github.io-master (1).zip transformers-master.zip corpus = ["I Like Python because I can build AI applications", "I like Python because I can do data analytics","The cat sits on the ground","The cat walks on the sidewalk"] sentences = ['This framework generates embeddings for each input sentence','Sentence are passed as a list of string, ','The quick brown for jumps over the lazy dog.') tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens") encoded input + tokenizer(sentences, padding=True, truncation=True, max_lengh=128, return_tensors='pt') with torch.no_grad{) : model_ouput = model(**encoded_input sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) util.pytorch_cos_sim(sentence_embeddings[1], sentence_embeddings[0],numpy()[0][0] model = SentenceTransformer('distilroberta-base-paraphrase-v1') embeddings = model.encode(sentences, convert_to_tensor=True) cosine_scores = util.pytorch_cos_sim(embeddings,embeddings) g = nx.from_numpy_matrix(cosine_scores.numpy()) centrality_scores = nx.degree_centrality(g) 
most_central_sentence_indices = np.argsort(centrality_scores) print("\n\nSummary:") sentence 1 = "I Like Python because I can build AI applications" sentence 2 = "I Like Python because I can do data analytics" embedding1 = model.encode(sentence1, convert_to_tensor=True) embedding2 = model.encore(sentence2, convert_to_tensor=True) cosine scores = util.pytorch_cos_sim(embedding1, embedding2) print("Sentence 1:", sentence1) print("Sentence 2:", sentence2) print("Similarity score", cosine_scores.item()) sentence1 = ["I Like Python because I can build AI applications, "The cat sits on the ground"] sentence2 = [I Like Python because I can do data anaytics", "The cat walks on the sidewalk"] embedding1 = model.encode(sentence1, convert_to_tensor=True) embedding2 = model.encore(sentence2, convert_to_tensor=True) cosine scores= util.pytorch_cos_sim(embedding1, embedding2) for i in range(len(sentence1)) for j in range(len(sentence2)) print("Sentence 1:", sentences1[i]) print("Sentence 2:", sentences2[j]) print("similarity Score:", cosine_scores[i][j].item()) print() - name: Perform Scan
  uses: ShiftLeftSecurity/scan-action@master
  env:
    WORKSPACE: ""
    # NOTE: secret name restored to the conventional GITHUB_TOKEN
    # (original text was corrupted by an unrelated pasted sentence).
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    SCAN_AUTO_BUILD: true
  with:
    output: reports
def gradient_descent(objective, derivative, bounds, n_iter, step_size):
    """Minimize `objective` with plain gradient descent.

    Args:
        objective: callable mapping a point (1-D array) to a scalar score.
        derivative: callable returning the gradient at a point.
        bounds: array of shape (n_dims, 2) holding [min, max] per dimension.
        n_iter: number of descent iterations to run.
        step_size: learning rate multiplying the gradient each step.

    Returns:
        [solution, solution_eval]: the final point and its objective value.

    NOTE(review): reconstructed — the original paste truncated the parameter
    list and several statements mid-line; confirm against the intended source.
    """
    # Random starting point drawn uniformly inside the bounds.
    solution = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
    # Evaluate up front so the return value is defined even when n_iter == 0.
    solution_eval = objective(solution)
    for i in range(n_iter):
        gradient = derivative(solution)
        # Step downhill against the gradient.
        solution = solution - step_size * gradient
        solution_eval = objective(solution)
        # fixed: original printed '... + %.5f'; `+` should be `=`
        print('>%d f(%s) = %.5f' % (i, solution, solution_eval))
    return [solution, solution_eval]


# objective function (fixed: `x2.0` was a syntax error for x ** 2.0)
def objective(x):
    """Quadratic test objective f(x) = x^2, element-wise on arrays."""
    return x ** 2.0
...
# Sample the objective over [r_min, r_max] in 0.1 steps and plot the curve.
r_min, r_max = -1.0, 1.0
inputs = arange(r_min, r_max+0.1, 0.1)
# fixed: the closing parenthesis was missing in the original
results = objective(inputs)
...
pyplot.plot(inputs, results)
pyplot.show()
from numpy import arange
from matplotlib import pyplot
def objective(x):
    """Return f(x) = x**2 (fixed: `x2.0` was a syntax error; body also lost its indent)."""
    return x ** 2.0
pyplot.plot(inputs, results)
pyplot.show()
from numpy import arange
from matplotlib import pyplot
def objective(x):
    """Return f(x) = x**2 (fixed: `x2.0` was a syntax error; body also lost its indent)."""
    return x ** 2.0
pyplot.plot(inputs, results)
# show the plot
pyplot.show()
from numpy import arange
from matplotlib import pyplot
def objective(x):
    """Return f(x) = x**2 (fixed: `x2.0` was a syntax error; body also lost its indent)."""
    return x ** 2.0
# Sample the objective on [-1, 1] in 0.1 steps and plot the curve.
r_min, r_max = -1.0, 1.0
inputs = arange(r_min, r_max+0.1, 0.1)
# `objective` is applied element-wise to the input array.
results = objective(inputs)
pyplot.plot(inputs, results)
# Render the figure.
pyplot.show()
def derivative(x):
    """Gradient of the quadratic objective: f'(x) = 2x."""
    return 2.0 * x
import numpy as np
# Semantic search: embed a query sentence and rank corpus sentences by
# cosine similarity.
# NOTE(review): relies on SentenceTransformer / util from the
# sentence_transformers package and on `sentences` / `corpus`, all defined
# elsewhere in this file.
model = SentenceTransformer('distilroberta-base-paraphrase-v1')  # fixed: closing quote was missing
embeddings = model.encode(sentences, convert_to_tensor=True)
# fixed: util.pytorch_cos_sin -> util.pytorch_cos_sim (typo)
cosine_scores = util.pytorch_cos_sim(embeddings, embeddings).numpy()
corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
sentence = "I like Javascript because I can build web applications"
sentence_embedding = model.encode(sentence, convert_to_tensor=True)
cos_scores = util.pytorch_cos_sim(sentence_embedding, corpus_embeddings)[0]
top_k = 3  # fixed: top_k was used below but never defined anywhere
top_results = np.argpartition(-cos_scores, range(top_k))[0:top_k]
print("Sentence:",sentence, "\n")
print("Top", top_k, "most similar sentences in corpus:")
# Report the nearest corpus sentences found by the semantic search above.
# fixed: `top_results(0:top_k]` was invalid syntax and the loop body was
# missing from the paste — reconstructed as the usual report loop.
for idx in top_results[0:top_k]:
    print(corpus[idx].strip())

# Paired sentence lists for pairwise cosine-similarity scoring.
sentence1 = ["I like Python because I can build AI applications", "The cat sits on the ground"]
sentence2 = ["I like Python because I can do data analytics", "The cat walks on the sidewalk"]

# Corpus for semantic search over sentence embeddings.
corpus = ["I like Python because I can build AI applications",
          "I like Python because I can do data analytics",
          "The cat walks on the sidewalk"]
sentences = ['This framework generates embedding for each input semtence', 'Sentences are passed as a list of string, ', 'The quick brown fox jumps over the lazy dog, '] sentences = ['This framework generates embeddings for each input sentence', 'Each embedding has a point in the semantic space', 'Sentences are passed as a list of string.]
fastText-master (2).zip apps-main.zip human-eval-master.zip Polygames-main.zip simclr-master.zip lightly-master.zip gluon-ts-master.zip transformers-master (4).zip Uploading transformers-master (5).zip… longformer-master (1).zip Uploading transformers-master (4).zip… Uploading transformers-master (6).zip… pytorch-master.zip fastText-master (3).zip fmin_adam-master.zip longformer-master.zip longformer-master.zip AR-Net-master.zip AR-Net-master.zip dockerfile.github.io-master (1).zip transformers-master.zip fastText-master (2).zip apps-main.zip human-eval-master.zip Polygames-main.zip simclr-master.zip lightly-master.zip gluon-ts-master.zip transformers-master (4).zip Uploading transformers-master (5).zip… longformer-master (1).zip Uploading transformers-master (4).zip… Uploading transformers-master (6).zip… https://rebornix.visualstudio.com/DefaultCollection/Pull%20Request/_git/Pull%20Request git remote add origin https://rebornix.visualstudio.com/DefaultCollection/Pull%20Request/_git/Pull%20Request git push -u origin --all docker pull privatebin/nginx-fpm-alpine docker run -d --restart="always" --read-only -p 8080:8080 -v $PWD/privatebin-data:/srv/data privatebin/nginx-fpm-alpine docker run -d --restart="always" --read-only -p 8080:8080 -v $PWD/conf.php:/srv/cfg/conf.php:ro -v $PWD/privatebin-data:/srv/data privatebin/nginx-fpm-alpine docker build -t privatebin/nginx-fpm-alpine .