Commit f829d7f
Updated the submodule and added some new functions
iampradiptaghosh committed Mar 10, 2018
1 parent 115a198 commit f829d7f
Showing 3 changed files with 271 additions and 1 deletion.
186 changes: 186 additions & 0 deletions scripts/k8s_get_service_ips.py
@@ -0,0 +1,186 @@
"""
* Copyright (c) 2018, Autonomous Networks Research Group. All rights reserved.
* contributors:
* Pradipta Ghosh
* Bhaskar Krishnamachari
* Read license file in main directory for more details
"""

import sys
sys.path.append("../")
import jupiter_config
sys.path.append(jupiter_config.CIRCE_PATH)

from readconfig import *
import yaml
from kubernetes import client, config
from pprint import *
from kubernetes.client.apis import core_v1_api
from kubernetes.client.rest import ApiException

"""
This function prints out all the profilers that are not running.
If all the profilers are running: return True; else return False.
"""
def get_all_profilers():

"""
This loads the node lists in use
"""
mapping = {}
path1 = jupiter_config.HERE + 'nodes.txt'
nodes = read_node_list(path1)

"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)

"""
Loop through the list of nodes and deletes the all profiler related k8 deployment, replicaset, pods, and service.
The deletion should follow this particular order for a proper removal.
You can always check if a service/pod/deployment is running after running this script via kubectl command.
E.g.,
kubectl get svc -n "namespace name"
kubectl get deployement -n "namespace name"
kubectl get replicaset -n "namespace name"
kubectl get pod -n "namespace name"
"""
for key in nodes:

# We have defined the namespace for deployments in jupiter_config
namespace = jupiter_config.PROFILER_NAMESPACE

# Get proper handles or pointers to the k8-python tool to call different functions.
api = client.ExtensionsV1beta1Api()
body = client.V1DeleteOptions()

        # First check if there is an existing profiler service with
        # the name = key in the respective namespace
label = "app=" + key + "profiler"

resp = None
api_2 = client.CoreV1Api()
try:
resp = api_2.read_namespaced_service(key, namespace)
        except ApiException as e:
            print("Exception when reading service %s: %s" % (key, e))
# if a service is running, kill it
if resp:
# print(resp.spec.cluster_ip)
mapping[key] = resp.spec.cluster_ip
return mapping

# mapping: node name -> cluster IP of that node's profiler service

def get_all_waves():
    """
    This function collects the cluster IPs of the per-node WAVE services.
    It returns a dict mapping each node name to the cluster IP of its WAVE service.
    """

    mapping = {}

"""
This loads the node lists in use
"""
path1 = jupiter_config.HERE + 'nodes.txt'
nodes = read_node_list(path1)

"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)

"""
Loop through the list of nodes and deletes the all profiler related k8 deployment, replicaset, pods, and service.
The deletion should follow this particular order for a proper removal.
You can always check if a service/pod/deployment is running after running this script via kubectl command.
E.g.,
kubectl get svc -n "namespace name"
kubectl get deployement -n "namespace name"
kubectl get replicaset -n "namespace name"
kubectl get pod -n "namespace name"
"""
for key in nodes:

# We have defined the namespace for deployments in jupiter_config
namespace = jupiter_config.WAVE_NAMESPACE

# Get proper handles or pointers to the k8-python tool to call different functions.
api = client.ExtensionsV1beta1Api()
body = client.V1DeleteOptions()

        # First check if there is an existing WAVE service with
        # the name = key in the respective namespace
label = "app=wave_" + key

resp = None
api_2 = client.CoreV1Api()
try:
resp = api_2.read_namespaced_service(key, namespace)
        except ApiException as e:
            print("Exception when reading service %s: %s" % (key, e))
# if a service is running, kill it
if resp:
# print(resp.spec.cluster_ip)
mapping[key] = resp.spec.cluster_ip
return mapping
# mapping: node name -> cluster IP of that node's WAVE service

def get_all_execs():
    """
    This function collects the cluster IP of the home (exec) service.
    It returns a dict with a single entry: 'home' -> cluster IP.
    """

    mapping = {}

"""
This loads the node lists in use
"""
path1 = jupiter_config.HERE + 'nodes.txt'
nodes = read_node_list(path1)

"""
This loads the kubernetes instance configuration.
In our case this is stored in admin.conf.
You should set the config file path in the jupiter_config.py file.
"""
config.load_kube_config(config_file = jupiter_config.KUBECONFIG_PATH)

"""
Loop through the list of nodes and deletes the all profiler related k8 deployment, replicaset, pods, and service.
The deletion should follow this particular order for a proper removal.
You can always check if a service/pod/deployment is running after running this script via kubectl command.
E.g.,
kubectl get svc -n "namespace name"
kubectl get deployement -n "namespace name"
kubectl get replicaset -n "namespace name"
kubectl get pod -n "namespace name"
"""

# We have defined the namespace for deployments in jupiter_config
namespace = jupiter_config.EXEC_NAMESPACE

# Get proper handles or pointers to the k8-python tool to call different functions.
api = client.ExtensionsV1beta1Api()
body = client.V1DeleteOptions()

    # First check if there is an existing home (exec) service
    # named "home" in the respective namespace
key = "home"

resp = None
api_2 = client.CoreV1Api()
try:
resp = api_2.read_namespaced_service(key, namespace)
    except ApiException as e:
        print("Exception when reading service %s: %s" % (key, e))
# if a service is running, kill it
if resp:
print(resp.spec.cluster_ip)
mapping[key] = resp.spec.cluster_ip

    # mapping: 'home' -> cluster IP of the home service, if it is running
return mapping


if __name__ == '__main__':
print(get_all_execs())
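# A minimal usage sketch (not part of this commit), assuming the profiler,
# WAVE, and home services have already been deployed:
#
#   profiler_ips = get_all_profilers()      # node name -> profiler service cluster IP
#   wave_ips = get_all_waves()               # node name -> WAVE service cluster IP
#   home_ip = get_all_execs().get('home')    # cluster IP of the home service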
84 changes: 84 additions & 0 deletions scripts/utilities.py
@@ -0,0 +1,84 @@
"""
* Copyright (c) 2018, Autonomous Networks Research Group. All rights reserved.
* contributors:
* Pradipta Ghosh
* Pranav Sakulkar
* Jason A Tran
* Bhaskar Krishnamachari
* Read license file in main directory for more details
"""

import sys
import jupiter_config
import time
import os
from os import path
from multiprocessing import Process
# from readconfig import k8s_read_config, read_config
from k8s_profiler_scheduler import *
from k8s_wave_scheduler import *
from k8s_circe_scheduler import *
from delete_all_circe_deployments import *
from delete_all_profilers import *
from delete_all_waves import *
from pprint import *
import requests
import json

"""
read the dag from the file input
"""
def k8s_read_dag(dag_info_file):

dag_info=[]
config_file = open(dag_info_file,'r')
dag_size = int(config_file.readline())

dag={}
for i, line in enumerate(config_file, 1):
dag_line = line.strip().split(" ")
if i == 1:
dag_info.append(dag_line[0])
dag.setdefault(dag_line[0], [])
for j in range(1,len(dag_line)):
dag[dag_line[0]].append(dag_line[j])
if i == dag_size:
break

dag_info.append(dag)
return dag_info
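
# Example (hypothetical) of the input format this parser, as written, expects:
# the first line gives the number of task lines, and each following line lists
# a task followed by its child tasks, separated by spaces, e.g.
#
#   3
#   task0 task1 task2
#   task1 task3
#   task2 task3
#
# for which k8s_read_dag() returns
#   ['task0', {'task0': ['task1', 'task2'], 'task1': ['task3'], 'task2': ['task3']}]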


def k8s_get_nodes(node_info_file):

nodes = {}
node_file = open(node_info_file, "r")
for line in node_file:
node_line = line.strip().split(" ")
nodes.setdefault(node_line[0], [])
for i in range(1, len(node_line)):
nodes[node_line[0]].append(node_line[i])
return nodes
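
# Example (hypothetical) node file content for k8s_get_nodes(): each line
# starts with a node name followed by its attributes (here a node IP, a
# username, and a password, as k8s_get_hosts() below assumes), e.g.
#
#   home 10.0.0.1 ubuntu node_password
#   node1 10.0.0.2 ubuntu node_password
#
# which yields {'home': ['10.0.0.1', 'ubuntu', 'node_password'],
#               'node1': ['10.0.0.2', 'ubuntu', 'node_password']}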


def k8s_get_hosts(dag_info_file, node_info_file, mapping):

dag_info = k8s_read_dag(dag_info_file)
nodes = k8s_get_nodes(node_info_file)
print(nodes)
hosts={}

for i in mapping:
#get task, node IP, username and password
print(i, mapping[i], nodes[mapping[i]])
hosts.setdefault(i,[])
hosts[i].append(i) # task
hosts[i].extend(nodes[mapping[i]]) # assigned node id

hosts.setdefault('home',[])
hosts['home'].append('home')
hosts['home'].extend(nodes.get('home'))
dag_info.append(hosts)
return dag_info
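
# Minimal usage sketch (hypothetical file names and mapping, assuming the
# formats illustrated above); mapping assigns each task to a node name taken
# from the node file:
#
#   mapping = {'task0': 'node1', 'task1': 'node1', 'task2': 'node1', 'task3': 'node1'}
#   dag_info = k8s_get_hosts('dag.txt', 'nodes.txt', mapping)
#
# dag_info then holds [entry_task, child_lists, hosts], where hosts maps each
# task (plus 'home') to [task_name, node_ip, username, password].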
