diff --git a/Model Code/Model.py b/Model Code/Model.py new file mode 100644 index 0000000..fd6bee8 --- /dev/null +++ b/Model Code/Model.py @@ -0,0 +1,316 @@ +import argparse +import numpy as np +import pandas as pd +import joblib +import tensorflow as tf +import time +import os +import warnings +import random +warnings.filterwarnings('ignore') + +# Define attack category mapping based on the provided table +ATTACK_CATEGORIES = { + 'Benign': 'Normal', + 'DrDoS_MSSQL': 'TCP-based Reflection DDoS', + 'DrDoS_DNS': 'TCP/UDP-based reflection DDoS', + 'DrDoS_LDAP': 'TCP/UDP-based reflection DDoS', + 'DrDoS_NetBIOS': 'TCP/UDP-based reflection DDoS', + 'DrDoS_SNMP': 'TCP/UDP-based reflection DDoS', + 'DrDoS_Portmap': 'TCP/UDP-based reflection DDoS', + 'CharGen': 'UDP-based Reflection DDoS', + 'DrDoS_NTP': 'UDP-based Reflection DDoS', + 'DrDoS_TFTP': 'UDP-based Reflection DDoS', + 'DDoS_SYN': 'TCP-based Exploitation DDoS', + 'DDoS_Web': 'TCP-based Exploitation DDoS', + 'DDoS_UDP_Lag': 'UDP-based Exploitation DDoS', + 'DrDoS_UDP': 'UDP-based Reflection DDoS', + 'DDoS_UDP': 'UDP-based Exploitation DDoS' +} + +# Define protocol mapping +PROTOCOL_MAP = { + 'TCP': 6, + 'UDP': 17 +} + +# Map attack types to protocols +ATTACK_PROTOCOLS = { + 'Benign': 'TCP', + 'DrDoS_MSSQL': 'TCP', + 'DrDoS_DNS': 'UDP', + 'DrDoS_LDAP': 'TCP', + 'DrDoS_NetBIOS': 'UDP', + 'DrDoS_SNMP': 'UDP', + 'DrDoS_Portmap': 'UDP', + 'CharGen': 'UDP', + 'DrDoS_NTP': 'UDP', + 'DrDoS_TFTP': 'UDP', + 'DDoS_SYN': 'TCP', + 'DDoS_Web': 'TCP', + 'DDoS_UDP_Lag': 'UDP', + 'DrDoS_UDP': 'UDP', + 'DDoS_UDP': 'UDP' +} +ATTACK_DESCRIPTIONS = { + # TCP-based Reflection DDoS + 'DrDoS_MSSQL': 'This is a TCP-based reflection DDoS attack that leverages exposed MSSQL servers to reflect and amplify traffic toward the victim. Restrict MSSQL access to trusted IPs, use firewalls, and apply rate-limiting on port 1433.', + + # TCP/UDP-based Reflection DDoS + 'DrDoS_DNS': 'This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic.', + 'DrDoS_LDAP': 'This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic.', + 'DrDoS_NetBIOS': 'This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic.', + 'DrDoS_SNMP': 'This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic.', + 'DrDoS_Portmap': 'This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic.', + + # UDP-based Reflection DDoS + 'CharGen': 'This is a UDP-based reflection DDoS attack where services like CharGen, NTP, TFTP, and generic UDP reflect large responses to spoofed requests. 
Disable unused UDP services, restrict access, and implement anti-spoofing measures.', + 'DrDoS_NTP': 'This is a UDP-based reflection DDoS attack where services like CharGen, NTP, TFTP, and generic UDP reflect large responses to spoofed requests. Disable unused UDP services, restrict access, and implement anti-spoofing measures.', + 'DrDoS_TFTP': 'This is a UDP-based reflection DDoS attack where services like CharGen, NTP, TFTP, and generic UDP reflect large responses to spoofed requests. Disable unused UDP services, restrict access, and implement anti-spoofing measures.', + 'DrDoS_UDP': 'This is a UDP-based reflection DDoS attack where services like CharGen, NTP, TFTP, and generic UDP reflect large responses to spoofed requests. Disable unused UDP services, restrict access, and implement anti-spoofing measures.', + + # UDP-based Exploitation DDoS + 'DDoS_UDP': 'This is a UDP-based exploitation DDoS attack where direct UDP floods overwhelm target systems with traffic. Use rate-limiting, deploy DDoS protection services, and implement traffic filtering at the perimeter.', + 'DDoS_UDP_Lag': 'This is a UDP-based exploitation DDoS attack where direct UDP floods overwhelm target systems with traffic. Use rate-limiting, deploy DDoS protection services, and implement traffic filtering at the perimeter.', + + # TCP-based Exploitation DDoS + 'DDoS_SYN': 'This is a TCP-based exploitation DDoS attack exploiting connection-based protocols (e.g., SYN floods, HTTP floods) to exhaust server resources. Enable SYN cookies, use stateful firewalls, and apply rate-limiting.', + 'DDoS_Web': 'This is a TCP-based exploitation DDoS attack exploiting connection-based protocols (e.g., SYN floods, HTTP floods) to exhaust server resources. Enable SYN cookies, use stateful firewalls, and apply rate-limiting.', + + # Benign + 'Benign': 'Normal network traffic exhibiting no signs of malicious behavior.' 
+} + + +def remove_columns(df): + print("Removing specified columns...") + + # Get the column names to drop (by index position) + columns_to_drop = df.columns[[0, 1, 2, 3, 4, 6]] + print(f"Columns being removed: {columns_to_drop.tolist()}") + + # Drop the columns + df_reduced = df.drop(columns=columns_to_drop) + + print(f"Original column count: {df.shape[1]}") + print(f"New column count: {df_reduced.shape[1]}") + + return df_reduced + +def preprocess_data(df, scaler, feature_cols): + """ + Apply preprocessing steps to the input dataframe + """ + print(f"Original data shape: {df.shape}") + + # Keep only required features + df_features = df[feature_cols].copy() + + # Handle missing values + print(f"Missing values before handling: {df_features.isnull().sum().sum()}") + df_features = df_features.fillna(0) # Fill NA with zeros + + # Convert all feature columns to numeric + df_features = df_features.apply(pd.to_numeric, errors='coerce') + + # Replace any remaining NA values after conversion + df_features = df_features.fillna(0) + + # Handle infinite values + inf_mask = np.isinf(df_features.values).any(axis=1) + print(f"Found {inf_mask.sum()} rows with infinite values") + + # Replace inf values with large finite values + df_features = df_features.replace([np.inf, -np.inf], np.finfo(np.float64).max) + + # Apply the same scaling as during training + df_scaled = pd.DataFrame( + scaler.transform(df_features), + columns=feature_cols + ) + + print(f"Processed data shape: {df_scaled.shape}") + return df_scaled + +def generate_random_ip(): + """Generate a random internal IP address for demonstration""" + return f"192.168.{random.randint(1, 254)}.{random.randint(1, 254)}" + +def generate_random_port(): + """Generate a random port from specified options""" + return random.choice([80, 12345, 12543]) + +def write_attack_info(attack_counts, output_dir="data"): + """Write attack information to a single text file""" + # Create output directory if it doesn't exist + os.makedirs(output_dir, exist_ok=True) + + # Generate a unique filename with timestamp + timestamp = int(time.time()) + # filename = f"{output_dir}/attack_summary_{timestamp}.txt" + filename = f"{output_dir}/attack_summary.txt" + + with open(filename, 'w') as f: + # Write a header + # f.write("==== DDoS Attack Detection Summary ====\n\n") + # f.write(f"Report generated: {time.ctime()}\n\n") + + # Write summary for each attack type + for attack_type, count in attack_counts.items(): + # Determine if this is an anomaly or benign + is_anomaly = "Benign" if attack_type == "Benign" else "Anomaly" + + # Get attack category + attack_category = ATTACK_CATEGORIES.get(attack_type, "Unknown") + + # Determine protocol + protocol_name = ATTACK_PROTOCOLS.get(attack_type, "TCP") + protocol_num = PROTOCOL_MAP.get(protocol_name, 6) + + # Generate description + description = ATTACK_DESCRIPTIONS.get(attack_type, "Unknown attack type detected.") + + # Generate random components for each attack type + dst_ip = "192.168.143.3" + dst_port = generate_random_port() + + # Write the attack information + # f.write(f"=== {attack_type} ===\n") + f.write(f"Anomaly or Benign: {is_anomaly}\n") + f.write(f"Type of Attack: {attack_type}\n") + f.write(f"Attack Count: {count}\n") + f.write(f"DST IP Address: {dst_ip}\n") + f.write(f"DST Port: {dst_port}\n") + f.write(f"Attack Category: {attack_category}\n") + f.write(f"Protocol: {protocol_num}\n") + f.write(f"Description: {description}\n\n") + + print(f"Attack summary written to {filename}") + return filename + +def 
detect_and_classify(df, model_dir='.', output_dir="data"): + """ + Main function to load models and perform DDoS detection and classification + """ + start_time = time.time() + + # Load preprocessing components + print("Loading preprocessing components...") + scaler = joblib.load(f'{model_dir}/scaler.joblib') + label_encoder = joblib.load(f'{model_dir}/label_encoder.joblib') + feature_cols = joblib.load(f'{model_dir}/feature_cols.joblib') + threshold = np.load(f'{model_dir}/anomaly_threshold.npy') + + # Load models + print("Loading models...") + autoencoder = tf.keras.models.load_model(f'{model_dir}/autoencoder_model.keras') + classifier = tf.keras.models.load_model(f'{model_dir}/classifier_model.keras') + + # Preprocess the data + print("Preprocessing data...") + X_processed = preprocess_data(df, scaler, feature_cols) + + # Convert to numpy and reshape for LSTM + X = X_processed.values + X_reshaped = X.reshape(X.shape[0], 1, X.shape[1]) + + # Step 3: Anomaly detection with autoencoder + print("Performing anomaly detection...") + X_pred = autoencoder.predict(X_reshaped) + reconstruction_errors = np.mean(np.abs(X_reshaped - X_pred), axis=(1, 2)) + + # Determine anomalies + is_anomaly = reconstruction_errors > threshold + + # Add results to the dataframe + results = pd.DataFrame({ + 'reconstruction_error': reconstruction_errors, + 'threshold': threshold, + 'is_anomaly': is_anomaly, + 'predicted_label': ['Benign'] * len(is_anomaly) # Default to benign + }) + + # Initialize attack count dictionary + attack_counts = {'Benign': sum(~is_anomaly)} + + # Step 4 & 5: Classify anomalies with DNN + if np.any(is_anomaly): + print(f"Classifying {np.sum(is_anomaly)} detected anomalies...") + anomaly_indices = np.where(is_anomaly)[0] + anomaly_data = X[anomaly_indices] + + # Predict attack types + attack_probs = classifier.predict(anomaly_data) + attack_types = np.argmax(attack_probs, axis=1) + + # Convert numeric predictions to labels + attack_labels = label_encoder.inverse_transform(attack_types) + + # Update results for anomalies + results.loc[anomaly_indices, 'predicted_label'] = attack_labels + + # Count attack types + for attack_type in attack_labels: + if attack_type in attack_counts: + attack_counts[attack_type] += 1 + else: + attack_counts[attack_type] = 1 + + # Calculate timing stats + end_time = time.time() + total_time = end_time - start_time + per_sample_time = total_time / len(df) if len(df) > 0 else 0 + + print(f"Processed {len(df)} samples in {total_time:.2f} seconds") + print(f"Average processing time: {per_sample_time*1000:.2f} ms per sample") + + # Create a summary report + summary_file = write_attack_info(attack_counts, output_dir) + + # print("\nDetection Results:") + # for attack_type, count in attack_counts.items(): + # print(f"{attack_type}: {count}") + + return results, summary_file + +def main(): + parser = argparse.ArgumentParser(description='DDoS Detection and Classification') + parser.add_argument('--input', type=str, required=True, help='Path to the input CSV file') + parser.add_argument('--output', type=str, default='detection_results.csv', help='Path to save the results') + parser.add_argument('--model_dir', type=str, default='.', help='Directory containing model files') + parser.add_argument('--report_dir', type=str, default='data', help='Directory to store attack reports') + parser.add_argument('--remove_columns', action='store_true', help='Remove columns 1-5 and 7 from the input CSV') + parser.add_argument('--save_reduced', type=str, help='Path to save the CSV with 
removed columns') + args = parser.parse_args() + + print(f"Loading test data from: {args.input}") + df = pd.read_csv(args.input) + + # Apply column removal if requested + if args.remove_columns: + df_reduced = remove_columns(df) + + # Save the reduced CSV if a path is specified + if args.save_reduced: + df_reduced.to_csv(args.save_reduced, index=False) + print(f"CSV with removed columns saved to: {args.save_reduced}") + + # Use the reduced dataframe for detection + df = df_reduced + + results, summary_file = detect_and_classify(df, args.model_dir, args.report_dir) + + # Combine original data with results + output_df = pd.concat([df, results], axis=1) + output_df.to_csv(args.output, index=False) + + print(f"Results saved to: {args.output}") + + # Display summary + benign_count = sum(results['predicted_label'] == 'Benign') + attack_count = sum(results['predicted_label'] != 'Benign') + # print(f"\nSummary: {benign_count} benign traffic, {attack_count} attack traffic detected") + print(f"\nAttack summary report has been saved to: {summary_file}") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/Model Code/anomaly_threshold.npy b/Model Code/anomaly_threshold.npy new file mode 100644 index 0000000..cb8eb56 Binary files /dev/null and b/Model Code/anomaly_threshold.npy differ diff --git a/Model Code/app.py b/Model Code/app.py new file mode 100644 index 0000000..bc5eed7 --- /dev/null +++ b/Model Code/app.py @@ -0,0 +1,226 @@ +# from flask import Flask, render_template +# import os + +# app = Flask(__name__) + +# # Hardcoded file path +# TEXT_FILE_PATH = "data/sample.txt" + +# # Function to read and parse the text file into records +# def parse_file(): +# records = [] +# attack_stats = {"yes": 0, "no": 0} +# ddos_types = {} +# ip_addresses = set() + +# if os.path.exists(TEXT_FILE_PATH): +# with open(TEXT_FILE_PATH, "r") as file: +# lines = file.readlines() + +# record = {} +# for line in lines: +# if line.strip(): +# key, value = line.split(":") +# record[key.strip()] = value.strip() + +# # If all keys are collected, add to records and reset +# if len(record) == 3: +# records.append(record) + +# # Process stats +# attack = record.get("Attack") +# attack_stats[attack] += 1 + +# ddos_type = record.get("Type of DDOS") +# ddos_types[ddos_type] = ddos_types.get(ddos_type, 0) + 1 + +# ip_address = record.get("IP address") +# ip_addresses.add(ip_address) + +# record = {} + +# return records, attack_stats, ddos_types, ip_addresses + +# @app.route("/") +# def index(): +# records, attack_stats, ddos_types, ip_addresses = parse_file() +# return render_template("index.html", records=records, attack_stats=attack_stats, ddos_types=ddos_types, ip_addresses=ip_addresses) + +# if __name__ == "__main__": +# app.run(debug=True) + + + + + + + + + + + +# from flask import Flask, render_template, jsonify +# import os + +# app = Flask(__name__) + +# TEXT_FILE_PATH = "data/sample.txt" + +# def parse_file(): +# records = [] +# attack_type_counter = {} +# anomaly_counter = {"Anomaly": 0, "Benign": 0} + +# if os.path.exists(TEXT_FILE_PATH): +# with open(TEXT_FILE_PATH, "r", encoding="utf-8") as file: +# content = file.read().strip() +# entries = content.split("\n\n") # split records by blank lines + +# for entry in entries: +# lines = entry.strip().split("\n") +# record = {} +# for line in lines: +# if ":" in line: +# key, value = line.split(":", 1) +# record[key.strip()] = value.strip() + +# if record: +# records.append(record) + +# attack_type = record.get("Type of Attack", "Unknown") +# 
attack_type_counter[attack_type] = attack_type_counter.get(attack_type, 0) + 1 + +# anomaly_status = record.get("Anomaly or Beningn", "Unknown") +# if "anomaly" in anomaly_status.lower(): +# anomaly_counter["Anomaly"] += 1 +# else: +# anomaly_counter["Benign"] += 1 + +# return records, attack_type_counter, anomaly_counter + +# @app.route('/') +# def index(): +# records, attack_type_counter, anomaly_counter = parse_file() +# return render_template('index.html', +# records=records, +# attack_type_counter=attack_type_counter, +# anomaly_counter=anomaly_counter) + +# @app.route('/get-data') +# def get_data(): +# records, attack_type_counter, anomaly_counter = parse_file() +# return jsonify({ +# "records": records, +# "attack_type_counter": attack_type_counter, +# "anomaly_counter": anomaly_counter +# }) + +# if __name__ == "__main__": +# app.run(debug=True) + + + + + + + + + + + + + + + + + + + + + +from flask import Flask, render_template, jsonify +import os + +app = Flask(__name__) + +TEXT_FILE_PATH = "data/sample.txt" + +@app.route("/") +def index(): + return render_template("index.html") + +@app.route("/get-data") +def get_data(): + data = [] + if os.path.exists(TEXT_FILE_PATH): + with open(TEXT_FILE_PATH, "r") as file: + lines = file.readlines() + record = {} + for line in lines: + line = line.strip() + if not line: # skip empty lines + continue + if line.startswith("Anomaly or Benign:"): + if record: + data.append(record) + record = {} + record["Anomaly or Benign"] = line.split(":", 1)[1].strip() + elif line.startswith("Type of Attack:"): + record["Type of Attack"] = line.split(":", 1)[1].strip() + elif line.startswith("Attack Count:"): + record["Attack Count"] = int(line.split(":", 1)[1].strip()) + elif line.startswith("DST IP Address:"): + record["DST IP Address"] = line.split(":", 1)[1].strip() + elif line.startswith("DST Port:"): + record["DST Port"] = line.split(":", 1)[1].strip() + elif line.startswith("Attack Category:"): + record["Attack Category"] = line.split(":", 1)[1].strip() + elif line.startswith("Protocol:"): + record["Protocol"] = line.split(":", 1)[1].strip() + elif line.startswith("Description:"): + record["Description"] = line.split(":", 1)[1].strip() + + if record: + data.append(record) + + return jsonify(data) + +# @app.route("/get-data") +# def get_data(): +# data = [] +# if os.path.exists(TEXT_FILE_PATH): +# with open(TEXT_FILE_PATH, "r") as file: +# lines = file.readlines() +# record = {} +# for line in lines: +# line = line.strip() +# if not line: # skip empty lines +# continue +# if line.startswith("Anomaly or Benign:"): +# if record: +# data.append(record) +# record = {} +# record["Anomaly or Benign"] = line.split(":", 1)[1].strip() +# elif line.startswith("Type of Attack:"): +# record["Type of Attack"] = line.split(":", 1)[1].strip() +# elif line.startswith("Attack Count:"): +# record["Attack Count"] = int(line.split(":", 1)[1].strip()) +# elif line.startswith("DST IP Address:"): +# record["DST IP Address"] = line.split(":", 1)[1].strip() +# elif line.startswith("DST Port:"): +# record["DST Port"] = line.split(":", 1)[1].strip() +# elif line.startswith("Attack Category:"): +# record["Attack Category"] = line.split(":", 1)[1].strip() +# elif line.startswith("Protocol:"): +# record["Protocol"] = line.split(":", 1)[1].strip() +# elif line.startswith("Description:"): +# record["Description"] = line.split(":", 1)[1].strip() + +# if record: +# data.append(record) + +# return jsonify(data) + + +if __name__ == "__main__": + app.run(debug=True) diff --git a/Model 
Code/attack_reports/attack_summary.txt b/Model Code/attack_reports/attack_summary.txt new file mode 100644 index 0000000..16daed3 --- /dev/null +++ b/Model Code/attack_reports/attack_summary.txt @@ -0,0 +1,36 @@ +Anomaly or Benign: Benign +Type of Attack: Benign +Attack Count: 42 +DST IP Address: 192.168.143.3 +DST Port: 12543 +Attack Category: Normal +Protocol: 6 +Description: Normal network traffic exhibiting no signs of malicious behavior. + +Anomaly or Benign: Anomaly +Type of Attack: DDoS_SYN +Attack Count: 63605 +DST IP Address: 192.168.143.3 +DST Port: 80 +Attack Category: TCP-based Exploitation DDoS +Protocol: 6 +Description: This is a TCP-based exploitation DDoS attack exploiting connection-based protocols (e.g., SYN floods, HTTP floods) to exhaust server resources. Enable SYN cookies, use stateful firewalls, and apply rate-limiting. + +Anomaly or Benign: Anomaly +Type of Attack: DDoS_UDP_Lag +Attack Count: 26 +DST IP Address: 192.168.143.3 +DST Port: 12345 +Attack Category: UDP-based Exploitation DDoS +Protocol: 17 +Description: This is a UDP-based exploitation DDoS attack where direct UDP floods overwhelm target systems with traffic. Use rate-limiting, deploy DDoS protection services, and implement traffic filtering at the perimeter. + +Anomaly or Benign: Anomaly +Type of Attack: DrDoS_Portmap +Attack Count: 11 +DST IP Address: 192.168.143.3 +DST Port: 80 +Attack Category: TCP/UDP-based reflection DDoS +Protocol: 17 +Description: This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic. + diff --git a/Model Code/autoencoder_model.keras b/Model Code/autoencoder_model.keras new file mode 100644 index 0000000..484147a Binary files /dev/null and b/Model Code/autoencoder_model.keras differ diff --git a/Model Code/classifier_model.keras b/Model Code/classifier_model.keras new file mode 100644 index 0000000..602efaf Binary files /dev/null and b/Model Code/classifier_model.keras differ diff --git a/Model Code/data/attack_summary.txt b/Model Code/data/attack_summary.txt new file mode 100644 index 0000000..3ac4d02 --- /dev/null +++ b/Model Code/data/attack_summary.txt @@ -0,0 +1,54 @@ +Anomaly or Benign: Benign +Type of Attack: Benign +Attack Count: 30152 +DST IP Address: 192.168.143.3 +DST Port: 12345 +Attack Category: Normal +Protocol: 6 +Description: Normal network traffic exhibiting no signs of malicious behavior. + +Anomaly or Benign: Anomaly +Type of Attack: DDoS_SYN +Attack Count: 9949 +DST IP Address: 192.168.143.3 +DST Port: 12543 +Attack Category: TCP-based Exploitation DDoS +Protocol: 6 +Description: This is a TCP-based exploitation DDoS attack exploiting connection-based protocols (e.g., SYN floods, HTTP floods) to exhaust server resources. Enable SYN cookies, use stateful firewalls, and apply rate-limiting. + +Anomaly or Benign: Anomaly +Type of Attack: DDoS_UDP_Lag +Attack Count: 1924 +DST IP Address: 192.168.143.3 +DST Port: 80 +Attack Category: UDP-based Exploitation DDoS +Protocol: 17 +Description: This is a UDP-based exploitation DDoS attack where direct UDP floods overwhelm target systems with traffic. Use rate-limiting, deploy DDoS protection services, and implement traffic filtering at the perimeter. 
+ +Anomaly or Benign: Anomaly +Type of Attack: DrDoS_Portmap +Attack Count: 16895 +DST IP Address: 192.168.143.3 +DST Port: 12543 +Attack Category: TCP/UDP-based reflection DDoS +Protocol: 17 +Description: This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic. + +Anomaly or Benign: Anomaly +Type of Attack: DrDoS_DNS +Attack Count: 7425 +DST IP Address: 192.168.143.3 +DST Port: 12345 +Attack Category: TCP/UDP-based reflection DDoS +Protocol: 17 +Description: This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic. + +Anomaly or Benign: Anomaly +Type of Attack: DrDoS_NTP +Attack Count: 12475 +DST IP Address: 192.168.143.3 +DST Port: 80 +Attack Category: UDP-based Reflection DDoS +Protocol: 17 +Description: This is a UDP-based reflection DDoS attack where services like CharGen, NTP, TFTP, and generic UDP reflect large responses to spoofed requests. Disable unused UDP services, restrict access, and implement anti-spoofing measures. + diff --git a/Model Code/data/sample.txt b/Model Code/data/sample.txt new file mode 100644 index 0000000..c9eaafc --- /dev/null +++ b/Model Code/data/sample.txt @@ -0,0 +1,44 @@ +Anomaly or Benign: Benign +Type of Attack: Benign +Attack Count: 1862 +DST IP Address: 192.168.143.3 +DST Port: 80 +Attack Category: Normal +Protocol: 6 +Description: Normal network traffic exhibiting no signs of malicious behavior. + +Anomaly or Benign: Anomaly +Type of Attack: DDoS_SYN +Attack Count: 1834 +DST IP Address: 192.168.143.3 +DST Port: 12345 +Attack Category: TCP-based Exploitation DDoS +Protocol: 6 +Description: This is a TCP-based exploitation DDoS attack exploiting connection-based protocols (e.g., SYN floods, HTTP floods) to exhaust server resources. Enable SYN cookies, use stateful firewalls, and apply rate-limiting. + +Anomaly or Benign: Anomaly +Type of Attack: DDoS_UDP_Lag +Attack Count: 2243 +DST IP Address: 192.168.143.3 +DST Port: 12345 +Attack Category: UDP-based Exploitation DDoS +Protocol: 17 +Description: This is a UDP-based exploitation DDoS attack where direct UDP floods overwhelm target systems with traffic. Use rate-limiting, deploy DDoS protection services, and implement traffic filtering at the perimeter. + +Anomaly or Benign: Anomaly +Type of Attack: DrDoS_DNS +Attack Count: 1252 +DST IP Address: 192.168.143.3 +DST Port: 12345 +Attack Category: TCP/UDP-based reflection DDoS +Protocol: 17 +Description: This is a TCP/UDP-based reflection DDoS attack using misconfigured or open services (like DNS, LDAP, NetBIOS, SNMP, Portmap) to amplify traffic. Secure the services, restrict access, apply rate-limiting, and block spoofed traffic. + +Anomaly or Benign: Anomaly +Type of Attack: DrDoS_UDP +Attack Count: 1875 +DST IP Address: 192.168.143.3 +DST Port: 80 +Attack Category: UDP-based Reflection DDoS +Protocol: 17 +Description: This is a UDP-based reflection DDoS attack where services like CharGen, NTP, TFTP, and generic UDP reflect large responses to spoofed requests. Disable unused UDP services, restrict access, and implement anti-spoofing measures. 
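The records above follow the key-value format that Model.py writes and that app.py's /get-data route parses into JSON for the dashboard. A minimal sketch of querying that route from Python (assuming the Flask dev server from app.py is running locally on its default port 5000 and that the requests library is installed; the URL and loop body are illustrative, not part of the repository):

    import requests  # assumed available; any HTTP client works

    # Each element mirrors one blank-line-separated record from data/sample.txt
    records = requests.get("http://127.0.0.1:5000/get-data").json()
    for r in records:
        print(r["Anomaly or Benign"], r["Type of Attack"], r["Attack Count"], r["Attack Category"])
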
diff --git a/Model Code/ddos_manager.py b/Model Code/ddos_manager.py new file mode 100644 index 0000000..c1652ff --- /dev/null +++ b/Model Code/ddos_manager.py @@ -0,0 +1,111 @@ +import argparse +import os +import subprocess +import threading +import time +from pathlib import Path + +# Ensure data directory exists +os.makedirs("data", exist_ok=True) +os.makedirs("attack_reports", exist_ok=True) + +def run_receiver(): + """Run the file receiver script""" + print("Starting file receiver...") + from receiver import receive_file + receive_file() + print("File receiver completed.") + +def run_model(input_file=None): + """Run the DDoS detection model""" + print("Starting DDoS detection model...") + + if input_file is None: + # Look for CSV files in the current directory + csv_files = list(Path('.').glob('*.csv')) + if not csv_files: + print("No CSV files found. Please provide an input file with --input") + return + input_file = str(csv_files[0]) + print(f"Using {input_file} as input") + + from Model import detect_and_classify + import pandas as pd + + # Load the data and run detection + df = pd.read_csv(input_file) + results, summary_file = detect_and_classify(df) + + print(f"Model processing completed. Results written to {summary_file}") + return summary_file + +def run_webapp(): + """Run the Flask web application""" + print("Starting Flask web application...") + from app import app + # Run Flask app in debug mode + app.run(debug=True, use_reloader=False) + +def main(): + parser = argparse.ArgumentParser(description='DDoS Detection System') + parser.add_argument('--mode', type=str, default='all', + choices=['all', 'receiver', 'model', 'webapp', 'model-webapp', 'receiver-model'], + help='Mode to run (default: all)') + parser.add_argument('--input', type=str, help='Input CSV file for the model') + args = parser.parse_args() + + if args.mode == 'receiver': + run_receiver() + + elif args.mode == 'model': + run_model(args.input) + + elif args.mode == 'webapp': + run_webapp() + + elif args.mode == 'model-webapp': + # Run the model and then start the webapp + summary_file = run_model(args.input) + run_webapp() + + elif args.mode == 'receiver-model': + # Run the receiver to get the file, then process with the model + run_receiver() + # Look for the most recently created CSV file + csv_files = list(Path('.').glob('*.csv')) + if csv_files: + # Get the most recently modified file + most_recent = max(csv_files, key=os.path.getmtime) + print(f"Processing most recently received file: {most_recent}") + run_model(str(most_recent)) + else: + print("No CSV files found after receiving. 
Cannot run model.") + + elif args.mode == 'all': + # Start receiver in a separate thread + receiver_thread = threading.Thread(target=run_receiver) + receiver_thread.daemon = True + receiver_thread.start() + + print("Waiting for file to be received...") + # Wait for some time for a file to be received + time.sleep(3) + + # Check for the file periodically + max_wait = 30 # Maximum wait time in seconds + start_time = time.time() + while time.time() - start_time < max_wait: + csv_files = list(Path('.').glob('*.csv')) + if csv_files: + # Get the most recently modified file + most_recent = max(csv_files, key=os.path.getmtime) + print(f"Processing file: {most_recent}") + run_model(str(most_recent)) + break + time.sleep(2) + + # Run the web app regardless of whether we received a file + run_webapp() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/Model Code/feature_cols.joblib b/Model Code/feature_cols.joblib new file mode 100644 index 0000000..6b83dca Binary files /dev/null and b/Model Code/feature_cols.joblib differ diff --git a/Model Code/label_encoder.joblib b/Model Code/label_encoder.joblib new file mode 100644 index 0000000..62b5e12 Binary files /dev/null and b/Model Code/label_encoder.joblib differ diff --git a/Model Code/receiver.py b/Model Code/receiver.py new file mode 100644 index 0000000..9d0bb31 --- /dev/null +++ b/Model Code/receiver.py @@ -0,0 +1,70 @@ +import socket +import os +import time + +def get_download_path(): + return r"./" + +def receive_file(): + server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + host = '0.0.0.0' + port = 12345 + + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1048576) + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + try: + server_socket.bind((host, port)) + server_socket.listen(1) + print(f"Waiting for incoming file on port {port}...") + + client_socket, address = server_socket.accept() + print(f"Connected to {address}") + client_socket.settimeout(600) + + header_data = client_socket.recv(1024).decode() + filename, filesize = header_data.split('::') + filesize = int(filesize) + + print(f"Receiving file: {filename} ({filesize/1048576:.2f} MB)") + + downloads_dir = get_download_path() + os.makedirs(downloads_dir, exist_ok=True) + save_path = os.path.join(downloads_dir, filename) + + start_time = time.time() + with open(save_path, 'wb') as f: + bytes_received = 0 + buffer_size = 262144 + + while bytes_received < filesize: + data = client_socket.recv(min(buffer_size, filesize - bytes_received)) + if not data: + break + f.write(data) + bytes_received += len(data) + + percent = (bytes_received/filesize)*100 + elapsed = time.time() - start_time + speed = bytes_received / (elapsed * 1024 * 1024) if elapsed > 0 else 0 + + print(f"Progress: {bytes_received/(1024*1024):.2f}/{filesize/(1024*1024):.2f} MB ({percent:.1f}%) - {speed:.2f} MB/s", end='\r') + + total_time = time.time() - start_time + avg_speed = filesize / (total_time * 1024 * 1024) if total_time > 0 else 0 + + print(f"\nFile saved as: {save_path}") + print(f"Transfer complete! 
Time: {total_time:.2f} seconds, Avg Speed: {avg_speed:.2f} MB/s") + + except Exception as e: + print(f"Error: {e}") + + finally: + try: + client_socket.close() + except: + pass + server_socket.close() + +if __name__ == "__main__": # Fixed the main block + receive_file() \ No newline at end of file diff --git a/Model Code/scaler.joblib b/Model Code/scaler.joblib new file mode 100644 index 0000000..aba166b Binary files /dev/null and b/Model Code/scaler.joblib differ diff --git a/Model Code/templates/index.html b/Model Code/templates/index.html new file mode 100644 index 0000000..372c30c --- /dev/null +++ b/Model Code/templates/index.html @@ -0,0 +1,264 @@ + + + + + + 🚀 Network Anomaly Dashboard + + + + +
+ [templates/index.html (264 lines): dashboard markup not preserved in this rendering; visible headings are "🛡️ Network Anomaly Detection Dashboard", the panels "Attack Types Distribution" and "Network Traffic (Anomaly vs Normal)", and a "📄 Detailed Records" section.]
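
For reference, a minimal usage sketch of the detection entry point defined in Model.py, run from inside the "Model Code" directory. It assumes the bundled artifacts loaded by detect_and_classify (scaler.joblib, label_encoder.joblib, feature_cols.joblib, anomaly_threshold.npy, and the two .keras models) are present there; flows.csv is a hypothetical input capture with the expected feature columns:

    import pandas as pd
    from Model import detect_and_classify

    df = pd.read_csv("flows.csv")  # hypothetical CSV export of flow features
    results, summary_file = detect_and_classify(df, model_dir=".", output_dir="data")
    print(results["predicted_label"].value_counts())  # per-class prediction counts
    print("Report written to:", summary_file)

The same flow is available from the command line via the argparse interface in main(), e.g. python Model.py --input flows.csv --model_dir . --report_dir data.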