diff --git a/Dashboard/__pycache__/views.cpython-310.pyc b/Dashboard/__pycache__/views.cpython-310.pyc index 2b96af5..a0bfd00 100644 Binary files a/Dashboard/__pycache__/views.cpython-310.pyc and b/Dashboard/__pycache__/views.cpython-310.pyc differ diff --git a/Dashboard/views.py b/Dashboard/views.py index aa09ab6..566f938 100644 --- a/Dashboard/views.py +++ b/Dashboard/views.py @@ -5,6 +5,7 @@ import io import csv import json from django.http import HttpResponse +from django.http import HttpResponseForbidden import os import botocore from django.http import JsonResponse @@ -12,16 +13,22 @@ from django.conf import settings import re import random from cryptography.fernet import Fernet -from django.template.loader import render_to_string from django.http import JsonResponse from Accounts.models import UserProfile from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.decorators.cache import never_cache +from django.contrib.sessions.models import Session +from rest_framework.authentication import SessionAuthentication, BasicAuthentication +from django.views.decorators.csrf import csrf_exempt +from rest_framework.decorators import api_view, authentication_classes, permission_classes +from rest_framework.permissions import IsAuthenticated from .models import* from .serializers import* from rest_framework import generics from django.shortcuts import render,redirect,get_object_or_404 +from django.http import HttpResponse +from django.template.loader import render_to_string from django.views.decorators.csrf import csrf_exempt import pytz from datetime import datetime @@ -34,7 +41,6 @@ import csv from rest_framework.decorators import api_view from django.core.files.storage import default_storage from django.core.files.base import ContentFile -from Device .models import Devices import pandas as pd import time @@ -69,15 +75,52 @@ def home(request): def navbar(request): return render(request, 'navbar/nav.html') -#=========================================================================================== +#================================================================ @never_cache +@csrf_exempt @login_required(login_url='login') -# def ddos(request): +# def ddos(request, device_id=None): +# # Get the logged-in user's profile +# user_profile = request.user.userprofile +# # Fetch the list of devices associated with the logged-in user +# logged_in_devices = Devices.objects.filter(used_by=user_profile).order_by('-created_at') + +# # Debugging: Print the list of devices +# print(f"Logged-in user's devices: {logged_in_devices}") + +# # Check if the user has more than one device +# has_multiple_devices = logged_in_devices.count() > 1 + +# # If the frontend passed a device_id and the user has multiple devices, use it +# if device_id and has_multiple_devices: +# try: +# # Fetch the device from the database that belongs to the logged-in user +# device = get_object_or_404(Devices, id=device_id, used_by=user_profile) +# print(f"Using passed device with ID: {device.id}") +# logged_in_device_id = device.id +# logged_in_device_name = device.device_name +# except Devices.DoesNotExist: +# print(f"Device with ID {device_id} not found or doesn't belong to the logged-in user.") +# return HttpResponseForbidden("You do not have permission to view this device's data.") +# else: +# # If no device_id is passed or user has only one device, get the most recent device +# if logged_in_devices.exists(): +# logged_in_device = logged_in_devices.first() # Get the most 
recent device +# logged_in_device_id = logged_in_device.id +# logged_in_device_name = logged_in_device.device_name +# print(f"Using the most recent device: {logged_in_device_name} (ID: {logged_in_device_id})") +# else: +# # If no devices found, set ID as None and name as "Unknown Device" +# logged_in_device_id = None +# logged_in_device_name = "Unknown Device" +# print("No devices found for the logged-in user.") + +# # Path to your DDoS prediction CSV file # file_path = 'media/ddos_predictions/predictions.csv' # data = pd.read_csv(file_path) -# # Create a mapping for protocol names to their short forms +# # Create a mapping for protocol names to their short forms # protocol_mapping = { # "Protocol_ICMP": "ICMP", # "Protocol_TCP": "TCP", # "Protocol_UDP": "UDP", # "Protocol_HTTP": "HTTP", # "Protocol_HTTPS": "HTTPS", # "Protocol_SSH": "SSH", # "Protocol_DHCP": "DHCP", # "Protocol_FTP": "FTP", # "Protocol_SMTP": "SMTP", # "Protocol_POP3": "POP3", # "Protocol_IMAP": "IMAP", @@ -93,7 +136,88 @@ def navbar(request): # "Protocol_DNS": "DNS" # } +# # Sum up the DDoS-related columns +# ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] +# ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} +# ddos_sums['byteperflow'] /= 15 +# ddos_sums['tot_kbps'] /= 15 +# # Get the source and destination IP counts +# src_ip_counts = data['src_ip'].value_counts() +# src_ip_dict = src_ip_counts.to_dict() + +# dest_ip_counts = data['dst_ip'].value_counts() +# dest_ip_dict = dest_ip_counts.to_dict() + +# # Get protocol counts +# protocol_columns = data.columns[7:19] +# protocol_counts = {} +# for protocol in protocol_columns: +# short_form = protocol_mapping.get(protocol, protocol) # Default to the original name if not found +# protocol_counts[short_form] = int((data[protocol] == 1).sum()) + +# # Filtered data where probability > 0.9 +# filtered_data = data[data['probability'] > 0.9] +# src_ip_counts2 = filtered_data['src_ip'].value_counts() +# src_ip_dict2 = src_ip_counts2.to_dict() + +# # Pass the logged-in user's device information (device ID and device name) to the template +# return render(request, 'ddos/ddos.html', { +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, +# 'logged_in_device_id': logged_in_device_id, # Device ID passed here +# 'logged_in_device_name': logged_in_device_name, # Device name +# 'has_multiple_devices': has_multiple_devices # Pass this flag to the frontend +# }) + +# def ddos(request): +# print("Inside the ddos view...") + + + +# # Construct the file path for the selected device's DDoS predictions +# response_data = None +# if request.method == "POST": +# try: +# response_data = json.loads(request.body) +# except json.JSONDecodeError: +# return JsonResponse({'error': 'Invalid JSON'}, status=400) +# if response_data: +# file_path = 'media/ddos_predictions/predictions.csv' + +# else: +# print('No CSV file available') + + + + +# file_path = f'media/ddos_predictions/predictions.csv' + +# # Read the CSV file for the selected device +# try: +# data = pd.read_csv(file_path) +# except FileNotFoundError: +# print(f"File not found: {file_path}") +# return HttpResponse(status=404) + +# # Process the data and generate DDoS statistics (as before) +# protocol_mapping = { +# "Protocol_ICMP": "ICMP", +# "Protocol_TCP": "TCP", +# "Protocol_UDP": "UDP", +# "Protocol_HTTP": "HTTP", +# "Protocol_HTTPS": "HTTPS", +# "Protocol_SSH": "SSH", +# "Protocol_DHCP": "DHCP", +# "Protocol_FTP": "FTP", +# "Protocol_SMTP": "SMTP", +# "Protocol_POP3": "POP3", +# "Protocol_IMAP": "IMAP", +# "Protocol_DNS": "DNS" +# } # ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 
'tot_dur'] # ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} @@ -103,32 +227,138 @@ def navbar(request): # src_ip_counts = data['src_ip'].value_counts() # src_ip_dict = src_ip_counts.to_dict() - # dest_ip_counts = data['dst_ip'].value_counts() # dest_ip_dict = dest_ip_counts.to_dict() - # protocol_columns = data.columns[7:19] # protocol_counts = {} # for protocol in protocol_columns: -# short_form = protocol_mapping.get(protocol, protocol) # Default to the original name if not found +# short_form = protocol_mapping.get(protocol, protocol) # protocol_counts[short_form] = int((data[protocol] == 1).sum()) -# print(protocol_counts) # filtered_data = data[data['probability'] > 0.9] # src_ip_counts2 = filtered_data['src_ip'].value_counts() # src_ip_dict2 = src_ip_counts2.to_dict() +# # Return the response with the DDoS data +# try: +# return render(request, 'ddos/ddos.html', { +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, + +# }) +# except Exception as e: +# print(f"Error rendering template: {e}") +# return HttpResponseForbidden("Internal Server Error occurred while rendering the page.") +#================================================================ - +# @never_cache +# @csrf_exempt +# @login_required(login_url='login') +# def ddos(request): +# print("Inside the ddos view...") +# device_id = None # Initialize device_id +# print('device id',device_id) - +# if request.method == "POST": +# try: +# # Try to load the request body as JSON +# response_data = json.loads(request.body) +# if 'device_id' in response_data: +# device_id = int(response_data['device_id']) # Set the device_id from POST +# print(f"Device ID received from POST: {device_id}") +# else: +# return JsonResponse({'error': 'Device ID is required'}, status=400) +# except json.JSONDecodeError: +# return JsonResponse({'error': 'Invalid JSON'}, status=400) +# # Only fetch the recent device if no device_id was set from POST +# if not device_id: +# print("No device ID found from POST data. Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404) - +# # Construct the file path for the device's DDoS prediction CSV file +# file_path = os.path.join('media', 'ddos_predictions', str(device_id), 'predictions.csv') +# print(f"Constructed file path: {file_path}") -# return render(request, 'ddos/ddos.html',{'ddos_sums': ddos_sums,'src_ip_dict' : src_ip_dict , 'dest_ip_dict' : dest_ip_dict , 'protocol_counts' : protocol_counts,'src_ip_dict2' : src_ip_dict2}) +# # Check if the file exists +# if not os.path.exists(file_path): +# print(f"File not found at path: {file_path}") +# return JsonResponse({'error': f"File not found for device ID {device_id}"}, status=404) + +# # Attempt to read the CSV file +# try: +# data = pd.read_csv(file_path) +# print(f"Data loaded successfully. 
First rows:\n{data.head()}") +# except pd.errors.EmptyDataError: +# print(f"CSV file is empty: {file_path}") +# return JsonResponse({'error': 'CSV file is empty'}, status=400) +# except Exception as e: +# print(f"Unexpected error reading CSV: {e}") +# return JsonResponse({'error': 'Error reading the CSV file'}, status=500) + +# # Process the CSV data +# protocol_mapping = { +# "Protocol_ICMP": "ICMP", +# "Protocol_TCP": "TCP", +# "Protocol_UDP": "UDP", +# "Protocol_HTTP": "HTTP", +# "Protocol_HTTPS": "HTTPS", +# "Protocol_SSH": "SSH", +# "Protocol_DHCP": "DHCP", +# "Protocol_FTP": "FTP", +# "Protocol_SMTP": "SMTP", +# "Protocol_POP3": "POP3", +# "Protocol_IMAP": "IMAP", +# "Protocol_DNS": "DNS" +# } + +# ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] +# ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} +# ddos_sums['byteperflow'] /= 15 +# ddos_sums['tot_kbps'] /= 15 + +# src_ip_counts = data['src_ip'].value_counts() +# src_ip_dict = src_ip_counts.to_dict() + +# dest_ip_counts = data['dst_ip'].value_counts() +# dest_ip_dict = dest_ip_counts.to_dict() + +# protocol_columns = data.columns[7:19] +# protocol_counts = {} +# for protocol in protocol_columns: +# short_form = protocol_mapping.get(protocol, protocol) +# protocol_counts[short_form] = int((data[protocol] == 1).sum()) + +# # Filter data where the probability is above 0.9 +# filtered_data = data[data['probability'] > 0.9] +# src_ip_counts2 = filtered_data['src_ip'].value_counts() +# src_ip_dict2 = src_ip_counts2.to_dict() + +# # Return the response with the DDoS data +# try: +# return render(request, 'ddos/ddos.html', { +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, +# }) +# except Exception as e: +# print(f"Error rendering template: {e}") +# return HttpResponseForbidden("Internal Server Error occurred while rendering the page.") +@never_cache +@csrf_exempt @login_required(login_url='login') def ddos(request): print("Inside the ddos view...") @@ -240,7 +470,106 @@ def ddos(request): -#================================================================================ + + + + + + + +# def DdosApi(request): +# print("Inside the ddos view...") + +# device_id = request.GET.get('device_id', None) +# print(f"Device ID from headers: {device_id}") + +# if not device_id: +# print("No device ID provided. 
Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404) + +# try: +# device_id = int(device_id) +# print(f"Using device ID: {device_id}") +# except ValueError: +# return JsonResponse({'error': 'Invalid device ID'}, status=400) + +# try: +# device = Devices.objects.get(id=device_id) +# device_pod = device.pod +# print(f"Device Pod: {device_pod}") +# except Devices.DoesNotExist: +# return JsonResponse({'error': f"Device with ID {device_id} not found"}, status=404) + +# file_path = os.path.join('media', 'ddos_predictions', str(device_id), 'predictions.csv') +# print(f"Constructed file path: {file_path}") + +# if not os.path.exists(file_path): +# print(f"File not found at path: {file_path}") +# return JsonResponse({'error': f"File not found for device ID {device_id}"}, status=404) + +# try: +# data = pd.read_csv(file_path) +# print(f"Data loaded successfully. First rows:\n{data.head()}") +# except pd.errors.EmptyDataError: +# print(f"CSV file is empty: {file_path}") +# return JsonResponse({'error': 'CSV file is empty'}, status=400) +# except Exception as e: +# print(f"Unexpected error reading CSV: {e}") +# return JsonResponse({'error': 'Error reading the CSV file'}, status=500) + +# protocol_mapping = { +# "Protocol_ICMP": "ICMP", +# "Protocol_TCP": "TCP", +# "Protocol_UDP": "UDP", +# "Protocol_HTTP": "HTTP", +# "Protocol_HTTPS": "HTTPS", +# "Protocol_SSH": "SSH", +# "Protocol_DHCP": "DHCP", +# "Protocol_FTP": "FTP", +# "Protocol_SMTP": "SMTP", +# "Protocol_POP3": "POP3", +# "Protocol_IMAP": "IMAP", +# "Protocol_DNS": "DNS" +# } + +# ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] +# ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} +# ddos_sums['byteperflow'] /= 15 +# ddos_sums['tot_kbps'] /= 15 + +# src_ip_counts = data['src_ip'].value_counts() +# src_ip_dict = src_ip_counts.to_dict() + +# dest_ip_counts = data['dst_ip'].value_counts() +# dest_ip_dict = dest_ip_counts.to_dict() + +# protocol_columns = data.columns[7:19] +# protocol_counts = { +# protocol_mapping.get(protocol, protocol): int((data[protocol] == 1).sum()) +# for protocol in protocol_columns +# } + +# filtered_data = data[data['probability'] > 0.9] +# src_ip_counts2 = filtered_data['src_ip'].value_counts() +# src_ip_dict2 = src_ip_counts2.to_dict() + +# return JsonResponse({ +# 'device_pod': device_pod, +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, +# }, status=200) + + +# ================================================================================ @never_cache def read_tx_bytes(request): @@ -474,12 +803,15 @@ def processes_log(request): def dma(request): return render(request, 'dma/dma.html') + + + # def get_combined_files(): # df1 = pd.read_csv('media/malware_predictions/bytes_predictions_KNeighborsClassifier.csv') # df2 = pd.read_csv('media/malware_predictions/bytes_predictions_RandomForestClassifier.csv') -# df3 = pd.read_csv('media/malware_predictions/latest_malware_bytes_predictions_SGD.csv') -# df4 = pd.read_csv('media/malware_predictions/latest_malware_bytes_predictions_XGB.csv') +# df3 = 
pd.read_csv('media/malware_predictions/bytes_predictions_SGDClassifier.csv') +# df4 = pd.read_csv('media/malware_predictions/bytes_predictions_XGBClassifier.csv') # # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv') @@ -578,6 +910,296 @@ def dma(request): # combined_data2.at[i,'Prediction Probability'] = probs[max_index] +# combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True) + +# return combined_data + + +# @login_required(login_url='login') +# @never_cache +# def malware(request): +# combined_data = get_combined_files() + +# class_names = { +# 1: "Ramnit", +# 2: "Lollipop", +# 3: "Kelihos_ver3", +# 4: "Vundo", +# 5: "Simda", +# 6: "Tracur", +# 7: "Kelihos_ver1", +# 8: "Obfuscator.ACY", +# 9: "Gatak" +# } + + +# high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] +# files_list = high_probability_files['File'].tolist() + + +# files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] +# frequency = files70_90['Predicted Class'].value_counts().sort_index() +# complete_index = pd.Index(range(10)) +# frequency = frequency.reindex(complete_index, fill_value=0) +# print(frequency,'in the frequency') +# # if frequency: +# # print("Check_malware_frequency") + + +# all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() +# frequency_with_names = all_frequency.rename(class_names) +# print(frequency_with_names,'with name') + + +# avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() +# all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) +# avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left') +# avg_probability['Prediction Probability'].fillna(0, inplace=True) +# avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) +# average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) +# print(average_probability_dict,"avg is here ") + +# file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') +# data = None +# try: +# with open(file_path, 'r') as file: +# data = file.readlines()[::-1] # Reverse lines for latest logs +# except: +# pass + + + +# return render(request, 'malware/malware.html', {'files_list': files_list , 'frequency' : frequency.to_dict() , 'class_frequency' : frequency_with_names.to_dict() , 'average' : average_probability_dict ,"logs":data}) + + +# def get_combined_files(device_id): + +# base_dir = os.path.join("media", "malware_predictions", str(device_id)) +# # base_dir = f"media/malware_predictions" +# # Construct file paths using base_dir +# file1_path = os.path.join(base_dir, 'bytes_predictions_KNeighborsClassifier.csv') +# file2_path = os.path.join(base_dir, 'bytes_predictions_RandomForestClassifier.csv') +# file3_path = os.path.join(base_dir, 'bytes_predictions_SGDClassifier.csv') +# file4_path = os.path.join(base_dir, 'bytes_predictions_XGBClassifier.csv') + + +# df1 = pd.read_csv(file1_path) +# df2 = pd.read_csv(file2_path) +# df3 = pd.read_csv(file3_path) +# df4 = pd.read_csv(file4_path) +# # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv') +# # df2 = pd.read_csv('media/temp/bytes_predictions_RandomForestClassifier.csv') +# # df3 = pd.read_csv('media/temp/bytes_predictions_SGDClassifier.csv') +# # df4 = pd.read_csv('media/temp/bytes_predictions_XGBooster.csv') + 
+# # Step 2: Create a new DataFrame to hold combined results +# combined_data1 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data1['File'] = df1['File'] # Assuming all files are the same +# combined_data1['Predicted Class'] = df1['Predicted Class'] # Placeholder +# combined_data1['Prediction Probability'] = 0.0 # Initialize probability column +# max_length = max(len(df1), len(df2), len(df3), len(df4)) +# # Step 4: Loop through each row and calculate the highest probability and average +# # for i in range(len(df1)): +# # # Get probabilities from all models +# # probs = [ +# # df1['Prediction Probability'][i], +# # df2['Prediction Probability'][i], +# # df3['Prediction Probability'][i], +# # df4['Prediction Probability'][i], +# # ] + +# # # Get predicted classes +# # classes = [ +# # df1['Predicted Class'][i], +# # df2['Predicted Class'][i], +# # df3['Predicted Class'][i], +# # df4['Predicted Class'][i], +# # ] + +# # # Find the index of the highest probability +# # max_index = probs.index(max(probs)) + +# # # Set the highest predicted class +# # combined_data1.at[i, 'Predicted Class'] = classes[max_index] + +# # # Calculate the average probability +# # combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + +# for i in range(max_length): +# probs, classes = [], [] + +# for df in [df1, df2, df3, df4]: +# try: +# probs.append(df['Prediction Probability'].iloc[i]) +# classes.append(df['Predicted Class'].iloc[i]) +# except IndexError: +# # Skip if the row does not exist in this DataFrame +# pass + +# if probs and classes: +# max_index = probs.index(max(probs)) +# combined_data1.at[i, 'Predicted Class'] = classes[max_index] +# combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + +# file5_path = os.path.join(base_dir, 'asm_prediction_KNeighborsClassifier.csv') +# file6_path = os.path.join(base_dir, 'asm_prediction_LogisticRegression.csv') +# file7_path = os.path.join(base_dir, 'asm_prediction_RandomForestClassifier.csv') +# file8_path = os.path.join(base_dir, 'asm_prediction_XGBClassifier.csv') +# df5 = pd.read_csv(file5_path) +# df6 = pd.read_csv(file6_path) +# df7 = pd.read_csv(file7_path) +# df8 = pd.read_csv(file8_path) + +# combined_data2 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data2['File'] = df5['File'] # Assuming all files are the same +# combined_data2['Predicted Class'] = df5['Predicted Class'] # Placeholder +# combined_data2['Prediction Probability'] = 0.0 # Initialize probability column + +# # Step 4: Loop through each row and calculate the highest probability and average +# for i in range(len(df5)): +# # Get probabilities from all models +# probs = [ +# df5['Prediction Probability'][i], +# df6['Prediction Probability'][i], +# df7['Prediction Probability'][i], +# df8['Prediction Probability'][i], +# ] + +# # Get predicted classes +# classes = [ +# df5['Predicted Class'][i], +# df6['Predicted Class'][i], +# df7['Predicted Class'][i], +# df8['Predicted Class'][i], +# ] + +# # Find the index of the highest probability +# max_index = probs.index(max(probs)) + +# # Set the highest predicted class +# combined_data2.at[i, 'Predicted Class'] = classes[max_index] + +# # Calculate the average probability +# # combined_data2.at[i, 'Prediction Probability'] = sum(probs) / len(probs) +# combined_data2.at[i,'Prediction Probability'] = probs[max_index] + + +# combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True) + +# return combined_data +# def get_combined_files(device_id): + + # df1 = 
pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_KNeighborsClassifier.csv') + # df2 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_RandomForestClassifier.csv') + # df3 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_SGDClassifier.csv') + # df4 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_XGBClassifier.csv') +# file_path = f'media/malware_predictions/{device_id}/bytes_predictions_KNeighborsClassifier.csv' +# print('df1........hukjkjjnkjn:',file_path) + +# # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv') +# # df2 = pd.read_csv('media/temp/bytes_predictions_RandomForestClassifier.csv') +# # df3 = pd.read_csv('media/temp/bytes_predictions_SGDClassifier.csv') +# # df4 = pd.read_csv('media/temp/bytes_predictions_XGBooster.csv') + +# # Step 2: Create a new DataFrame to hold combined results +# combined_data1 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data1['File'] = df1['File'] # Assuming all files are the same +# combined_data1['Predicted Class'] = df1['Predicted Class'] # Placeholder +# combined_data1['Prediction Probability'] = 0.0 # Initialize probability column +# max_length = max(len(df1), len(df2), len(df3), len(df4)) +# # Step 4: Loop through each row and calculate the highest probability and average +# # for i in range(len(df1)): +# # # Get probabilities from all models +# # probs = [ +# # df1['Prediction Probability'][i], +# # df2['Prediction Probability'][i], +# # df3['Prediction Probability'][i], +# # df4['Prediction Probability'][i], +# # ] + +# # # Get predicted classes +# # classes = [ +# # df1['Predicted Class'][i], +# # df2['Predicted Class'][i], +# # df3['Predicted Class'][i], +# # df4['Predicted Class'][i], +# # ] + +# # # Find the index of the highest probability +# # max_index = probs.index(max(probs)) + +# # # Set the highest predicted class +# # combined_data1.at[i, 'Predicted Class'] = classes[max_index] + +# # # Calculate the average probability +# # combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + +# for i in range(max_length): +# probs, classes = [], [] + +# for df in [df1, df2, df3, df4]: +# try: +# probs.append(df['Prediction Probability'].iloc[i]) +# classes.append(df['Predicted Class'].iloc[i]) +# except IndexError: +# # Skip if the row does not exist in this DataFrame +# pass + +# if probs and classes: +# max_index = probs.index(max(probs)) +# combined_data1.at[i, 'Predicted Class'] = classes[max_index] +# combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + + + # df5 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_KNeighborsClassifier.csv') + # df6 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_LogisticRegression.csv') + # df7 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_RandomForestClassifier.csv') + # df8 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_XGBClassifier.csv') + + + +# combined_data2 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data2['File'] = df5['File'] # Assuming all files are the same +# combined_data2['Predicted Class'] = df5['Predicted Class'] # Placeholder +# combined_data2['Prediction Probability'] = 0.0 # Initialize probability column + +# # Step 4: Loop through each row and calculate the highest probability and average +# for i in range(len(df5)): +# # Get probabilities from all models +# probs = [ +# df5['Prediction Probability'][i], +# 
df6['Prediction Probability'][i], +# df7['Prediction Probability'][i], +# df8['Prediction Probability'][i], +# ] + +# # Get predicted classes +# classes = [ +# df5['Predicted Class'][i], +# df6['Predicted Class'][i], +# df7['Predicted Class'][i], +# df8['Predicted Class'][i], +# ] + +# # Find the index of the highest probability +# max_index = probs.index(max(probs)) + +# # Set the highest predicted class +# combined_data2.at[i, 'Predicted Class'] = classes[max_index] + +# # Calculate the average probability +# # combined_data2.at[i, 'Prediction Probability'] = sum(probs) / len(probs) +# combined_data2.at[i,'Prediction Probability'] = probs[max_index] + + # combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True) # return combined_data @@ -708,82 +1330,99 @@ def get_combined_files(device_id): return pd.DataFrame() -@login_required(login_url='login') -@never_cache +# @csrf_exempt +# @login_required(login_url='login') +# @never_cache + def malware(request): - print("Inside the ddos view...") + print("Inside the malware view...") - device_id = request.GET.get('device_id', None) - print(f"Device ID from headers: {device_id}") + # Attempt to get the device_id from the query parameters + device_id = request.GET.get('device_id', None) + print(f"Device ID from request: {device_id}") - if not device_id: - print("No device ID provided in headers. Fetching the latest device for the logged-in user.") - recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() - if recent_device: - device_id = recent_device.id - print(f"Fetched recent device ID: {device_id}") - else: - print("No devices found for the logged-in user.") + # If device_id is not provided in the request, fetch the latest device for the logged-in user + if not device_id: + print("No device ID provided. Fetching the latest device for the logged-in user.") + recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() + if recent_device: + device_id = recent_device.id + print(f"Fetched recent device ID: {device_id}") + else: + print("No devices found for the logged-in user.") + + # Merge the per-model prediction CSVs for this device + combined_data = get_combined_files(device_id) + # combined_data = get_combined_files() + # print("Combined data:", combined_data) + class_names = { + 1: "Ramnit", + 2: "Lollipop", + 3: "Kelihos_ver3", + 4: "Vundo", + 5: "Simda", + 6: "Tracur", + 7: "Kelihos_ver1", + 8: "Obfuscator.ACY", + 9: "Gatak" + } - # Fetch combined data - combined_data = get_combined_files(device_id) - - # If the data is empty, show a message - if combined_data.empty: - message = "Data is still being captured. Please try again later." 
- return render(request, 'malware/malware.html', {'message': message}) - - class_names = { - 1: "Ramnit", - 2: "Lollipop", - 3: "Kelihos_ver3", - 4: "Vundo", - 5: "Simda", - 6: "Tracur", - 7: "Kelihos_ver1", - 8: "Obfuscator.ACY", - 9: "Gatak" - } + high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] + print("Selecting files with prediction probability >= 0.9") + files_list = high_probability_files['File'].tolist() - high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] - files_list = high_probability_files['File'].tolist() + files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] + frequency = files70_90['Predicted Class'].value_counts().sort_index() + complete_index = pd.Index(range(10)) + frequency = frequency.reindex(complete_index, fill_value=0) + # print(frequency, 'in the frequency') - files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] - frequency = files70_90['Predicted Class'].value_counts().sort_index() - complete_index = pd.Index(range(10)) - frequency = frequency.reindex(complete_index, fill_value=0) + all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() + frequency_with_names = all_frequency.rename(class_names) + # print(frequency_with_names, 'with name') - all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() - frequency_with_names = all_frequency.rename(class_names) + avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() + all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) + avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left') + avg_probability['Prediction Probability'].fillna(0, inplace=True) + avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) + average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) + # print(average_probability_dict, "avg is here ") - avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() - all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) - avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left') - avg_probability['Prediction Probability'].fillna(0, inplace=True) - avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) - average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) + file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') + data = None + try: + with open(file_path, 'r') as file: + data = file.readlines()[::-1] # Reverse lines for latest logs + except Exception: + pass # the logs file is optional - file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') - data = None - try: - with open(file_path, 'r') as file: - data = file.readlines()[::-1] # Reverse lines for latest logs - except: - pass - - return render(request, 'malware/malware.html', { - 'files_list': files_list, - 'frequency': frequency.to_dict(), - 'class_frequency': frequency_with_names.to_dict(), - 'average': average_probability_dict, - "logs": data, - 'message': None # Clear message if data is available - }) + return render(request, 'malware/malware.html', {'files_list': files_list, 'frequency': frequency.to_dict(), 'class_frequency': 
frequency_with_names.to_dict(), 'average': average_probability_dict, "logs": data}) # def malware(request): -# combined_data = get_combined_files() +# print("Inside the ddos view...") +# device_id = request.GET.get('device_id', None) +# print(f"Device ID from headers: {device_id}") + +# if not device_id: +# print("No device ID provided in headers. Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") + +# # Fetch combined data +# combined_data = get_combined_files(device_id) + +# # If the data is empty, show a message +# if combined_data.empty: +# message = "Data is still being captured. Please try again later." +# return render(request, 'malware/malware.html', {'message': message}) + # class_names = { # 1: "Ramnit", # 2: "Lollipop", @@ -795,25 +1434,17 @@ def malware(request): # 8: "Obfuscator.ACY", # 9: "Gatak" # } - # high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] # files_list = high_probability_files['File'].tolist() - # files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] # frequency = files70_90['Predicted Class'].value_counts().sort_index() # complete_index = pd.Index(range(10)) # frequency = frequency.reindex(complete_index, fill_value=0) -# print(frequency,'in the frequency') -# # if frequency: -# # print("Check_malware_frequency") - # all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() # frequency_with_names = all_frequency.rename(class_names) -# print(frequency_with_names,'with name') - # avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() # all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) @@ -821,8 +1452,7 @@ def malware(request): # avg_probability['Prediction Probability'].fillna(0, inplace=True) # avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) # average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) -# print(average_probability_dict,"avg is here ") - + # file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') # data = None # try: @@ -831,11 +1461,19 @@ def malware(request): # except: # pass - - -# return render(request, 'malware/malware.html', {'files_list': files_list , 'frequency' : frequency.to_dict() , 'class_frequency' : frequency_with_names.to_dict() , 'average' : average_probability_dict ,"logs":data}) +# return render(request, 'malware/malware.html', { +# 'files_list': files_list, +# 'frequency': frequency.to_dict(), +# 'class_frequency': frequency_with_names.to_dict(), +# 'average': average_probability_dict, +# "logs": data, +# 'message': None # Clear message if data is available +# }) + @never_cache + + def bye_asm_log(request): space_name = 'Extract' object_key = 'extract.log' @@ -866,12 +1504,11 @@ def bye_asm_log(request): return JsonResponse(live_data) - +# @csrf_exempt # @login_required(login_url='login') # @never_cache # def ransomware(request): - -# file_path = 'media/logs/usage_log.txt' +# file_path = os.path.join('media', 'logs', 'usage_log.txt') # cpu_data = [] # memory_data = [] @@ -886,40 +1523,118 @@ def bye_asm_log(request): # for line in lines: # # Parse CPU 
and memory usage from each line # parts = line.strip().split(",") -# cpu_usage = parts[0] -# memory_usage = parts[1] -# cpu_data.append(cpu_usage) -# memory_data.append(memory_usage) +# if len(parts) >= 2: +# cpu_usage = parts[0] +# memory_usage = parts[1] +# cpu_data.append(cpu_usage) +# memory_data.append(memory_usage) +# else: +# print(f"Skipping malformed line: {line}") +# else: +# print(f"Usage log file not found at path: {file_path}") - +# device_id = request.GET.get('device_id', None) +# # device_id=53 +# print(f"Device ID from headers: {device_id}") +# # If no device_id is found in the request, get the latest device for the logged-in user +# if not device_id: +# print("No device ID found. Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id # Use the actual device ID from the database +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404) +# # Construct file paths correctly using os.path.join +# csv_file_path = os.path.join('media', 'ransomware_predictions', str(device_id), 'latest_ransomware_type.csv') +# mapping_file_path = os.path.join('media', 'ransomware_predictions', 'mapping_win.txt') +# yes_no_path = os.path.join('media', 'ransomware_predictions', 'ransomware.csv') -# csv_file_path = 'media/ransomware_predictions/latest_ransomware_type.csv' # Replace with your actual CSV file path -# df = pd.read_csv(csv_file_path) -# mapping_file_path = 'media/ransomware_predictions/mapping_win.txt' -# mapping_df = pd.read_csv(mapping_file_path, header=None, names=['predicted_class', 'class_name']) -# class_mapping = dict(zip(mapping_df['predicted_class'], mapping_df['class_name'])) -# df['class_name'] = df['predicted_class'].map(class_mapping) -# class_frequency = df['class_name'].value_counts() -# all_classes_df = pd.DataFrame({'class_name': mapping_df['class_name']}) -# all_classes_df['frequency'] = all_classes_df['class_name'].map(class_frequency).fillna(0).astype(int) -# class_frequency_dict = dict(zip(all_classes_df['class_name'], all_classes_df['frequency'])) +# # Debugging: Print the file paths +# print(f"CSV file path: {csv_file_path}") +# print(f"Mapping file path: {mapping_file_path}") +# print(f"Yes/No file path: {yes_no_path}") -# yes_no_path = 'media/ransomware_predictions/ransomware.csv' +# # Initialize variables to hold processed data +# class_frequency_dict = {} +# flag = None +# time = None -# # Reading the CSV file into a DataFrame -# yes_no = pd.read_csv(yes_no_path) +# # Process the latest ransomware type CSV +# try: +# if not os.path.exists(csv_file_path): +# raise FileNotFoundError(f"CSV file not found at path: {csv_file_path}") -# # # Extracting the value of 'Predicted Label' -# flag =yes_no[yes_no.columns[-1]].iloc[0] -# time = yes_no[yes_no.columns[-2]].iloc[0] - +# # Load ransomware type CSV +# df = pd.read_csv(csv_file_path) +# print(f"Loaded ransomware type CSV: {csv_file_path}") +# # Load mapping file +# if not os.path.exists(mapping_file_path): +# raise FileNotFoundError(f"Mapping file not found at path: {mapping_file_path}") - -# return render(request, 'ransomware/ransomware.html' , context={ 'type' : class_frequency_dict, 'cpu' : json.dumps(cpu_data) , 'memory' : json.dumps(memory_data) , 'flag' : flag,'time' : time}) +# mapping_df = pd.read_csv(mapping_file_path, 
header=None, names=['predicted_class', 'class_name']) +# class_mapping = dict(zip(mapping_df['predicted_class'], mapping_df['class_name'])) +# print("Loaded mapping file and created class mapping dictionary.") + +# # Map predicted classes to class names +# df['class_name'] = df['predicted_class'].map(class_mapping) +# class_frequency = df['class_name'].value_counts() + +# # Ensure all classes from mapping are present in the frequency dictionary +# all_classes_df = pd.DataFrame({'class_name': mapping_df['class_name']}) +# all_classes_df['frequency'] = all_classes_df['class_name'].map(class_frequency).fillna(0).astype(int) +# class_frequency_dict = dict(zip(all_classes_df['class_name'], all_classes_df['frequency'])) + +# print(f"Class frequency dictionary: {class_frequency_dict}") + +# except FileNotFoundError as e: +# print(f"FileNotFoundError: {str(e)}") +# return JsonResponse({'error': str(e)}, status=404) +# except Exception as e: +# print(f"Exception while processing ransomware type CSV: {str(e)}") +# return JsonResponse({'error': f"Error processing ransomware type CSV: {str(e)}"}, status=500) + +# # Process the ransomware flag CSV +# try: +# if not os.path.exists(yes_no_path): +# raise FileNotFoundError(f"Ransomware CSV file not found at path: {yes_no_path}") + +# # Load ransomware flag CSV +# yes_no = pd.read_csv(yes_no_path) +# print('Loaded ransomware flag CSV:', yes_no) + +# if yes_no.empty: +# raise ValueError("Ransomware CSV file is empty.") + +# # Extracting the value of 'Predicted Label' and 'Time' +# flag = yes_no.iloc[0, -1] # Assuming 'Predicted Label' is the last column +# time = yes_no.iloc[0, -2] # Assuming 'Time' is the second last column + +# print(f"Extracted flag: {flag}, time: {time}") + +# except FileNotFoundError as e: +# print(f"FileNotFoundError: {str(e)}") +# return JsonResponse({'error': str(e)}, status=404) +# except Exception as e: +# print(f"Exception while processing ransomware flag CSV: {str(e)}") +# return JsonResponse({'error': f"Error processing ransomware flag CSV: {str(e)}"}, status=500) + +# # Prepare context for rendering the template +# context = { +# 'type': class_frequency_dict, +# 'cpu': json.dumps(cpu_data), +# 'memory': json.dumps(memory_data), +# 'flag': flag, +# 'time': time +# } + +# return render(request, 'ransomware/ransomware.html', context=context) +@csrf_exempt @login_required(login_url='login') @never_cache def ransomware(request): @@ -1051,7 +1766,6 @@ def ransomware(request): return render(request, 'ransomware/ransomware.html', context=context) - #================================================================================================== import time @@ -1572,7 +2286,7 @@ def generate_random_values(request): # response = s3.get_object(Bucket=space_name, Key=object_key) # content = response['Body'].read().decode('utf-8') -# # Return the content as a JSON response +# # Return the content as a JSON response1 # return JsonResponse({"log_content": content}) # except Exception as e: # return JsonResponse({"error": str(e)}, status=500) @@ -1662,6 +2376,7 @@ class SqlStatusView(APIView): # } # return JsonResponse(response_data) + from django.http import JsonResponse import boto3 from botocore.exceptions import NoCredentialsError, ClientError @@ -1704,8 +2419,6 @@ def sql_status_info(request): return JsonResponse(response_data) - - @csrf_exempt def restore_database(request): if request.method == "POST": @@ -1758,7 +2471,7 @@ def check_restore_value1(request): 'mysql': mysql, } return JsonResponse(response_data) - +from datetime import 
datetime @api_view(['POST']) def upload_csv(request): # Check if the request contains a file @@ -1999,8 +2712,9 @@ def malware_ASM_predictions_KNeighbours(request): return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) - +from datetime import datetime @api_view(['POST']) + def upload_logs(request): log_file = request.FILES.get('file') @@ -2021,22 +2735,119 @@ def upload_logs(request): save_path = os.path.join(folder_path, 'logs.txt') # If the file already exists, remove it to ensure overwriting - try: - if os.path.exists(save_path): - os.remove(save_path) - except Exception as e: - print(f"warning: {e}") - + if os.path.exists(save_path): + os.remove(save_path) # Save the new file - with open(save_path, 'w') as destination: + with open(save_path, 'wb+') as destination: for chunk in log_file.chunks(): - destination.write(f'{datetime.now()} - {chunk}') + destination.write(chunk) return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) +# @api_view(['POST']) +# def ransomware_predictions(request): +# csv_file = request.FILES.get('file') +# if not csv_file: +# return JsonResponse({'error': 'No file provided'}, status=400) +# if not csv_file.name.endswith('.csv'): +# return JsonResponse({'error': 'File is not CSV'}, status=400) + +# # Define the directory and file path where the CSV will be stored +# # folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions') + +# # # Make sure the directory exists +# # if not os.path.exists(folder_path): +# # os.makedirs(folder_path) + +# # # Define the path for the file (always named 'latest_ransomware.csv') +# # save_path = os.path.join(folder_path, 'latest_ransomware.csv') + +# # # If the file already exists, remove it to ensure overwriting +# # if os.path.exists (save_path): +# # os.remove(save_path) + +# # # Save the new file +# # with open(save_path, 'wb+') as destination: +# # for chunk in csv_file.chunks(): +# # destination.write(chunk) +# # user_id = request.data.get('user_id') +# user_id = request.data.get('user_id') + +# if not user_id: +# return JsonResponse({'error': 'User ID is required'}, status=400) + +# try: +# # Retrieve the UserProfile based on the provided user_id +# user_profile = UserProfile.objects.get(user__id=user_id) +# print(user_profile) + +# # Get the device IDs associated with the user +# device_ids = get_device_ids_by_user_id(user_id) +# print(f"Device IDs: {device_ids}") + +# # Check if the user has devices associated with them +# if not device_ids: +# return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400) + +# # Assuming we want to use the first device associated with the user +# device = Devices.objects.get(id=device_ids[-1]) +# print(f"Device ID: {device.id}") +# folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions', str(device.id)) +# # folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions') +# if not os.path.exists(folder_path): +# os.makedirs(folder_path) + +# # Define the path for the file (always named 'latest_ransomware.csv') +# save_path = os.path.join(folder_path, 'latest_ransomware.csv') + +# # If the file already exists, remove it to ensure overwriting +# if os.path.exists(save_path): +# os.remove(save_path) + +# # Save the new file +# with open(save_path, 'wb+') as destination: +# for chunk in csv_file.chunks(): +# destination.write(chunk) + +# # if not user_id: +# # return JsonResponse({'error': 'User ID is required'}, status=400) +# 
# try: +# # # Retrieve the UserProfile based on the provided user_id +# # user_profile = UserProfile.objects.get(user__id=user_id) +# # print(user_profile) + +# # # Get the device IDs associated with the user +# # device_ids = get_device_ids_by_user_id(user_id) +# # print(f"Device IDs: {device_ids}") + +# # # Check if the user has devices associated with them +# # if not device_ids: +# # return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400) + +# # # Assuming we want to use the first device associated with the user +# # device = Devices.objects.get(id=device_ids[-1]) +# # print(f"Device ID: {device.id}") + +# # Create the DdosPrediction record +# rensomware_audit_prediction = Rensomware_AuditPrediction.objects.create( +# device=device, +# user=user_profile, +# file_path=save_path +# ) + +# return JsonResponse({ +# 'message': 'File uploaded and prediction saved successfully', +# 'file_path': save_path, +# 'prediction_id': rensomware_audit_prediction.id +# }) + +# except UserProfile.DoesNotExist: +# return JsonResponse({'error': 'User not found'}, status=404) +# except Devices.DoesNotExist: +# return JsonResponse({'error': 'Device not found'}, status=404) @api_view(['POST']) def ransomware_predictions(request): csv_file = request.FILES.get('file') @@ -2047,62 +2858,62 @@ def ransomware_predictions(request): if not csv_file.name.endswith('.csv'): return JsonResponse({'error': 'File is not CSV'}, status=400) - # Define the directory and file path where the CSV will be stored - folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions') - - # Make sure the directory exists - if not os.path.exists(folder_path): - os.makedirs(folder_path) + user_id = request.data.get('user_id') - # Define the path for the file (always named 'latest_ransomware.csv') - save_path = os.path.join(folder_path, 'latest_ransomware.csv') + if not user_id: + return JsonResponse({'error': 'User ID is required'}, status=400) - # If the file already exists, remove it to ensure overwriting - if os.path.exists(save_path): - os.remove(save_path) + try: + # Retrieve the UserProfile based on the provided user_id + user_profile = UserProfile.objects.get(user__id=user_id) + print(user_profile) - # Save the new file - with open(save_path, 'wb+') as destination: - for chunk in csv_file.chunks(): - destination.write(chunk) + # Get the device IDs associated with the user + device_ids = get_device_ids_by_user_id(user_id) + print(f"Device IDs: {device_ids}") - return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) + # Check if the user has devices associated with them + if not device_ids: + return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400) -# @api_view(['POST']) -# def ransomware_type_predictions(request): -# try: - -# csv_file = request.FILES.get('file') + # Assuming we want to use the last device associated with the user + device = Devices.objects.get(id=device_ids[-1]) + print(f"Device ID: {device.id}") -# if not csv_file: -# return JsonResponse({'error': 'No file provided'}, status=400) + # Define the directory for storing the file + folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions', str(device.id)) + if not os.path.exists(folder_path): + os.makedirs(folder_path) -# if not csv_file.name.endswith('.csv'): -# return JsonResponse({'error': 'File is not CSV'}, status=400) + # Define the path for the file (always named 'latest_ransomware.csv') + save_path = 
os.path.join(folder_path, 'latest_ransomware.csv') -# # Define the directory and file path where the CSV will be stored -# folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions') - -# # Make sure the directory exists -# if not os.path.exists(folder_path): -# os.makedirs(folder_path) + # If the file already exists, remove it to ensure overwriting + if os.path.exists(save_path): + os.remove(save_path) -# # Define the path for the file (always named 'latest_ransomware.csv') -# save_path = os.path.join(folder_path, 'latest_ransomware_type.csv') + # Save the new file + with open(save_path, 'wb+') as destination: + for chunk in csv_file.chunks(): + destination.write(chunk) -# # If the file already exists, remove it to ensure overwriting -# if os.path.exists(save_path): -# os.remove(save_path) + # Create the Rensomware_AuditPrediction record + rensomware_audit_prediction = Rensomware_AuditPrediction.objects.create( + device=device, + user=user_profile, + file_path=save_path + ) -# # Save the new file -# with open(save_path, 'wb+') as destination: -# for chunk in csv_file.chunks(): -# destination.write(chunk) - -# return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) -# except Exception as e: -# print(e) + return JsonResponse({ + 'message': 'File uploaded and prediction saved successfully', + 'file_path': save_path, + 'prediction_id': rensomware_audit_prediction.id + }) + except UserProfile.DoesNotExist: + return JsonResponse({'error': 'User not found'}, status=404) + except Devices.DoesNotExist: + return JsonResponse({'error': 'Device not found'}, status=404) @api_view(['POST']) def ransomware_type_predictions(request): @@ -2194,7 +3005,40 @@ def ransomware_type_predictions(request): except Devices.DoesNotExist: return JsonResponse({'error': 'Device not found'}, status=404) - + + +# @api_view(['POST']) +# def ddos_predictions(request): +# csv_file = request.FILES.get('file') + +# if not csv_file: +# return JsonResponse({'error': 'No file provided'}, status=400) + +# if not csv_file.name.endswith('.csv'): +# return JsonResponse({'error': 'File is not CSV'}, status=400) + +# # Define the directory and file path where the CSV will be stored +# folder_path = os.path.join(settings.MEDIA_ROOT, 'ddos_predictions') + +# # Make sure the directory exists +# if not os.path.exists(folder_path): +# os.makedirs(folder_path) + +# # Define the path for the file (always named 'latest_ransomware.csv') +# save_path = os.path.join(folder_path, 'predictions.csv') + +# # If the file already exists, remove it to ensure overwriting +# if os.path.exists(save_path): +# os.remove(save_path) + +# # Save the new file +# with open(save_path, 'wb+') as destination: +# for chunk in csv_file.chunks(): +# destination.write(chunk) + + +# return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) + def get_device_ids_by_user_id(user_id): try: # Get the UserProfile instance using the user ID @@ -2228,7 +3072,7 @@ def get_device_ids_by_user_id(user_id): # if not os.path.exists(folder_path): # os.makedirs(folder_path) -# # Define the path for the file (always named 'latest_ransomware.csv') +# # Define the path for the file (always named 'predictions.csv') # save_path = os.path.join(folder_path, 'predictions.csv') # # If the file already exists, remove it to ensure overwriting @@ -2240,7 +3084,113 @@ def get_device_ids_by_user_id(user_id): # for chunk in csv_file.chunks(): # destination.write(chunk) -# return 
JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) +# # Extract user_id from the request (device_id is not needed now) +# user_id = request.data.get('user_id') + +# if not user_id: +# return JsonResponse({'error': 'User ID is required'}, status=400) + +# try: +# # Retrieve the UserProfile based on the provided user_id +# user_profile = UserProfile.objects.get(user__id=user_id) +# print(user_profile) + +# # Get the device IDs associated with the user +# device_ids = get_device_ids_by_user_id(user_id) +# print(f"Device IDs: {device_ids}") + +# # Check if the user has devices associated with them +# if not device_ids: +# return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400) + +# # Assuming we want to use the first device associated with the user +# device = Devices.objects.get(id=device_ids[0]) +# print(f"Device ID: {device.id}") + +# # Create the DdosPrediction record +# ddos_prediction = DdosPrediction.objects.create( +# device=device, +# user=user_profile, +# file_path=save_path +# ) + +# return JsonResponse({ +# 'message': 'File uploaded and prediction saved successfully', +# 'file_path': save_path, +# 'prediction_id': ddos_prediction.id +# }) + +# except UserProfile.DoesNotExist: +# return JsonResponse({'error': 'User not found'}, status=404) +# except Devices.DoesNotExist: +# return JsonResponse({'error': 'Device not found'}, status=404) + +# @api_view(['POST']) +# def ddos_predictions(request): +# # Check if a file is provided in the request +# csv_file = request.FILES.get('file') +# if not csv_file: +# return JsonResponse({'error': 'No file provided'}, status=400) + +# # Ensure the file is a CSV +# if not csv_file.name.endswith('.csv'): +# return JsonResponse({'error': 'File is not CSV'}, status=400) + +# # Extract user_id from the request data +# user_id = request.data.get('user_id') +# device_ids = get_device_ids_by_user_id(user_id) +# print(f"Device IDs: {device_ids}") + +# # Check if the user has associated devices +# if not device_ids: +# return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400) + +# try: +# # Retrieve the UserProfile for the logged-in user +# user_profile = UserProfile.objects.get(user__id=user_id) +# print('userrr',user_profile) + + +# # Get the most recent device associated with the user +# device_ids = get_device_ids_by_user_id(user_id) +# print('deviceeeee',device_ids) +# if not device_ids: +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=400) + +# device = Devices.objects.get(id=device_ids[-1]) +# print(f"Device ID: {device.id}") +# # Define the path for saving the file under 'ddos_predictions' +# folder_path = os.path.join(settings.MEDIA_ROOT, 'ddos_predictions') +# os.makedirs(folder_path, exist_ok=True) + +# # Create a unique file name for the recent device +# save_path = os.path.join(folder_path, f'prediction.csv') + +# # Save the file in chunks +# with open(save_path, 'wb+') as destination: +# for chunk in csv_file.chunks(): +# destination.write(chunk) + +# # Create a DdosPrediction record +# ddos_prediction = DdosPrediction.objects.create( +# device=device, +# user=user_profile, +# file_path=save_path +# ) + +# # Return a success response +# return JsonResponse({ +# 'message': 'File uploaded and prediction saved successfully', + +# 'file_path': save_path, +# 'prediction_id': ddos_prediction.id +# }) + +# except UserProfile.DoesNotExist: +# return JsonResponse({'error': 'User profile not found'}, 
status=404) +# except Exception as e: +# return JsonResponse({'error': f'An unexpected error occurred: {str(e)}'}, status=500) +#===================with unique file path=============================================== from rest_framework.permissions import AllowAny from rest_framework.decorators import permission_classes @@ -2306,6 +3256,7 @@ def ddos_predictions(request): except Exception as e: return JsonResponse({'error': f'An unexpected error occurred: {str(e)}'}, status=500) + @api_view(['POST']) def usage_log(request): try: