Add multiple device support log
This commit is contained in:
parent
b4344e3dbd
commit
889d7471b7
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,46 @@
# Generated by Django 5.1.3 on 2024-12-10 09:51

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('Accounts', '0003_userprofile_company_name'),
        ('Dashboard', '0005_restoredatabase'),
        ('Device', '0007_devices_mac_address_devices_unique_id'),
    ]

    operations = [
        migrations.CreateModel(
            name='DdosPrediction',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_path', models.FileField(upload_to='ddos_predictions/')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Device.devices')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Accounts.userprofile')),
            ],
        ),
        migrations.CreateModel(
            name='Rensomware_AuditPrediction',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_path', models.FileField(max_length=555, upload_to='ransomware_predictions/')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Device.devices')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Accounts.userprofile')),
            ],
        ),
        migrations.CreateModel(
            name='Rensomware_TypePrediction',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_path', models.FileField(max_length=555, upload_to='ransomware_predictions/')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Device.devices')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Accounts.userprofile')),
            ],
        ),
    ]
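A quick usage sketch (not part of the commit) of the per-device prediction tables this migration creates; the model and field names come from the migration above, everything else is illustrative:

from Dashboard.models import DdosPrediction

def latest_ddos_prediction(device_id):
    # Newest uploaded prediction file for one device, or None if it has none.
    return (DdosPrediction.objects
            .filter(device_id=device_id)
            .order_by('-uploaded_at')
            .first())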
Binary file not shown.
@ -1,5 +1,7 @@
from django.db import models

from Accounts.models import UserProfile
from Device.models import Devices
# Create your models here.
class Status(models.Model):
    number = models.CharField(max_length=15)
@ -24,3 +26,34 @@ class RestoreDatabase(models.Model):

    def __str__(self):
        return f"Number (ID: {self.id}, Status: {self.value})"


class DdosPrediction(models.Model):
    device = models.ForeignKey(Devices, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)  # References the user who uploaded the prediction
    file_path = models.FileField(upload_to='ddos_predictions/')
    uploaded_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Prediction for {self.device.device_name} by {self.user.user.username} at {self.uploaded_at}"


class Rensomware_TypePrediction(models.Model):
    device = models.ForeignKey(Devices, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)  # References the user who uploaded the prediction
    file_path = models.FileField(upload_to='ransomware_predictions/', max_length=555)
    uploaded_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Prediction for {self.device.device_name} by {self.user.user.username} at {self.uploaded_at}"


class Rensomware_AuditPrediction(models.Model):
    device = models.ForeignKey(Devices, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)  # References the user who uploaded the prediction
    file_path = models.FileField(upload_to='ransomware_predictions/', max_length=555)
    uploaded_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Prediction for {self.device.device_name} by {self.user.user.username} at {self.uploaded_at}"
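A side note on the __str__ methods above: each traverses the device and user foreign keys, so printing a queryset of predictions issues extra queries per row unless the related objects are joined up front. A minimal sketch, assuming the models as defined above:

predictions = (Rensomware_TypePrediction.objects
               .select_related('device', 'user__user')
               .all())
for p in predictions:
    print(p)  # related device and user rows are already loaded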
@ -47,7 +47,7 @@ urlpatterns = [
    # DDoS

    path('generate/', generate_random_values, name='generate_random_values'),
    path('fetch_ddos_value/', fetch_ddos_value, name='fetch_ddos_value'),
    path('ddos/fetch_ddos_value/', fetch_ddos_value, name='fetch_ddos_value'),

    # shadow script
    path('status1/', views.get_number_status1, name='get_number_status1'),
@ -12,6 +12,7 @@ from django.conf import settings
import re
import random
from cryptography.fernet import Fernet
from django.template.loader import render_to_string
from django.http import JsonResponse
from Accounts.models import UserProfile
from django.contrib.auth.models import User
@ -33,6 +34,7 @@ import csv
from rest_framework.decorators import api_view
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from Device.models import Devices
import pandas as pd
import time
@ -70,12 +72,119 @@ def navbar(request):
#===========================================================================================
# The previous single-file implementation, kept for reference:
# def ddos(request):
#     file_path = 'media/ddos_predictions/predictions.csv'
#     data = pd.read_csv(file_path)
#
#     # Create a mapping for protocol names to their short forms
#     protocol_mapping = {
#         "Protocol_ICMP": "ICMP",
#         "Protocol_TCP": "TCP",
#         "Protocol_UDP": "UDP",
#         "Protocol_HTTP": "HTTP",
#         "Protocol_HTTPS": "HTTPS",
#         "Protocol_SSH": "SSH",
#         "Protocol_DHCP": "DHCP",
#         "Protocol_FTP": "FTP",
#         "Protocol_SMTP": "SMTP",
#         "Protocol_POP3": "POP3",
#         "Protocol_IMAP": "IMAP",
#         "Protocol_DNS": "DNS"
#     }
#
#     ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur']
#     ddos_sums = {col: int(data[col].sum()) for col in ddos_columns}
#     ddos_sums['byteperflow'] /= 15
#     ddos_sums['tot_kbps'] /= 15
#
#     src_ip_counts = data['src_ip'].value_counts()
#     src_ip_dict = src_ip_counts.to_dict()
#
#     dest_ip_counts = data['dst_ip'].value_counts()
#     dest_ip_dict = dest_ip_counts.to_dict()
#
#     protocol_columns = data.columns[7:19]
#     protocol_counts = {}
#     for protocol in protocol_columns:
#         short_form = protocol_mapping.get(protocol, protocol)  # Default to the original name if not found
#         protocol_counts[short_form] = int((data[protocol] == 1).sum())
#     print(protocol_counts)
#
#     filtered_data = data[data['probability'] > 0.9]
#     src_ip_counts2 = filtered_data['src_ip'].value_counts()
#     src_ip_dict2 = src_ip_counts2.to_dict()
#
#     return render(request, 'ddos/ddos.html', {'ddos_sums': ddos_sums, 'src_ip_dict': src_ip_dict, 'dest_ip_dict': dest_ip_dict, 'protocol_counts': protocol_counts, 'src_ip_dict2': src_ip_dict2})


@never_cache
@login_required(login_url='login')
def ddos(request):
    print("Inside the ddos view...")
    # Attempt to get the device_id from the query string
    device_id = request.GET.get('device_id', None)
    print(f"Device ID from request: {device_id}")

    # If device_id is not provided, fall back to the latest device for the logged-in user
    if not device_id:
        print("No device ID provided. Fetching the latest device for the logged-in user.")
        recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first()
        if recent_device:
            device_id = recent_device.id
            print(f"Fetched recent device ID: {device_id}")
        else:
            print("No devices found for the logged-in user.")
            return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404)

    # Convert device_id to an integer
    try:
        device_id = int(device_id)
        print(f"Using device ID: {device_id}")
    except ValueError:
        return JsonResponse({'error': 'Invalid device ID'}, status=400)

    # Retrieve the device and its pod
    try:
        device = Devices.objects.get(id=device_id)
        device_pod = device.pod
        print(f"Device Pod: {device_pod}")
    except Devices.DoesNotExist:
        return JsonResponse({'error': f"Device with ID {device_id} not found"}, status=404)

    # Construct the per-device path to the DDoS prediction CSV file
    file_path = os.path.join('media', 'ddos_predictions', str(device_id), 'predictions.csv')
    print(f"Constructed file path: {file_path}")

    # Check that the file exists
    if not os.path.exists(file_path):
        print(f"File not found at path: {file_path}")
        return JsonResponse({'error': f"File not found for device ID {device_id}"}, status=404)

    # Attempt to read the CSV file
    try:
        data = pd.read_csv(file_path)
        print(f"Data loaded successfully. First rows:\n{data.head()}")
    except pd.errors.EmptyDataError:
        print(f"CSV file is empty: {file_path}")
        return JsonResponse({'error': 'CSV file is empty'}, status=400)
    except Exception as e:
        print(f"Unexpected error reading CSV: {e}")
        return JsonResponse({'error': 'Error reading the CSV file'}, status=500)
    # Map protocol column names to their short forms and process the CSV data
    protocol_mapping = {
        "Protocol_ICMP": "ICMP",
        "Protocol_TCP": "TCP",
@ -91,8 +200,6 @@ def ddos(request):
        "Protocol_DNS": "DNS"
    }

    ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur']
    ddos_sums = {col: int(data[col].sum()) for col in ddos_columns}
    ddos_sums['byteperflow'] /= 15
@ -101,33 +208,38 @@ def ddos(request):
    src_ip_counts = data['src_ip'].value_counts()
    src_ip_dict = src_ip_counts.to_dict()

    dest_ip_counts = data['dst_ip'].value_counts()
    dest_ip_dict = dest_ip_counts.to_dict()

    protocol_columns = data.columns[7:19]
    protocol_counts = {}
    for protocol in protocol_columns:
        short_form = protocol_mapping.get(protocol, protocol)  # Default to the original name if not found
        protocol_counts[short_form] = int((data[protocol] == 1).sum())
    print(protocol_counts)

    # Filter rows where the attack probability is above 0.9
    filtered_data = data[data['probability'] > 0.9]
    src_ip_counts2 = filtered_data['src_ip'].value_counts()
    src_ip_dict2 = src_ip_counts2.to_dict()

    # Render the response with the DDoS data and the device pod
    try:
        rendered_html = render_to_string('ddos/ddos.html', {
            'ddos_sums': ddos_sums,
            'src_ip_dict': src_ip_dict,
            'dest_ip_dict': dest_ip_dict,
            'protocol_counts': protocol_counts,
            'src_ip_dict2': src_ip_dict2,
            'device_pod': device_pod,  # Include the device pod in the context
        })
        return HttpResponse(rendered_html, status=200)
    except Exception as e:
        print(f"Error processing data: {e}")
        return JsonResponse({'error': 'Error processing DDoS data'}, status=500)
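For reference, a hypothetical client call against this view (URL prefix and host are assumptions; only the device_id query parameter and the login requirement come from the code above):

import requests

session = requests.Session()
# ... authenticate the session against the login view first (site-specific) ...
resp = session.get('http://localhost:8000/ddos/', params={'device_id': 42})
print(resp.status_code)  # 200 with rendered HTML, or a JSON error payload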
#================================================================================

@never_cache
@ -362,18 +474,121 @@ def processes_log(request):
def dma(request):
    return render(request, 'dma/dma.html')

# The previous shared-file implementation, kept for reference:
# def get_combined_files():
#     df1 = pd.read_csv('media/malware_predictions/bytes_predictions_KNeighborsClassifier.csv')
#     df2 = pd.read_csv('media/malware_predictions/bytes_predictions_RandomForestClassifier.csv')
#     df3 = pd.read_csv('media/malware_predictions/latest_malware_bytes_predictions_SGD.csv')
#     df4 = pd.read_csv('media/malware_predictions/latest_malware_bytes_predictions_XGB.csv')
#
#     # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv')
#     # df2 = pd.read_csv('media/temp/bytes_predictions_RandomForestClassifier.csv')
#     # df3 = pd.read_csv('media/temp/bytes_predictions_SGDClassifier.csv')
#     # df4 = pd.read_csv('media/temp/bytes_predictions_XGBooster.csv')
#
#     # Step 2: Create a new DataFrame to hold combined results
#     combined_data1 = pd.DataFrame()
#
#     # Step 3: Combine predictions
#     combined_data1['File'] = df1['File']  # Assuming all files are the same
#     combined_data1['Predicted Class'] = df1['Predicted Class']  # Placeholder
#     combined_data1['Prediction Probability'] = 0.0  # Initialize probability column
#     max_length = max(len(df1), len(df2), len(df3), len(df4))
#
#     # Step 4: Loop through each row and calculate the highest probability and average
#     # for i in range(len(df1)):
#     #     # Get probabilities from all models
#     #     probs = [
#     #         df1['Prediction Probability'][i],
#     #         df2['Prediction Probability'][i],
#     #         df3['Prediction Probability'][i],
#     #         df4['Prediction Probability'][i],
#     #     ]
#     #
#     #     # Get predicted classes
#     #     classes = [
#     #         df1['Predicted Class'][i],
#     #         df2['Predicted Class'][i],
#     #         df3['Predicted Class'][i],
#     #         df4['Predicted Class'][i],
#     #     ]
#     #
#     #     # Find the index of the highest probability
#     #     max_index = probs.index(max(probs))
#     #
#     #     # Set the highest predicted class
#     #     combined_data1.at[i, 'Predicted Class'] = classes[max_index]
#     #
#     #     # Calculate the average probability
#     #     combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs)
#
#     for i in range(max_length):
#         probs, classes = [], []
#
#         for df in [df1, df2, df3, df4]:
#             try:
#                 probs.append(df['Prediction Probability'].iloc[i])
#                 classes.append(df['Predicted Class'].iloc[i])
#             except IndexError:
#                 # Skip if the row does not exist in this DataFrame
#                 pass
#
#         if probs and classes:
#             max_index = probs.index(max(probs))
#             combined_data1.at[i, 'Predicted Class'] = classes[max_index]
#             combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs)
#
#     df5 = pd.read_csv('media/malware_predictions/latest_malware_ASM_predictions_KNeighbours.csv')
#     df6 = pd.read_csv('media/malware_predictions/latest_malware_ASM_predictions_LogisticRegression.csv')
#     df7 = pd.read_csv('media/malware_predictions/latest_malware_ASM_predictions_RandomForest.csv')
#     df8 = pd.read_csv('media/malware_predictions/latest_malware_ASM_predictions_XGB.csv')
#
#     combined_data2 = pd.DataFrame()
#
#     # Step 3: Combine predictions
#     combined_data2['File'] = df5['File']  # Assuming all files are the same
#     combined_data2['Predicted Class'] = df5['Predicted Class']  # Placeholder
#     combined_data2['Prediction Probability'] = 0.0  # Initialize probability column
#
#     # Step 4: Loop through each row and calculate the highest probability and average
#     for i in range(len(df5)):
#         # Get probabilities from all models
#         probs = [
#             df5['Prediction Probability'][i],
#             df6['Prediction Probability'][i],
#             df7['Prediction Probability'][i],
#             df8['Prediction Probability'][i],
#         ]
#
#         # Get predicted classes
#         classes = [
#             df5['Predicted Class'][i],
#             df6['Predicted Class'][i],
#             df7['Predicted Class'][i],
#             df8['Predicted Class'][i],
#         ]
#
#         # Find the index of the highest probability
#         max_index = probs.index(max(probs))
#
#         # Set the highest predicted class
#         combined_data2.at[i, 'Predicted Class'] = classes[max_index]
#
#         # Use the winning model's probability (average kept for reference)
#         # combined_data2.at[i, 'Prediction Probability'] = sum(probs) / len(probs)
#         combined_data2.at[i, 'Prediction Probability'] = probs[max_index]
#
#     combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True)
#
#     return combined_data

def get_combined_files(device_id):
    try:
        df1 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_KNeighborsClassifier.csv')
        df2 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_RandomForestClassifier.csv')
        df3 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_SGDClassifier.csv')
        df4 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_XGBClassifier.csv')

        # Step 2: Create a new DataFrame to hold combined results
        combined_data1 = pd.DataFrame()
@ -425,10 +640,13 @@ def get_combined_files():
                max_index = probs.index(max(probs))
                combined_data1.at[i, 'Predicted Class'] = classes[max_index]
                combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs)

        df5 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_KNeighborsClassifier.csv')
        df6 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_LogisticRegression.csv')
        df7 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_RandomForestClassifier.csv')
        df8 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_XGBClassifier.csv')

        combined_data2 = pd.DataFrame()

@ -436,45 +654,85 @@ def get_combined_files():
        # Step 3: Combine predictions
        combined_data2['File'] = df5['File']  # Assuming all files are the same
        combined_data2['Predicted Class'] = df5['Predicted Class']  # Placeholder
        combined_data2['Prediction Probability'] = 0.0  # Initialize probability column
        max_length = max(len(df5), len(df6), len(df7), len(df8))

        # Step 4: Loop through each row, take the most confident model's class,
        # and record the winning probability
        for i in range(max_length):
            probs, classes = [], []

            for df in [df5, df6, df7, df8]:
                try:
                    probs.append(df['Prediction Probability'].iloc[i])
                    classes.append(df['Predicted Class'].iloc[i])
                except IndexError:
                    # Skip if the row does not exist in this DataFrame
                    pass

            if probs and classes:
                max_index = probs.index(max(probs))

                # Set the highest predicted class
                combined_data2.at[i, 'Predicted Class'] = classes[max_index]

                # Use the winning model's probability (average kept for reference)
                # combined_data2.at[i, 'Prediction Probability'] = sum(probs) / len(probs)
                combined_data2.at[i, 'Prediction Probability'] = probs[max_index]

        combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True)

        return combined_data
    except FileNotFoundError as e:
        # Handle missing files: log the error and return an empty DataFrame
        print(f"File not found: {e}")
        return pd.DataFrame()
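The loops above implement a simple ensemble rule: for each row, take the class from the most confident model, then record either the averaged probability (bytes models) or the winner's probability (ASM models). A standalone sketch of the rule with made-up numbers:

probs = [0.62, 0.91, 0.55, 0.70]     # one row's probabilities from four models
classes = [3, 3, 1, 3]               # the corresponding predicted classes
max_index = probs.index(max(probs))  # index 1: the most confident model
assert classes[max_index] == 3                        # winning class
assert abs(sum(probs) / len(probs) - 0.695) < 1e-9    # averaged probability (bytes rule)
assert probs[max_index] == 0.91                       # winner's probability (ASM rule)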
@login_required(login_url='login')
@never_cache
def malware(request):
    print("Inside the malware view...")

    device_id = request.GET.get('device_id', None)
    print(f"Device ID from request: {device_id}")

    if not device_id:
        print("No device ID provided. Fetching the latest device for the logged-in user.")
        recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first()
        if recent_device:
            device_id = recent_device.id
            print(f"Fetched recent device ID: {device_id}")
        else:
            print("No devices found for the logged-in user.")

    # Fetch combined data
    combined_data = get_combined_files(device_id)

    # If the data is empty, show a message
    if combined_data.empty:
        message = "Data is still being captured. Please try again later."
        return render(request, 'malware/malware.html', {'message': message})

    class_names = {
        1: "Ramnit",
@ -488,24 +746,16 @@ def malware(request):
        9: "Gatak"
    }

    high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9]
    files_list = high_probability_files['File'].tolist()

    files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)]
    frequency = files70_90['Predicted Class'].value_counts().sort_index()
    complete_index = pd.Index(range(10))
    frequency = frequency.reindex(complete_index, fill_value=0)
    print(frequency, 'in the frequency')
    # if frequency:
    #     print("Check_malware_frequency")

    all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index()
    frequency_with_names = all_frequency.rename(class_names)
    print(frequency_with_names, 'with name')

    avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index()
    all_classes = pd.DataFrame({'Predicted Class': range(1, 10)})
@ -513,7 +763,6 @@ def malware(request):
    avg_probability['Prediction Probability'].fillna(0, inplace=True)
    avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names)
    average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability']))
    print(average_probability_dict, "avg is here")

    file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt')
    data = None
@ -523,9 +772,68 @@ def malware(request):
    except:
        pass

    return render(request, 'malware/malware.html', {
        'files_list': files_list,
        'frequency': frequency.to_dict(),
        'class_frequency': frequency_with_names.to_dict(),
        'average': average_probability_dict,
        "logs": data,
        'message': None  # Clear the message when data is available
    })
# The previous implementation, kept for reference:
# def malware(request):
#     combined_data = get_combined_files()
#
#     class_names = {
#         1: "Ramnit",
#         2: "Lollipop",
#         3: "Kelihos_ver3",
#         4: "Vundo",
#         5: "Simda",
#         6: "Tracur",
#         7: "Kelihos_ver1",
#         8: "Obfuscator.ACY",
#         9: "Gatak"
#     }
#
#     high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9]
#     files_list = high_probability_files['File'].tolist()
#
#     files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)]
#     frequency = files70_90['Predicted Class'].value_counts().sort_index()
#     complete_index = pd.Index(range(10))
#     frequency = frequency.reindex(complete_index, fill_value=0)
#     print(frequency, 'in the frequency')
#     # if frequency:
#     #     print("Check_malware_frequency")
#
#     all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index()
#     frequency_with_names = all_frequency.rename(class_names)
#     print(frequency_with_names, 'with name')
#
#     avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index()
#     all_classes = pd.DataFrame({'Predicted Class': range(1, 10)})
#     avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left')
#     avg_probability['Prediction Probability'].fillna(0, inplace=True)
#     avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names)
#     average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability']))
#     print(average_probability_dict, "avg is here")
#
#     file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt')
#     data = None
#     try:
#         with open(file_path, 'r') as file:
#             data = file.readlines()[::-1]  # Reverse lines for latest logs
#     except:
#         pass
#
#     return render(request, 'malware/malware.html', {'files_list': files_list, 'frequency': frequency.to_dict(), 'class_frequency': frequency_with_names.to_dict(), 'average': average_probability_dict, "logs": data})

@never_cache
def bye_asm_log(request):
@ -559,11 +867,63 @@ def bye_asm_log(request):
# @login_required(login_url='login')
# @never_cache
# def ransomware(request):
#     file_path = 'media/logs/usage_log.txt'
#     cpu_data = []
#     memory_data = []
#
#     # Read data from the log file
#     if os.path.exists(file_path):
#         with open(file_path, 'r') as f:
#             lines = f.readlines()
#
#         # Extract the last 5 entries
#         lines = lines[-5:]
#
#         for line in lines:
#             # Parse CPU and memory usage from each line
#             parts = line.strip().split(",")
#             cpu_usage = parts[0]
#             memory_usage = parts[1]
#             cpu_data.append(cpu_usage)
#             memory_data.append(memory_usage)
#
#     csv_file_path = 'media/ransomware_predictions/latest_ransomware_type.csv'  # Replace with your actual CSV file path
#     df = pd.read_csv(csv_file_path)
#     mapping_file_path = 'media/ransomware_predictions/mapping_win.txt'
#     mapping_df = pd.read_csv(mapping_file_path, header=None, names=['predicted_class', 'class_name'])
#     class_mapping = dict(zip(mapping_df['predicted_class'], mapping_df['class_name']))
#     df['class_name'] = df['predicted_class'].map(class_mapping)
#     class_frequency = df['class_name'].value_counts()
#     all_classes_df = pd.DataFrame({'class_name': mapping_df['class_name']})
#     all_classes_df['frequency'] = all_classes_df['class_name'].map(class_frequency).fillna(0).astype(int)
#     class_frequency_dict = dict(zip(all_classes_df['class_name'], all_classes_df['frequency']))
#
#     yes_no_path = 'media/ransomware_predictions/ransomware.csv'
#
#     # Reading the CSV file into a DataFrame
#     yes_no = pd.read_csv(yes_no_path)
#
#     # Extracting the value of 'Predicted Label'
#     flag = yes_no[yes_no.columns[-1]].iloc[0]
#     time = yes_no[yes_no.columns[-2]].iloc[0]
#
#     return render(request, 'ransomware/ransomware.html', context={'type': class_frequency_dict, 'cpu': json.dumps(cpu_data), 'memory': json.dumps(memory_data), 'flag': flag, 'time': time})
@login_required(login_url='login')
@never_cache
def ransomware(request):
    file_path = os.path.join('media', 'logs', 'usage_log.txt')
    cpu_data = []
    memory_data = []

@ -578,40 +938,118 @@ def ransomware(request):
        for line in lines:
            # Parse CPU and memory usage from each line
            parts = line.strip().split(",")
            if len(parts) >= 2:
                cpu_usage = parts[0]
                memory_usage = parts[1]
                cpu_data.append(cpu_usage)
                memory_data.append(memory_usage)
            else:
                print(f"Skipping malformed line: {line}")
    else:
        print(f"Usage log file not found at path: {file_path}")

    device_id = request.GET.get('device_id', None)
    # device_id = 53
    print(f"Device ID from request: {device_id}")

    # If no device_id is found in the request, get the latest device for the logged-in user
    if not device_id:
        print("No device ID found. Fetching the latest device for the logged-in user.")
        recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first()

        if recent_device:
            device_id = recent_device.id  # Use the actual device ID from the database
            print(f"Fetched recent device ID: {device_id}")
        else:
            print("No devices found for the logged-in user.")
            return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404)

    # Construct the file paths using os.path.join
    csv_file_path = os.path.join('media', 'ransomware_predictions', str(device_id), 'latest_ransomware_type.csv')
    mapping_file_path = os.path.join('media', 'ransomware_predictions', 'mapping_win.txt')
    yes_no_path = os.path.join('media', 'ransomware_predictions', 'ransomware.csv')

    # Debugging: print the file paths
    print(f"CSV file path: {csv_file_path}")
    print(f"Mapping file path: {mapping_file_path}")
    print(f"Yes/No file path: {yes_no_path}")

    # Initialize variables to hold processed data
    class_frequency_dict = {}
    flag = None
    time = None

    # Process the latest ransomware type CSV
    try:
        if not os.path.exists(csv_file_path):
            raise FileNotFoundError(f"CSV file not found at path: {csv_file_path}")

        # Load the ransomware type CSV
        df = pd.read_csv(csv_file_path)
        print(f"Loaded ransomware type CSV: {csv_file_path}")

        # Load the mapping file
        if not os.path.exists(mapping_file_path):
            raise FileNotFoundError(f"Mapping file not found at path: {mapping_file_path}")

        mapping_df = pd.read_csv(mapping_file_path, header=None, names=['predicted_class', 'class_name'])
        class_mapping = dict(zip(mapping_df['predicted_class'], mapping_df['class_name']))
        print("Loaded mapping file and created class mapping dictionary.")

        # Map predicted classes to class names
        df['class_name'] = df['predicted_class'].map(class_mapping)
        class_frequency = df['class_name'].value_counts()

        # Ensure all classes from the mapping are present in the frequency dictionary
        all_classes_df = pd.DataFrame({'class_name': mapping_df['class_name']})
        all_classes_df['frequency'] = all_classes_df['class_name'].map(class_frequency).fillna(0).astype(int)
        class_frequency_dict = dict(zip(all_classes_df['class_name'], all_classes_df['frequency']))

        print(f"Class frequency dictionary: {class_frequency_dict}")

    except FileNotFoundError as e:
        print(f"FileNotFoundError: {str(e)}")
        return JsonResponse({'error': str(e)}, status=404)
    except Exception as e:
        print(f"Exception while processing ransomware type CSV: {str(e)}")
        return JsonResponse({'error': f"Error processing ransomware type CSV: {str(e)}"}, status=500)

    # Process the ransomware flag CSV
    try:
        if not os.path.exists(yes_no_path):
            raise FileNotFoundError(f"Ransomware CSV file not found at path: {yes_no_path}")

        # Load the ransomware flag CSV
        yes_no = pd.read_csv(yes_no_path)
        print('Loaded ransomware flag CSV:', yes_no)

        if yes_no.empty:
            raise ValueError("Ransomware CSV file is empty.")

        # Extract the 'Predicted Label' and 'Time' values
        flag = yes_no.iloc[0, -1]  # Assuming 'Predicted Label' is the last column
        time = yes_no.iloc[0, -2]  # Assuming 'Time' is the second-to-last column

        print(f"Extracted flag: {flag}, time: {time}")

    except FileNotFoundError as e:
        print(f"FileNotFoundError: {str(e)}")
        return JsonResponse({'error': str(e)}, status=404)
    except Exception as e:
        print(f"Exception while processing ransomware flag CSV: {str(e)}")
        return JsonResponse({'error': f"Error processing ransomware flag CSV: {str(e)}"}, status=500)

    # Prepare the context for rendering the template
    context = {
        'type': class_frequency_dict,
        'cpu': json.dumps(cpu_data),
        'memory': json.dumps(memory_data),
        'flag': flag,
        'time': time
    }

    return render(request, 'ransomware/ransomware.html', context=context)
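For reference, the parser above expects each usage_log.txt line to be comma-separated with CPU usage first and memory usage second; shorter lines are skipped. An illustrative line (values made up):

37.5,62.1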
#==================================================================================================
@ -1589,9 +2027,44 @@ def ransomware_predictions(request):
    return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})

# The previous implementation, kept for reference:
# @api_view(['POST'])
# def ransomware_type_predictions(request):
#     try:
#         csv_file = request.FILES.get('file')
#
#         if not csv_file:
#             return JsonResponse({'error': 'No file provided'}, status=400)
#
#         if not csv_file.name.endswith('.csv'):
#             return JsonResponse({'error': 'File is not CSV'}, status=400)
#
#         # Define the directory and file path where the CSV will be stored
#         folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions')
#
#         # Make sure the directory exists
#         if not os.path.exists(folder_path):
#             os.makedirs(folder_path)
#
#         # Define the path for the file (always named 'latest_ransomware_type.csv')
#         save_path = os.path.join(folder_path, 'latest_ransomware_type.csv')
#
#         # If the file already exists, remove it to ensure overwriting
#         if os.path.exists(save_path):
#             os.remove(save_path)
#
#         # Save the new file
#         with open(save_path, 'wb+') as destination:
#             for chunk in csv_file.chunks():
#                 destination.write(chunk)
#
#         return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})
#     except Exception as e:
#         print(e)
@api_view(['POST'])
def ransomware_type_predictions(request):
    try:
        csv_file = request.FILES.get('file')

@ -1602,7 +2075,48 @@ def ransomware_type_predictions(request):
            return JsonResponse({'error': 'File is not CSV'}, status=400)

        # Extract user_id from the request (device_id is not needed here)
        user_id = request.data.get('user_id')

        if not user_id:
            return JsonResponse({'error': 'User ID is required'}, status=400)

        try:
            # Retrieve the UserProfile based on the provided user_id
            user_profile = UserProfile.objects.get(user__id=user_id)
            print(user_profile)

            # Get the device IDs associated with the user
            device_ids = get_device_ids_by_user_id(user_id)
            print(f"Device IDs: {device_ids}")

            # Check if the user has devices associated with them
            if not device_ids:
                return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)

            # Use the most recent device associated with the user
            device = Devices.objects.get(id=device_ids[-1])
            print(f"Device ID: {device.id}")
            folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions', str(device.id))

            # Make sure the directory exists
            if not os.path.exists(folder_path):
@ -1620,40 +2134,136 @@ def ransomware_type_predictions(request):
                for chunk in csv_file.chunks():
                    destination.write(chunk)

            # Create the Rensomware_TypePrediction record
            rensomware_type_prediction = Rensomware_TypePrediction.objects.create(
                device=device,
                user=user_profile,
                file_path=save_path
            )

            return JsonResponse({
                'message': 'File uploaded and prediction saved successfully',
                'file_path': save_path,
                'prediction_id': rensomware_type_prediction.id
            })

        except UserProfile.DoesNotExist:
            return JsonResponse({'error': 'User not found'}, status=404)
        except Devices.DoesNotExist:
            return JsonResponse({'error': 'Device not found'}, status=404)
    except Exception as e:
        print(e)
def get_device_ids_by_user_id(user_id):
    try:
        # Get the UserProfile instance using the user ID
        user_profile = UserProfile.objects.get(user__id=user_id)
        print('user_profile', user_profile)

        # Retrieve all Devices associated with this UserProfile
        devices = Devices.objects.filter(used_by=user_profile)
        print('devices', devices)

        # Get the device IDs
        device_ids = [device.id for device in devices]
        return device_ids
    except UserProfile.DoesNotExist:
        return []
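A small usage note on the helper above: the upload views take device_ids[-1] as "the most recent device", which relies on the queryset's default ordering. A sketch of a safer variant (an assumption, not part of the commit) orders explicitly:

def get_newest_device_id(user_id):
    ids = get_device_ids_by_user_id(user_id)
    return max(ids) if ids else None  # explicit: highest ID = newest device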
# The previous implementation, kept for reference:
# @api_view(['POST'])
# def ddos_predictions(request):
#     csv_file = request.FILES.get('file')
#
#     if not csv_file:
#         return JsonResponse({'error': 'No file provided'}, status=400)
#
#     if not csv_file.name.endswith('.csv'):
#         return JsonResponse({'error': 'File is not CSV'}, status=400)
#
#     # Define the directory and file path where the CSV will be stored
#     folder_path = os.path.join(settings.MEDIA_ROOT, 'ddos_predictions')
#
#     # Make sure the directory exists
#     if not os.path.exists(folder_path):
#         os.makedirs(folder_path)
#
#     # Define the path for the file (always named 'predictions.csv')
#     save_path = os.path.join(folder_path, 'predictions.csv')
#
#     # If the file already exists, remove it to ensure overwriting
#     if os.path.exists(save_path):
#         os.remove(save_path)
#
#     # Save the new file
#     with open(save_path, 'wb+') as destination:
#         for chunk in csv_file.chunks():
#             destination.write(chunk)
#
#     return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})

from rest_framework.permissions import AllowAny
from rest_framework.decorators import permission_classes
@api_view(['POST'])
@permission_classes([AllowAny])
def ddos_predictions(request):
    # Check if a file is provided in the request
    csv_file = request.FILES.get('file')
    if not csv_file:
        return JsonResponse({'error': 'No file provided'}, status=400)

    # Ensure the file is a CSV
    if not csv_file.name.endswith('.csv'):
        return JsonResponse({'error': 'File is not CSV'}, status=400)

    # Extract user_id from the request data
    user_id = request.data.get('user_id')
    device_ids = get_device_ids_by_user_id(user_id)

    # Check if the user has associated devices
    if not device_ids:
        return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)

    try:
        # Retrieve the UserProfile for the given user
        user_profile = UserProfile.objects.get(user__id=user_id)

        # Get the most recent device associated with the user
        device = Devices.objects.get(id=device_ids[-1])

        # Define the directory path: MEDIA_ROOT/ddos_predictions/<device_id>
        folder_path = os.path.join(settings.MEDIA_ROOT, 'ddos_predictions', str(device.id))
        os.makedirs(folder_path, exist_ok=True)  # Ensure the folder exists

        # Define the file path: predictions.csv
        file_name = 'predictions.csv'
        save_path = os.path.join(folder_path, file_name)

        # Save the file in chunks
        with open(save_path, 'wb+') as destination:
            for chunk in csv_file.chunks():
                destination.write(chunk)

        # Create a DdosPrediction record with the relative file path
        relative_path = os.path.relpath(save_path, settings.MEDIA_ROOT)  # Store the path relative to MEDIA_ROOT
        ddos_prediction = DdosPrediction.objects.create(
            device=device,
            user=user_profile,
            file_path=relative_path
        )

        # Return a success response
        return JsonResponse({
            'message': 'File uploaded and prediction saved successfully',
            'file_path': relative_path,
            'prediction_id': ddos_prediction.id
        })

    except UserProfile.DoesNotExist:
        return JsonResponse({'error': 'User profile not found'}, status=404)
    except Exception as e:
        return JsonResponse({'error': f'An unexpected error occurred: {str(e)}'}, status=500)
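For reference, a hypothetical client upload against this endpoint (the URL is an assumption; the 'file' and 'user_id' field names come from the view above):

import requests

with open('predictions.csv', 'rb') as f:
    resp = requests.post('http://localhost:8000/ddos_predictions/',
                         files={'file': ('predictions.csv', f, 'text/csv')},
                         data={'user_id': 7})
print(resp.json())  # message, file_path, prediction_id on success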
@api_view(['POST'])
def usage_log(request):
Binary file not shown.
Binary file not shown.
BIN
helpdesk/management/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
26
malware/migrations/0003_malwarepredictionsdevice.py
Normal file
@ -0,0 +1,26 @@
# Generated by Django 5.1.3 on 2024-12-10 09:51

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('Accounts', '0003_userprofile_company_name'),
        ('Device', '0007_devices_mac_address_devices_unique_id'),
        ('malware', '0002_malwareprediction_model_type'),
    ]

    operations = [
        migrations.CreateModel(
            name='MalwarePredictionsDevice',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_path', models.FileField(upload_to='malware_predictions/')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
                ('device', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Device.devices')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Accounts.userprofile')),
            ],
        ),
    ]
Binary file not shown.
@ -1,4 +1,6 @@
from django.db import models
from Device.models import Devices
from Accounts.models import UserProfile

# Create your models here.

@ -19,3 +21,13 @@ class MalwarePrediction(models.Model):
    def __str__(self):
        return f"{self.process_name} - {self.predicted_malware} - {self.get_model_type_display()}"


class MalwarePredictionsDevice(models.Model):
    device = models.ForeignKey(Devices, on_delete=models.CASCADE, null=True)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE, null=True)  # References the user who uploaded the prediction
    file_path = models.FileField(upload_to='malware_predictions/')
    uploaded_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f"Prediction for {self.device.device_name} by {self.user.user.username} at {self.uploaded_at}"
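Because device and user are nullable on MalwarePredictionsDevice, the __str__ above raises AttributeError for rows saved without them. A defensive variant, as a sketch:

def __str__(self):
    device_name = self.device.device_name if self.device else 'unknown device'
    username = self.user.user.username if self.user else 'unknown user'
    return f"Prediction for {device_name} by {username} at {self.uploaded_at}"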
693
malware/views.py
@ -18,12 +18,386 @@ from django.core.files.storage import default_storage
from rest_framework.parsers import MultiPartParser
from django.conf import settings
from django.http import HttpResponse
from .models import MalwarePrediction, MalwarePredictionsDevice
from .serializers import MalwarePredictionSerializer
from Device.models import Devices
from Accounts.models import UserProfile
from django.utils import timezone
from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt


class MalwarePredictionAPIView(APIView):
    parser_classes = [MultiPartParser]  # To handle file uploads

    @staticmethod
    def get_device_ids_by_user_id(user_id):
        try:
            # Get the UserProfile instance using the user ID
            user_profile = UserProfile.objects.get(user__id=user_id)
            print('user_profile', user_profile)

            # Retrieve all Devices associated with this UserProfile
            devices = Devices.objects.filter(used_by=user_profile)
            print('devices', devices)

            # Get the device IDs
            device_ids = [device.id for device in devices]
            return device_ids
        except UserProfile.DoesNotExist:
            return []
    def post(self, request, *args, **kwargs):
        if 'csv_file' not in request.FILES:
            return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)

        csv_file = request.FILES['csv_file']
        user_id = request.data.get('user_id')
        if not user_id:
            return Response({"error": "User ID is required"}, status=status.HTTP_400_BAD_REQUEST)

        device_ids = self.get_device_ids_by_user_id(user_id)
        print(f"Device IDs: {device_ids}")
        if not device_ids:
            return Response({'error': 'No devices associated with the given user ID'}, status=status.HTTP_400_BAD_REQUEST)

        try:
            # device = Devices.objects.filter(id__in=device_ids).order_by('-created_at').first()  # Use the first device ID

            # Get the most recent device associated with the user
            device = Devices.objects.get(id=device_ids[-1])
            print(f"Device ID: {device.id}")
        except Devices.DoesNotExist:
            return Response({"error": "Device not found for the given device ID"}, status=status.HTTP_400_BAD_REQUEST)

        try:
            # Define the per-device directory path
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'malware_predictions', str(device.id))

            # Create the directory if it doesn't exist
            if not os.path.exists(temp_dir):
                os.makedirs(temp_dir)

            # Save the file temporarily
            temp_file_path = os.path.join(temp_dir, csv_file.name)
            with default_storage.open(temp_file_path, 'wb+') as destination:
                for chunk in csv_file.chunks():
                    destination.write(chunk)

            # Read the CSV file with headers
            df = pd.read_csv(temp_file_path)

            # Extract column names from the CSV
            actual_columns = df.columns.tolist()

        except Exception as e:
            return Response({"error": "Could not read the CSV file", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

        # Define the expected column names
        expected_columns = ['process_name', 'class', 'probability_of_malware']

        # Mapping logic: rename positionally if the headers differ
        if actual_columns != expected_columns:
            # Map actual column names to the expected ones
            column_mapping = dict(zip(actual_columns, expected_columns))
            df.rename(columns=column_mapping, inplace=True)

        # Continue with processing the dataframe...
        records_saved = 0
        for _, row in df.iterrows():
            process_name = row['process_name']
            process_class = row['class']
            probability = row['probability_of_malware']

            MalwarePrediction.objects.create(
                process_name=process_name,
                process_class=process_class,
                probability_of_malware=probability,
            )
            records_saved += 1
        # print(data_sent, "dataaaaaa")

        return Response({"message": f"{records_saved} records saved successfully!"}, status=status.HTTP_201_CREATED)

    def get(self, request, *args, **kwargs):
        # Query all MalwarePrediction records from the database
        predictions = MalwarePrediction.objects.all()

        if not predictions.exists():
            return Response({"error": "No data available to generate graph."}, status=status.HTTP_404_NOT_FOUND)

        # Create a DataFrame from the queryset
        data = {
            'process_name': [p.process_name for p in predictions],
            'class': [p.process_class for p in predictions],
            'probability_of_malware': [p.probability_of_malware for p in predictions]
        }
        df = pd.DataFrame(data)

        # Plot using seaborn/matplotlib
        plt.figure(figsize=(10, 6))

        # Bar plot: class on the x-axis, probability on the y-axis
        sns.barplot(
            data=df,
            x='class',                   # Independent variable (x-axis)
            y='probability_of_malware',  # Dependent variable (y-axis)
            ci=None,                     # No confidence intervals
            palette='Set2'               # Use a color palette for different classes
        )

        plt.title('Malware Probability by Class')
        plt.xlabel('Class')
        plt.ylabel('Probability of Malware')
        plt.tight_layout()

        # Save the plot to a bytes buffer
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)

        # Return the image as a response
        return HttpResponse(buf, content_type='image/png')
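For reference, a hypothetical client call for the GET endpoint above, which returns the rendered PNG (the URL is an assumption):

import requests

resp = requests.get('http://localhost:8000/api/malware-predictions/')
if resp.status_code == 200:
    with open('malware_probability.png', 'wb') as out:
        out.write(resp.content)  # the PNG produced by matplotlib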
# class MalwarePredictionAPIView(APIView):
#     parser_classes = [MultiPartParser]  # To handle file uploads

#     @staticmethod
#     def get_device_ids_by_user_id(user_id):
#         try:
#             # Get the UserProfile instance using the user ID
#             user_profile = UserProfile.objects.get(user__id=user_id)
#             print('user_profile', user_profile)

#             # Retrieve all Devices associated with this UserProfile
#             devices = Devices.objects.filter(used_by=user_profile)
#             print('devices', devices)

#             # Get the device IDs
#             device_ids = [device.id for device in devices]
#             return device_ids
#         except UserProfile.DoesNotExist:
#             return []

#     def post(self, request, *args, **kwargs):
#         if 'csv_file' not in request.FILES:
#             return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)

#         csv_file = request.FILES['csv_file']

#         # Retrieve user ID from the request
#         user_id = request.data.get('user_id')  # Ensure 'user_id' is being sent in the request body
#         print("user_id ", user_id)

#         if not user_id:
#             return Response({"error": "User ID is required"}, status=status.HTTP_400_BAD_REQUEST)

#         # Get the device IDs associated with the user
#         device_ids = self.get_device_ids_by_user_id(user_id)
#         print(f"Device IDs: {device_ids}")

#         # Fetch the first associated device for the user
#         if not device_ids:
#             return Response({'error': 'No devices associated with the given user ID'}, status=status.HTTP_400_BAD_REQUEST)

#         try:
#             # device = Devices.objects.filter(id__in=device_ids).order_by('-created_at').first()  # Use the first device ID
#             device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first()
#             print(f"Device ID: {device.id}")
#         except Devices.DoesNotExist:
#             return Response({"error": "Device not found for the given device ID"}, status=status.HTTP_400_BAD_REQUEST)

#         try:
#             user_profile = UserProfile.objects.get(user__id=user_id)
#             print(user_profile)
#         except UserProfile.DoesNotExist:
#             return Response({"error": "User profile not found"}, status=status.HTTP_400_BAD_REQUEST)

#         try:
#             # Define the temp directory path using the device ID
#             temp_dir = os.path.join(settings.MEDIA_ROOT, 'malware_predictions')

#             # Create the temp directory if it doesn't exist
#             if not os.path.exists(temp_dir):
#                 os.makedirs(temp_dir)

#             # Save the file temporarily
#             temp_file_path = os.path.join(temp_dir, csv_file.name)
#             with default_storage.open(temp_file_path, 'wb+') as destination:
#                 for chunk in csv_file.chunks():
#                     destination.write(chunk)

#             # Read the CSV file with headers
#             df = pd.read_csv(temp_file_path)

#             # Extract column names from the CSV
#             actual_columns = df.columns.tolist()
#         except Exception as e:
#             return Response({"error": "Could not read the CSV file", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

#         # Define the expected column names
#         expected_columns = ['process_name', 'class', 'probability_of_malware']

#         # Mapping logic
#         if actual_columns != expected_columns:
#             # Map actual column names to expected ones
#             column_mapping = dict(zip(actual_columns, expected_columns))
#             df.rename(columns=column_mapping, inplace=True)

#         # Save the data to the database
#         records_saved = 0
#         for _, row in df.iterrows():
#             try:
#                 process_name = row['process_name']
#                 process_class = row['class']
#                 probability = float(row['probability_of_malware'])  # Ensure it's a number
#             except ValueError:
#                 return Response({
#                     "error": f"Invalid value in 'probability_of_malware': {row['probability_of_malware']}"
#                 }, status=status.HTTP_400_BAD_REQUEST)

#             # MalwarePredictionsDevice.objects.create(
#             #     device=device,  # Pass the Devices instance here
#             #     user=user_profile,  # This will reference the user related to the device
#             #     file_path=temp_file_path,  # The path to the uploaded file
#             # )
#             MalwarePrediction.objects.create(
#                 process_name=process_name,
#                 process_class=process_class,
#                 probability_of_malware=probability,
#             )
#             records_saved += 1

#         return Response({
#             "message": f"{records_saved} records saved successfully!",
#         }, status=status.HTTP_201_CREATED)

# class MalwarePredictionAPIView(APIView):
#     parser_classes = [MultiPartParser]  # To handle file uploads

#     @staticmethod
#     def get_device_ids_by_user_id(user_id):
#         try:
#             # Get the UserProfile instance using the user ID
#             user_profile = UserProfile.objects.get(user__id=user_id)
#             print('user_profile', user_profile)

#             # Retrieve all Devices associated with this UserProfile
#             devices = Devices.objects.filter(used_by=user_profile)
#             print('devices', devices)

#             # Get the device IDs
#             device_ids = [device.id for device in devices]
#             return device_ids
#         except UserProfile.DoesNotExist:
#             return []

#     def post(self, request, *args, **kwargs):
#         if 'csv_file' not in request.FILES:
#             return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)

#         csv_file = request.FILES.get('csv_file')
#         if not csv_file:
#             return Response({"error": "No CSV file provided"}, status=status.HTTP_400_BAD_REQUEST)

#         user_id = request.data.get('user_id')
#         if not user_id:
#             return Response({"error": "User ID is required"}, status=status.HTTP_400_BAD_REQUEST)

#         # Retrieve associated device IDs for the user
#         device_ids = self.get_device_ids_by_user_id(user_id)
#         print(device_ids)
#         if not device_ids:
#             return Response({"error": "No devices associated with the given user ID"}, status=status.HTTP_400_BAD_REQUEST)

#         # Try to get the most recent device associated with the user
#         try:
#             device = Devices.objects.get(id=device_ids[-1])
#         except Devices.DoesNotExist:
#             return Response({"error": "Device not found for the given device ID"}, status=status.HTTP_400_BAD_REQUEST)

#         # Define the temp directory path for saving the file
#         temp_dir = os.path.join(settings.MEDIA_ROOT, 'malware_predictions', f'device_{device.id}')

#         # Create the 'temp' directory if it doesn't exist
#         if not os.path.exists(temp_dir):
#             os.makedirs(temp_dir)

#         # Save the file temporarily
#         temp_file_path = os.path.join(temp_dir, csv_file.name)
#         try:
#             with default_storage.open(temp_file_path, 'wb+') as destination:
#                 for chunk in csv_file.chunks():
#                     destination.write(chunk)
#         except Exception as e:
#             return Response({"error": "Failed to save the file", "details": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

#         # Read the CSV file with headers
#         try:
#             df = pd.read_csv(temp_file_path)
#             actual_columns = df.columns.tolist()
#         except Exception as e:
#             return Response({"error": "Could not read the CSV file", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

#         # Define the expected column names
#         expected_columns = ['process_name', 'class', 'probability_of_malware']

#         # Validate and map columns
#         if actual_columns != expected_columns:
#             if len(actual_columns) == len(expected_columns):
#                 column_mapping = dict(zip(actual_columns, expected_columns))
#                 df.rename(columns=column_mapping, inplace=True)
#             else:
#                 return Response({"error": "CSV columns do not match expected format"}, status=status.HTTP_400_BAD_REQUEST)

#         # Ensure the user profile exists
#         try:
#             user_profile = UserProfile.objects.get(user__id=user_id)
#         except UserProfile.DoesNotExist:
#             return Response({"error": "User profile not found"}, status=status.HTTP_400_BAD_REQUEST)

#         # Save the predictions and create the related record
#         records_saved = 0
#         for _, row in df.iterrows():
#             process_name = row['process_name']
#             process_class = row['class']
#             probability = row['probability_of_malware']

#             try:
#                 # Save malware prediction
#                 MalwarePrediction.objects.create(
#                     process_name=process_name,
#                     process_class=process_class,
#                     probability_of_malware=probability,
#                 )

#                 # Save the device association
#                 MalwarePredictionsDevice.objects.create(
#                     device=device,
#                     user=user_profile,
#                     file_path=temp_file_path,
#                 )
#                 records_saved += 1
#             except Exception as e:
#                 return Response({"error": "Failed to save record", "details": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

#         return Response({
#             "message": f"{records_saved} records saved successfully!"
#         }, status=status.HTTP_201_CREATED)

class KNeighborsModelView(APIView):
    parser_classes = [MultiPartParser]  # To handle file uploads

    def post(self, request, *args, **kwargs):
        if 'csv_file' not in request.FILES:
@ -75,18 +449,321 @@ class MalwarePredictionAPIView(APIView):
                process_name=process_name,
                process_class=process_class,
                probability_of_malware=probability,
                model_type=1
            )
            records_saved += 1

        return Response({"message": f"{records_saved} records saved successfully!"}, status=status.HTTP_201_CREATED)

        return Response({"message": "KNN file saved successfully!"}, status=status.HTTP_201_CREATED)
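For reference, the model_type convention used across these model views is: 1 = KNN, 2 = RandomForest, 3 = XGB, 4 = SGD; each view's get endpoint filters on the same value its post endpoint writes.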
    def get(self, request, *args, **kwargs):
        # Query all MalwarePrediction records from the database
        predictions = MalwarePrediction.objects.all()
        predictions = MalwarePrediction.objects.filter(model_type=1)

        if not predictions.exists():
            return Response({"error": "No data available to generate graph."}, status=status.HTTP_404_NOT_FOUND)

        # Create a DataFrame from the queryset
        data = {
            'process_name': [p.process_name for p in predictions],
            'class': [p.process_class for p in predictions],
            'probability_of_malware': [p.probability_of_malware for p in predictions]
        }
        df = pd.DataFrame(data)

        # Plot using seaborn or matplotlib
        plt.figure(figsize=(10, 6))

        # Create a barplot where the class is on the x-axis and the probability is on the y-axis
        sns.barplot(
            data=df,
            x='class',                   # Independent variable (x-axis)
            y='probability_of_malware',  # Dependent variable (y-axis)
            ci=None,                     # No confidence intervals
            palette='Set2'               # Use a color palette for different classes
        )

        plt.title('Malware Probability by Class')
        plt.xlabel('Class')
        plt.ylabel('Probability of Malware')
        plt.tight_layout()

        # Save the plot to a bytes buffer
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)

        # Return the image as a response
        return HttpResponse(buf, content_type='image/png')

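Exercising one of these upload endpoints from a client looks roughly like the sketch below; the URL path is an assumption, since routing is not part of this diff. Only the 'csv_file' field name is fixed by the view code:

import requests

# Hypothetical path; adjust to the project's actual URLconf.
with open('predictions.csv', 'rb') as fh:
    resp = requests.post(
        'http://localhost:8000/api/models/knn/',
        files={'csv_file': fh},
    )
print(resp.status_code, resp.json())  # expect 201 and a "saved successfully" message
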
class RandomForestModelView(APIView):
    parser_classes = [MultiPartParser]  # To handle file uploads

    def post(self, request, *args, **kwargs):
        if 'csv_file' not in request.FILES:
            return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)

        csv_file = request.FILES['csv_file']

        try:
            # Define the temp directory path
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'temp')

            # Create the 'temp' directory if it doesn't exist
            if not os.path.exists(temp_dir):
                os.makedirs(temp_dir)

            # Save the file temporarily
            temp_file_path = os.path.join(temp_dir, csv_file.name)
            with default_storage.open(temp_file_path, 'wb+') as destination:
                for chunk in csv_file.chunks():
                    destination.write(chunk)

            # Read the CSV file with headers
            df = pd.read_csv(temp_file_path)

            # Extract column names from the CSV
            actual_columns = df.columns.tolist()

        except Exception as e:
            return Response({"error": "Could not read the CSV file", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

        # Define the expected column names
        expected_columns = ['process_name', 'class', 'probability_of_malware']

        # Mapping logic
        if actual_columns != expected_columns:
            # Map actual column names to expected ones
            column_mapping = dict(zip(actual_columns, expected_columns))
            df.rename(columns=column_mapping, inplace=True)

        # Continue with processing the dataframe...
        records_saved = 0
        for _, row in df.iterrows():
            process_name = row['process_name']
            process_class = row['class']
            probability = row['probability_of_malware']

            # Save the row to the database
            MalwarePrediction.objects.create(
                process_name=process_name,
                process_class=process_class,
                probability_of_malware=probability,
                model_type=2
            )
            records_saved += 1

        return Response({"message": "RandomForest file saved successfully!"}, status=status.HTTP_201_CREATED)

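One caveat in the save step above: default_storage.open(path, 'wb+') writes to a fixed location, so two uploads sharing a filename overwrite each other. default_storage.save() picks a unique name instead; a minimal sketch, assuming the default local FileSystemStorage:

import os
from django.core.files.storage import default_storage

# save() dedupes names (predictions.csv -> predictions_Ab3xYz.csv) and
# returns the name it actually used.
saved_name = default_storage.save(os.path.join('temp', csv_file.name), csv_file)
temp_file_path = default_storage.path(saved_name)  # absolute path (local storage only)
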
    def get(self, request, *args, **kwargs):
        # Query all MalwarePrediction records from the database
        predictions = MalwarePrediction.objects.filter(model_type=2)

        if not predictions.exists():
            return Response({"error": "No data available to generate graph."}, status=status.HTTP_404_NOT_FOUND)

        # Create a DataFrame from the queryset
        data = {
            'process_name': [p.process_name for p in predictions],
            'class': [p.process_class for p in predictions],
            'probability_of_malware': [p.probability_of_malware for p in predictions]
        }
        df = pd.DataFrame(data)

        # Plot using seaborn or matplotlib
        plt.figure(figsize=(10, 6))

        # Create a barplot where the class is on the x-axis and the probability is on the y-axis
        sns.barplot(
            data=df,
            x='class',                   # Independent variable (x-axis)
            y='probability_of_malware',  # Dependent variable (y-axis)
            ci=None,                     # No confidence intervals
            palette='Set2'               # Use a color palette for different classes
        )

        plt.title('Malware Probability by Class')
        plt.xlabel('Class')
        plt.ylabel('Probability of Malware')
        plt.tight_layout()

        # Save the plot to a bytes buffer
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)

        # Return the image as a response
        return HttpResponse(buf, content_type='image/png')

class XGBModelView(APIView):
    parser_classes = [MultiPartParser]  # To handle file uploads

    def post(self, request, *args, **kwargs):
        if 'csv_file' not in request.FILES:
            return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)

        csv_file = request.FILES['csv_file']

        try:
            # Define the temp directory path
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'temp')

            # Create the 'temp' directory if it doesn't exist
            if not os.path.exists(temp_dir):
                os.makedirs(temp_dir)

            # Save the file temporarily
            temp_file_path = os.path.join(temp_dir, csv_file.name)
            with default_storage.open(temp_file_path, 'wb+') as destination:
                for chunk in csv_file.chunks():
                    destination.write(chunk)

            # Read the CSV file with headers
            df = pd.read_csv(temp_file_path)

            # Extract column names from the CSV
            actual_columns = df.columns.tolist()

        except Exception as e:
            return Response({"error": "Could not read the CSV file", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

        # Define the expected column names
        expected_columns = ['process_name', 'class', 'probability_of_malware']

        # Mapping logic
        if actual_columns != expected_columns:
            # Map actual column names to expected ones
            column_mapping = dict(zip(actual_columns, expected_columns))
            df.rename(columns=column_mapping, inplace=True)

        # Continue with processing the dataframe...
        records_saved = 0
        for _, row in df.iterrows():
            process_name = row['process_name']
            process_class = row['class']
            probability = row['probability_of_malware']

            # Save the row to the database
            MalwarePrediction.objects.create(
                process_name=process_name,
                process_class=process_class,
                probability_of_malware=probability,
                model_type=3
            )
            records_saved += 1

        return Response({"message": "XGB file saved successfully!"}, status=status.HTTP_201_CREATED)

    def get(self, request, *args, **kwargs):
        # Query all MalwarePrediction records from the database
        predictions = MalwarePrediction.objects.filter(model_type=3)

        if not predictions.exists():
            return Response({"error": "No data available to generate graph."}, status=status.HTTP_404_NOT_FOUND)

        # Create a DataFrame from the queryset
        data = {
            'process_name': [p.process_name for p in predictions],
            'class': [p.process_class for p in predictions],
            'probability_of_malware': [p.probability_of_malware for p in predictions]
        }
        df = pd.DataFrame(data)

        # Plot using seaborn or matplotlib
        plt.figure(figsize=(10, 6))

        # Create a barplot where the class is on the x-axis and the probability is on the y-axis
        sns.barplot(
            data=df,
            x='class',                   # Independent variable (x-axis)
            y='probability_of_malware',  # Dependent variable (y-axis)
            ci=None,                     # No confidence intervals
            palette='Set2'               # Use a color palette for different classes
        )

        plt.title('Malware Probability by Class')
        plt.xlabel('Class')
        plt.ylabel('Probability of Malware')
        plt.tight_layout()

        # Save the plot to a bytes buffer
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)

        # Return the image as a response
        return HttpResponse(buf, content_type='image/png')

class SGDModelView(APIView):
    parser_classes = [MultiPartParser]  # To handle file uploads

    def post(self, request, *args, **kwargs):
        if 'csv_file' not in request.FILES:
            return Response({"error": "No file provided"}, status=status.HTTP_400_BAD_REQUEST)

        csv_file = request.FILES['csv_file']

        try:
            # Define the temp directory path
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'temp')

            # Create the 'temp' directory if it doesn't exist
            if not os.path.exists(temp_dir):
                os.makedirs(temp_dir)

            # Save the file temporarily
            temp_file_path = os.path.join(temp_dir, csv_file.name)
            with default_storage.open(temp_file_path, 'wb+') as destination:
                for chunk in csv_file.chunks():
                    destination.write(chunk)

            # Read the CSV file with headers
            df = pd.read_csv(temp_file_path)

            # Extract column names from the CSV
            actual_columns = df.columns.tolist()

        except Exception as e:
            return Response({"error": "Could not read the CSV file", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

        # Define the expected column names
        expected_columns = ['process_name', 'class', 'probability_of_malware']

        # Mapping logic
        if actual_columns != expected_columns:
            # Map actual column names to expected ones
            column_mapping = dict(zip(actual_columns, expected_columns))
            df.rename(columns=column_mapping, inplace=True)

        # Continue with processing the dataframe...
        records_saved = 0
        for _, row in df.iterrows():
            process_name = row['process_name']
            process_class = row['class']
            probability = row['probability_of_malware']

            # Save the row to the database
            MalwarePrediction.objects.create(
                process_name=process_name,
                process_class=process_class,
                probability_of_malware=probability,
                model_type=4
            )
            records_saved += 1

        return Response({"message": "SGD file saved successfully!"}, status=status.HTTP_201_CREATED)

    def get(self, request, *args, **kwargs):
        # Query all MalwarePrediction records from the database
        predictions = MalwarePrediction.objects.filter(model_type=4)

        if not predictions.exists():
            return Response({"error": "No data available to generate graph."}, status=status.HTTP_404_NOT_FOUND)

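KNeighborsModelView, RandomForestModelView, XGBModelView, and SGDModelView differ only in the model_type value and the success message, so a shared base class would collapse the four near-identical handlers into one. A hedged refactor sketch, with illustrative names, assuming the same module context (APIView, Response, status, MultiPartParser, pd, and MalwarePrediction already imported):

# Hypothetical refactor; class and attribute names are illustrative, not from this diff.
class BaseModelUploadView(APIView):
    parser_classes = [MultiPartParser]
    model_type = None   # subclasses set 1..4
    model_label = ''    # used in the success message

    def post(self, request, *args, **kwargs):
        if 'csv_file' not in request.FILES:
            return Response({"error": "No file provided"},
                            status=status.HTTP_400_BAD_REQUEST)
        df = pd.read_csv(request.FILES['csv_file'])  # read the upload directly
        # Assumes exactly three columns, as the per-view code does today.
        df.columns = ['process_name', 'class', 'probability_of_malware']
        MalwarePrediction.objects.bulk_create([
            MalwarePrediction(
                process_name=row['process_name'],
                process_class=row['class'],
                probability_of_malware=row['probability_of_malware'],
                model_type=self.model_type,
            )
            for _, row in df.iterrows()
        ])
        return Response({"message": f"{self.model_label} file saved successfully!"},
                        status=status.HTTP_201_CREATED)


class KNeighborsModelView(BaseModelUploadView):
    model_type, model_label = 1, 'KNN'

class RandomForestModelView(BaseModelUploadView):
    model_type, model_label = 2, 'RandomForest'

class XGBModelView(BaseModelUploadView):
    model_type, model_label = 3, 'XGB'

class SGDModelView(BaseModelUploadView):
    model_type, model_label = 4, 'SGD'
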
@ -13,6 +13,11 @@
</head>
<body>
    <div class="tabSection">
        {% if message %}
        <div class="alert alert-warning" role="alert">
            {{ message }}
        </div>
        {% endif %}
        <div class="tabMalwareTitle">
            <p class="tabSectionMalware tabSectionActiveMalware">Alerts and Alarms</p>
            <!-- <p class="tabSectionMalware ">Alerts and Alarms</p> -->
@ -285,14 +290,25 @@
        </div>
        <div class="tabSectionDetails">
            <pre id="MalwareLogActivity">
                {% for line in logs %}
                <p>{{line}}</p>
                {% endfor %}
            </pre>
            <script>
                // Poll for fresh log lines every 2 seconds
                setInterval(fetchLogs, 2000);
            </script>
        </div>
        <!-- {% if message %}
        <div class="alert alert-warning" role="alert">
            {{ message }}
        </div>
        {% endif %} -->
    </div>
</div>
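Note that fetchLogs itself is not defined anywhere in this diff; whatever it does client-side, it needs a server endpoint that returns the current log lines. A hedged sketch of such a view, in which the name, path, and log location are all assumptions:

# Hypothetical companion view for the fetchLogs poller; all names are illustrative.
import os
from django.conf import settings
from django.http import JsonResponse

def malware_logs_api(request):
    """Return the last 100 lines of the malware activity log as JSON."""
    log_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'malware_activity.log')
    if not os.path.exists(log_path):
        return JsonResponse({'logs': []})
    with open(log_path, 'r', encoding='utf-8', errors='replace') as fh:
        lines = fh.readlines()[-100:]
    return JsonResponse({'logs': [line.rstrip('\n') for line in lines]})
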
@ -303,7 +319,7 @@
<script src="{% static 'malware/js/PredictedChart.js' %}"></script>
<script type="application/json" id="class-frequency-data">{{ class_frequency|safe }}</script>

<script type="text/javascript">
<!-- <script type="text/javascript">
    google.charts.load("current", { packages: ['corechart'] });
    google.charts.setOnLoadCallback(drawChart);
@ -356,7 +372,145 @@
        var chart = new google.visualization.ColumnChart(document.getElementById("barMalwareChart"));
        chart.draw(view, options);
    }
</script> -->
<!-- <script type="text/javascript">
    google.charts.load("current", { packages: ['corechart'] });
    google.charts.setOnLoadCallback(drawChart);

    function drawChart() {
        // Function to fetch and draw the chart
        function updateChart() {
            // Use the Django context variable to populate the frequencies
            var classData = document.getElementById('class-frequency-data').textContent
            classData = classData.replace(/'/g, '"');
            var classFrequency = JSON.parse(classData);

            var data = google.visualization.arrayToDataTable([
                ["Element", "Density", { role: "style" }],
                ["Ramnit", parseInt(classFrequency['Ramnit']), "#000080"],
                ["Lollipop", parseInt(classFrequency['LolliPop']), "#0000ff"],
                ["Kelihos_ver3", parseInt(classFrequency['Kelihos_ver3']), "#0081ff"],
                ["Vundo", parseInt(classFrequency['Vundo']), "#17ffe2"],
                ["Simda", parseInt(classFrequency['Simda']), "#7bff7b"],
                ["Tracur", parseInt(classFrequency['Tracur']), "#e3ff15"],
                ["Kelihos_ver1", parseInt(classFrequency['Kelihos_ver1']), "#ff9801"],
                ["Obfuscator.ACY", parseInt(classFrequency['Obfuscator.ACY']), "#ff2200"],
                ["Gatak", parseInt(classFrequency['Gatak']), "#810000"],
            ]);

            var view = new google.visualization.DataView(data);
            view.setColumns([0, 1, 2]);

            var options = {
                title: "",
                height: 430,
                width: 900,
                legend: { position: "none" },
                backgroundColor: '#0c212b',
                tooltip: { trigger: 'none' },
                vAxis: {
                    title: 'Count',
                    viewWindow: {
                        min: 0,
                        max: 200
                    },
                    ticks: [0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200],
                },
                hAxis: {
                    title: 'Class',
                    slantedText: true, // Tilt the text to prevent collisions
                },
                legend: { position: 'none' },
            };

            var chart = new google.visualization.ColumnChart(document.getElementById("barMalwareChart"));
            chart.draw(view, options);
        }

        // Initial draw
        updateChart();

        // Set up real-time updates every 5 seconds
        setInterval(updateChart, 1000);
    }
</script> -->

<script type="text/javascript">
|
||||
google.charts.load("current", { packages: ['corechart'] });
|
||||
google.charts.setOnLoadCallback(drawChart);
|
||||
|
||||
function drawChart() {
|
||||
// Function to fetch and draw the chart
|
||||
function updateChart() {
|
||||
// Use the Django context variable to populate the frequencies
|
||||
var classData = document.getElementById('class-frequency-data').textContent;
|
||||
classData = classData.replace(/'/g, '"');
|
||||
var classFrequency = JSON.parse(classData);
|
||||
|
||||
// Check if classFrequency is empty or contains invalid data
|
||||
var isEmpty = Object.values(classFrequency).every(function(value) {
|
||||
return value === 0 || value === null || value === undefined;
|
||||
});
|
||||
|
||||
// If data is empty, show a placeholder or empty graph
|
||||
if (isEmpty) {
|
||||
document.getElementById("barMalwareChart").innerHTML = "<h3>No data available to display</h3>";
|
||||
return; // Stop further execution if no data
|
||||
}
|
||||
|
||||
// Prepare the data for the chart
|
||||
var data = google.visualization.arrayToDataTable([
|
||||
["Element", "Density", { role: "style" }],
|
||||
["Ramnit", parseInt(classFrequency['Ramnit']), "#000080"],
|
||||
["Lollipop", parseInt(classFrequency['LolliPop']), "#0000ff"],
|
||||
["Kelihos_ver3", parseInt(classFrequency['Kelihos_ver3']), "#0081ff"],
|
||||
["Vundo", parseInt(classFrequency['Vundo']), "#17ffe2"],
|
||||
["Simda", parseInt(classFrequency['Simda']), "#7bff7b"],
|
||||
["Tracur", parseInt(classFrequency['Tracur']), "#e3ff15"],
|
||||
["Kelihos_ver1", parseInt(classFrequency['Kelihos_ver1']), "#ff9801"],
|
||||
["Obfuscator.ACY", parseInt(classFrequency['Obfuscator.ACY']), "#ff2200"],
|
||||
["Gatak", parseInt(classFrequency['Gatak']), "#810000"],
|
||||
]);
|
||||
|
||||
var view = new google.visualization.DataView(data);
|
||||
view.setColumns([0, 1, 2]);
|
||||
|
||||
var options = {
|
||||
title: "",
|
||||
height: 430,
|
||||
width: 900,
|
||||
legend: { position: "none" },
|
||||
backgroundColor: '#0c212b',
|
||||
tooltip: { trigger: 'none' },
|
||||
vAxis: {
|
||||
title: 'Count',
|
||||
viewWindow: {
|
||||
min: 0,
|
||||
max: 200
|
||||
},
|
||||
ticks: [0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200],
|
||||
},
|
||||
hAxis: {
|
||||
title: 'Class',
|
||||
slantedText: true, // Tilt the text to prevent collisions
|
||||
},
|
||||
legend: { position: 'none' },
|
||||
};
|
||||
|
||||
var chart = new google.visualization.ColumnChart(document.getElementById("barMalwareChart"));
|
||||
chart.draw(view, options);
|
||||
}
|
||||
|
||||
// Initial draw
|
||||
updateChart();
|
||||
|
||||
// Set up real-time updates every 5 seconds
|
||||
setInterval(updateChart, 1000);
|
||||
}
|
||||
</script>
|
||||
|
||||
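As written, updateChart re-parses the same inline JSON blob on every tick, so the "real-time" interval can never show new data without a full page reload. Genuine updates would require the poller to hit a JSON endpoint; a minimal server-side sketch, in which the view name and routing are assumptions:

# Hypothetical endpoint the chart poller could fetch; names are illustrative.
from collections import Counter
from django.http import JsonResponse

def class_frequency_api(request):
    """Return current per-class counts so the chart can reflect new uploads."""
    counts = Counter(
        MalwarePrediction.objects.values_list('process_class', flat=True)
    )
    return JsonResponse(dict(counts))
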
<script type="application/json" id="class-average-data">{{ average|safe }}</script>
|
||||
<script>
|
||||
|
||||
|
||||
Binary file not shown.
@ -87,11 +87,11 @@ WSGI_APPLICATION = 'x_sys.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'webdefender',
        'USER': 'defenderuser',
        'NAME': 'xsysdb',
        'USER': 'tech4biz',
        'PASSWORD': 'Admin@123',
        'HOST': 'localhost',
        'PORT': '5432',
        'HOST': 'localhost',
        'PORT': '5433',
    }
}
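The database password is committed in plain text here; the usual hardening is to read credentials from the environment so they never land in version control. A minimal sketch, with hypothetical X_SYS_DB_* variable names:

# Hypothetical settings fragment; the X_SYS_DB_* variable names are illustrative.
import os

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get('X_SYS_DB_NAME', 'xsysdb'),
        'USER': os.environ.get('X_SYS_DB_USER', 'tech4biz'),
        'PASSWORD': os.environ['X_SYS_DB_PASSWORD'],  # no default: fail fast if unset
        'HOST': os.environ.get('X_SYS_DB_HOST', 'localhost'),
        'PORT': os.environ.get('X_SYS_DB_PORT', '5433'),
    }
}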