# test_model.py
# Basic libraries
import sys
import math
import collections
import requests
import numpy as np
from joblib import load
from tokenizers import ByteLevelBPETokenizer
"""
How to run:
python3 test_model.py --tokenizer_folder tokenizer --threshold 0.5 --model_dir saved_models --website_to_test https://www.google.com
"""
# Parsing arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--tokenizer_folder", type=str, default="tokenizer", help="Folder where the tokenizer files have been placed")
parser.add_argument("--threshold", type=float, default=0.5, help="Classification threshold to use for testing")
parser.add_argument("--model_dir", type=str, default="saved_models", help="Directory of trained models")
parser.add_argument("--website_to_test", type=str, default="https://www.google.com", help="Website to test")
args = parser.parse_args()
tokenizerFolder = args.tokenizer_folder
savedModelDirectory = args.model_dir
websiteToTest = args.website_to_test
threshold = args.threshold
# Load tokenizer files (vocab and merges) produced at training time
tokenizer = ByteLevelBPETokenizer(
    tokenizerFolder + "/tokenizer.tok-vocab.json",
    tokenizerFolder + "/tokenizer.tok-merges.txt",
)
tokenizerVocabSize = tokenizer.get_vocab_size()
print("Tokenizer files have been loaded and the vocab size is %d..." % tokenizerVocabSize)
# Load saved model
model = load(savedModelDirectory + "/phishytics-model.joblib")
print("Model loaded...")
# Load document frequency dictionary
docDict = np.load(savedModelDirectory + "/phishytics-model-tfidf-dictionary.npy", allow_pickle=True).item()
print("Document frequency dictionary loaded...")
# Testing
print("Loading webpage...")
try:
    # A timeout keeps the script from hanging on unresponsive hosts
    response = requests.get(websiteToTest, timeout=30)
    webpageHtml = response.text.replace("\n", " ")
except Exception as e:
    print("\nError: %s" % e)
    print("An error occurred, exiting now...")
    sys.exit(1)
# Convert text into feature vector
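# encode() turns the raw HTML into BPE token ids; Counter then gives the raw
# term frequency of each id on this page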
output = tokenizer.encode(webpageHtml)
outputDict = collections.Counter(output.ids)
# Apply tfidf weighting
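# For each token id t on the page: weight = tf(t) * log10(N / df(t)), where
# tf(t) is the token's count on the page, N is the number of training
# documents, and df(t) is how many training documents contain t. Ids absent
# from the page keep weight 0.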
totalFilesUnderConsideration = docDict["totalFilesUnderConsideration"]
array = [0] * tokenizerVocabSize
for item in outputDict:
    # Guard against token ids that never appeared in the training corpus
    if item in docDict and len(docDict[item]) > 0:
        array[item] = outputDict[item] * math.log10(totalFilesUnderConsideration / len(docDict[item]))
# Getting predictions
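# predict_proba returns [P(class 0), P(class 1)] per sample; [0][1] is taken
# as the phishing-class probability, assuming phishing was encoded as label 1
# during training.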
predictionProbability = model.predict_proba([array])[0][1]
print("\n****************************\n--> Probability that the website is phishing: %.2f%%" % (predictionProbability * 100))
prediction = "NOT PHISHING"
if predictionProbability > threshold:
    prediction = "PHISHING"
print("--> Based on your threshold of %.2f, this website is +++'%s'+++" % (threshold, prediction))
print("****************************")