diff --git a/LFIscanner.py b/LFIscanner.py
index 388cb4c..74fe619 100644
--- a/LFIscanner.py
+++ b/LFIscanner.py
@@ -10,6 +10,10 @@
import random
from threading import Lock
import json
+import urllib.parse
+import copy
+from urllib.parse import urlparse
+
if os.name == "nt":
os.system("cls")
@@ -30,22 +34,49 @@
Made by: Apollyon
-Based on: LFIScanner by R3LI4NT
+Based on: LFIScanner by R3LI4NT
+Run python LFIscanner.py -h for help
""")
parse = argparse.ArgumentParser()
parse.add_argument('-u','--url',help="Target URL",required=False)
parse.add_argument('-ulist','--url_list',help="Target multiple URLs from a file",required=False)
+parse.add_argument('-ta','--test_all',help="Test all given parameters for LFI [Only last one is tested by default]", action = "store_true",required=False)
+parse.add_argument('-to','--timeout',help="Set timeout for requests [10 seconds by default]", default=10,required=False)
parse.add_argument('-wiz','--wizard',help="Run the wizard, for beginner and first time users",required=False,default=False, action = "store_true")
parse.add_argument('-e','--extract',help="Extract content", action='store_true',required=False)
parse.add_argument('-p','--payload',help="Payloads file [Pre installed lists: all_os.txt , linux.txt , windows.txt]",required=False)
parse.add_argument('-t','--threads',help="Threads [5 by default]",default=5,required=False)
parse.add_argument('-pr','--proxy',help="Add a list of proxies to use [HTTP, HTTPS, SOCKS]",required=False)
-parse.add_argument('-auth','--authentication',help="Load headers and/or cookies from a file to run a scan while authenticated",required=False,default="auth.json")
+parse.add_argument('-auth','--authentication',help="Load headers and/or cookies and/or URL schema from a file to run a scan while authenticated",required=False,default="auth.json")
parse.add_argument('-save','--save_to_file',help="Save working LFI payloads by writing them to a file",required=False,default="LFI_scanner_saves.txt")
parse = parse.parse_args()
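+# example invocation (hypothetical target): python LFIscanner.py -u https://example.com/index.php?page= -p linux.txt -ta -to 5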
lock = Lock()
+to_test_all = bool(parse.test_all)
+
+try:
+    req_timeout = int(parse.timeout)
+except ValueError:
+    print("[X] REQUEST TIMEOUT VALUE MUST BE AN INTEGER, USING THE DEFAULT OF 10 SECONDS")
+    req_timeout = 10
+
+
+def parse_url(url):
+
+    if not url.startswith(("https://" , "http://")):
+        url = "https://" + url #we consider it to be https by default
+
+    #parse once and split the URL into the pieces the scanner needs
+    parsed = urlparse(url)
+
+    return (url , parsed.query , parsed.netloc , parsed.scheme , parsed.path)
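+
+# e.g. parse_url("example.com/index.php?page=") returns
+# ("https://example.com/index.php?page=" , "page=" , "example.com" , "https" , "/index.php")
+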
def payload_counter(payload_file_path):
with open(payload_file_path, 'rb+') as f:
@@ -64,7 +95,8 @@ def load_internal_payloads(payload_path):
print("[X] SPECIFIED PAYLOADS NOT FOUND. PLEASE REINSTALL TOOL OR PAYLOAD FILE")
quit()
-def check_single_url_with_payload(x,payloads_per_thread,payload_path,target_url,cookies,headers,save_file_path,to_extract):
+def check_single_url_with_payload(x,payloads_per_thread,payload_path,target_url,cookies,headers,save_file_path,to_extract,req_timeout):
+
global proxies_but_dict
global proxy_running
payloads = load_internal_payloads(payload_path)
@@ -74,19 +106,19 @@ def check_single_url_with_payload(x,payloads_per_thread,payload_path,target_url,
print(f"Thread number {x+1} launched on URL {target_url} Checking payloads ...")
last_msg_was_error = False
for p in payloads:
         try:
if pointer_line > (x*payloads_per_thread) and pointer_line < ((x+1)*payloads_per_thread):
p = p.strip()
-
+
if proxy_running:
- query = requests.get(target_url+p , headers=headers , proxies=random.choice(proxies_but_dict), cookies=cookies)
+ query = requests.get(target_url+p , headers=headers , proxies=random.choice(proxies_but_dict), cookies=cookies, timeout=req_timeout)
else:
- query = requests.get(target_url+p , headers=headers, cookies=cookies)
+ query = requests.get(target_url+p , headers=headers, cookies=cookies, timeout=req_timeout)
payload_count = payload_count + 1
if payload_count%25 == 0:
print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Checked {payload_count} payloads...")
- if "root" and "bash" and r"/bin" in query.text and query.status_code//100 == 2:
+ if "root" in query.text and "bash" in query.text and r"/bin" in query.text and query.status_code//100 == 2:
print("="*10)
print(f"LFI DETECTED:\n URL + Payload: {target_url+p}\n\n")
if to_extract:
@@ -109,40 +141,112 @@ def check_single_url_with_payload(x,payloads_per_thread,payload_path,target_url,
print("="*10)
pointer_line = pointer_line + 1
last_msg_was_error = False
- except RequestException:
- if not last_msg_was_error:
- print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Error occured while making request")
- print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Sleeping for 3 seconds then retrying payloads untill error is resolved ...")
- last_msg_was_error = True
- time.sleep(3)
- else:
- time.sleep(3)
- except NameResolutionError:
- if not last_msg_was_error:
- print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Error occured while resolving domain name. Are you sure the specified website exists and you are connected to the internet ?")
- print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Sleeping for 3 seconds then retrying payloads untill error is resolved ...")
- last_msg_was_error = True
- time.sleep(3)
+        except RequestException:
+            if not last_msg_was_error:
+                print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Error occurred while making request (possibly a timeout)")
+                print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Sleeping for 3 seconds then retrying payloads until the error is resolved ...")
+                last_msg_was_error = True
+                time.sleep(3)
+            else:
+                time.sleep(3)
+        except NameResolutionError:
+            if not last_msg_was_error:
+                print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Error occurred while resolving the domain name. Are you sure the specified website exists and you are connected to the internet ?")
+                print(f"[!] Thread {x+1} | Running on URL: {target_url} | Status: Sleeping for 3 seconds then retrying payloads until the error is resolved ...")
+                last_msg_was_error = True
+                time.sleep(3)
+            else:
+                time.sleep(3)
+
+def url_parameterizing(x,payloads_per_thread,payload_path,target_url,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins,to_test_all,req_timeout):
+
+    #here we parse the target URL tuple so we can scan each parameter and apply the proper auth settings
+    #structure of the target_url tuple is ('full url', 'params', 'domain+tld', 'protocol' , 'path')
+
+    def add_url_password_username(domain , protocol , username , password):
+        #note to self and future devs: this drops the query params, which are re-added later during URL reconstruction
+        return f"{protocol}://{username}:{password}@{domain}"
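+    # e.g. add_url_password_username("example.com" , "https" , "alice" , "s3cret")
+    # returns "https://alice:s3cret@example.com" (illustrative credentials only)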
+
+    actual_target_url = f"{target_url[3]}://{target_url[2]}"
+    if url_schema_logins:
+        if not special_url_schema_logins:
+            actual_target_url = add_url_password_username(target_url[2] , target_url[3] , url_schema_logins[0] , url_schema_logins[1])
+        else:
+            if target_url[2] not in special_url_schema_logins:
+                actual_target_url = add_url_password_username(target_url[2] , target_url[3] , url_schema_logins[0] , url_schema_logins[1])
-        else:
-            time.sleep(3)
+            else:
+                actual_target_url = add_url_password_username(target_url[2] , target_url[3] , special_url_schema_logins[target_url[2]][0] , special_url_schema_logins[target_url[2]][1])
+
+    elif special_url_schema_logins:
+        #no global schema login here, so apply the special one only when this domain has an entry
+        if target_url[2] in special_url_schema_logins:
+            actual_target_url = add_url_password_username(target_url[2] , target_url[3] , special_url_schema_logins[target_url[2]][0] , special_url_schema_logins[target_url[2]][1])
+
+ if special_cookies:
+ if target_url[2] in special_cookies:
+ cookies = special_cookies[target_url[2]]
+
+ if special_headers:
+ if target_url[2] in special_headers:
+ headers = special_headers[target_url[2]]
+
+    def param_parsers(params):
+        #returns a dict of params along with the values the user entered
+        #for example ?a=123&b=456&c= becomes {"a" : "123" , "b" : "456" , "c" : ""}
+        #we use this custom parser instead of urllib's parse_qs because parse_qs drops empty params (the c param above) unless keep_blank_values is set
-def use_payload(x,payloads_per_thread,payload_path,target_url,targets_path,cookies,headers,save_file_path,to_extract):
+        params_json = {}
+        if not params:
+            return params_json
+        for param_split in params.split("&"):
+            #partition keeps values that themselves contain "=" and tolerates params without one
+            param_name , _ , param_value = param_split.partition("=")
+            params_json[param_name] = param_value
+
+        return params_json
+
+    params_json = param_parsers(target_url[1])
+    if not params_json:
+        print(f"[X] Thread {x+1} | Running on URL: {target_url[0]} | Status: The URL doesn't have any parameters available for testing.")
+        quit()
+
+ # url reconstruction for the requests now
+ if to_test_all:
+ for j in range(len(params_json)): #we inject the Jth parameter every time
+
+ select_param = ""
+ actual_target_url_2 = f"{actual_target_url}{target_url[4]}?"
+ for param_count , param in enumerate(params_json):
+ if param_count != j:
+ actual_target_url_2 = f"{actual_target_url_2}{param}={params_json[param]}&"
+ else:
+ select_param = param
+
+            #actual_target_url already carries the scheme (and any schema login), so only the injected param is appended here
+            actual_target_url_2 = f"{actual_target_url_2}{select_param}="
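+            # e.g. for path /index.php with params a=1&b=2 and j=0 this builds
+            # https://example.com/index.php?b=2&a=  (hypothetical host), so the payload lands on param a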
+
+ check_single_url_with_payload(x,payloads_per_thread,payload_path,actual_target_url_2,cookies,headers,save_file_path,to_extract,req_timeout)
+    else:
+        #only the URL as given is tested, rebuilt from its parts so any schema login is preserved
+        check_single_url_with_payload(x,payloads_per_thread,payload_path,f"{actual_target_url}{target_url[4]}?{target_url[1]}",cookies,headers,save_file_path,to_extract,req_timeout)
+
+def use_payload(x,payloads_per_thread,payload_path,target_url,targets_path,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins,to_test_all,req_timeout):
if not target_url:
if os.path.isfile(url_list_path):
with open(url_list_path) as targets_file:
for target in targets_file:
target = target.strip()
if target:
- if r"https://" in target or r"http://" in target:
- pass
- else:
- target = r"https://" + target
- check_single_url_with_payload(x,payloads_per_thread,payload_path,target,cookies,headers,save_file_path,to_extract)
+ target = parse_url(target)
+ url_parameterizing(x,payloads_per_thread,payload_path,target,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins,to_test_all,req_timeout)
else:
print("[X] NO TARGET URL SPECIFIED")
quit()
else:
- check_single_url_with_payload(x,payloads_per_thread,payload_path,target_url,cookies,headers,save_file_path,to_extract)
+ url_parameterizing(x,payloads_per_thread,payload_path,target_url,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins,to_test_all,req_timeout)
+
def count_payloads(payload_input):
match payload_input:
@@ -217,6 +321,11 @@ def load_proxies(proxy_path):
#default headers
def load_authentication(auth_path , headers , cookies):
+
+    special_cookies = {}
+    special_headers = {}
+    special_url_schema_logins = {}
+    url_schema_logins = False
+
if os.path.isfile(auth_path):
with open(auth_path , "r") as auth_file:
auth_data = auth_file.read()
@@ -225,15 +334,35 @@ def load_authentication(auth_path , headers , cookies):
if auth_data["auth_headers"]:
for header in auth_data["auth_headers"]:
headers[header] = auth_data["auth_headers"][header]
+
if auth_data["cookies"]:
cookies = auth_data["cookies"]
+
+    if auth_data.get("url_schema_login"):
+        url_schema_logins = (auth_data["url_schema_login"][0] , auth_data["url_schema_login"][1])
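+    # e.g. "url_schema_login":["alice" , "s3cret"] (hypothetical creds) makes requests go to https://alice:s3cret@domain/...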
+
+
+    if auth_data.get("special_cookies"):
+        special_cookies = auth_data["special_cookies"]
+
+    if auth_data.get("special_auth_headers"):
+        special_headers = auth_data["special_auth_headers"]
+        #merge the global headers into each special set, so per-domain headers override them but do not replace them
+        for domain in special_headers:
+            for header in headers:
+                if header not in special_headers[domain]:
+                    special_headers[domain][header] = headers[header]
+
+    if auth_data.get("special_url_schema_login"):
+        special_url_schema_logins = auth_data["special_url_schema_login"]
+
else:
print("[X] AUTH FILE DOES NOT EXIST")
- return headers,cookies
+ return headers,cookies,url_schema_logins,special_cookies,special_headers,special_url_schema_logins
if parse.authentication:
- load_authentication(parse.authentication , headers , cookies)
+ headers,cookies,url_schema_logins,special_cookies,special_headers,special_url_schema_logins = load_authentication(parse.authentication , headers , cookies)
@@ -256,16 +385,11 @@ def load_authentication(auth_path , headers , cookies):
if parse.url:
print(f"[*] RUNNING ON TARGET ---> {parse.url}")
- if r"https://" in parse.url or r"http://" in parse.url:
- current_target = parse.url
- else:
- current_target = r"https://" + parse.url
-
+ current_target = parse_url(parse.url)
payloads_per_thread = payload_count//int(parse.threads)
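+    # each thread walks the whole payload list but only fires its own slice, e.g. 1000 payloads across 5 threads gives each thread roughly 200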
-
for x in range(int(parse.threads)):
- threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,current_target,False,cookies,headers,save_file_path,to_extract)).start()
+ threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,current_target,False,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins , to_test_all,req_timeout)).start()
else:
print(f"[*] RUNNING ON MULTIPLE TARGETS")
url_list_path = parse.url_list.lower()
@@ -274,7 +398,7 @@ def load_authentication(auth_path , headers , cookies):
payloads_per_thread = payload_count//int(parse.threads)
for x in range(int(parse.threads)):
- threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,False,url_list_path,cookies,headers,save_file_path,to_extract)).start()
+ threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,False,url_list_path,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins , to_test_all,req_timeout)).start()
else:
print("[X] GIVEN TARGET URL FILE NOT FOUND")
else:
@@ -295,19 +419,37 @@ def load_authentication(auth_path , headers , cookies):
except:
threads_count = 5
- payload_file = input("YA-LFI Wizard | Enter the path to payload list you want to use [Builtins: [1]all_os.txt , [2]linux.txt , [3]windows.txt]:").strip().lower()
+ test_all = input("YA-LFI Wizard | Do you want to test all parameters (This will take considerably more time) ? (y)es/(n)o :").strip().lower()
+
+    match test_all:
+        case "1" | "y" | "yes":
+            to_test_all = True
+        case _:
+            to_test_all = False
+
+ payload_file = input("YA-LFI Wizard | Enter the path to payload list you want to use [Builtins: [1]all_os.txt , [2]linux.txt , [3]windows.txt] :").strip().lower()
payload_count , payload_path = count_payloads(payload_file)
payloads_per_thread = payload_count//threads_count
- proxy_file = input("YA-LFI Wizard | Enter the path for the proxy file if you want to use them, leave blank or enter no if you dont want to use proxies").strip().lower()
+
+
+    req_timeout = input("YA-LFI Wizard | Enter the timeout value for each request in seconds [10 seconds by default] :").strip()
+    try:
+        req_timeout = int(req_timeout)
+    except ValueError:
+        print("[X] REQUEST TIMEOUT VALUE MUST BE AN INTEGER, USING THE DEFAULT OF 10 SECONDS")
+        req_timeout = 10
+
+    proxy_file = input("YA-LFI Wizard | Enter the path for the proxy file if you want to use proxies, leave blank if you don't :").strip().lower()
if proxy_file:
load_proxies(proxy_file)
auth_file = input("YA-LFI Wizard | Enter the path for auth headers and cookies if you want to use them, leave blank or enter no if you dont want to scan without auth :").strip().lower()
if auth_file:
- headers , cookies = load_authentication(auth_file , headers , cookies)
+        headers,cookies,url_schema_logins,special_cookies,special_headers,special_url_schema_logins = load_authentication(auth_file , headers , cookies)
save_file_path = input("YA-LFI Wizard | Enter the path for file where to save results, leave blank or enter no if you dont want to scan without auth :").strip().lower()
@@ -322,7 +464,7 @@ def load_authentication(auth_path , headers , cookies):
to_extract = False
for x in range(threads_count):
- threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,False,url_list_path,cookies,headers,save_file_path,to_extract)).start()
+ threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,False,url_list_path,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins , to_test_all,req_timeout)).start()
else:
print("[X] GIVEN TARGET URL FILE NOT FOUND")
@@ -330,10 +472,7 @@ def load_authentication(auth_path , headers , cookies):
current_target= input("YA-LFI Wizard | Enter the URL to scan :").strip()
print(f"[*] RUNNING ON TARGET ---> {current_target}")
- if r"https://" in current_target or r"http://" in current_target:
- current_target = current_target
- else:
- current_target = r"https://" + current_target
+ current_target = parse_url(current_target)
threads_count = input("YA-LFI Wizard | How many threads do you want to use :").strip()
try:
@@ -344,11 +483,27 @@ def load_authentication(auth_path , headers , cookies):
except:
threads_count = 5
+ test_all = input("YA-LFI Wizard | Do you want to test all parameters (This will take considerably more time) ? (y)es/(n)o :").strip().lower()
+
+    match test_all:
+        case "1" | "y" | "yes":
+            to_test_all = True
+        case _:
+            to_test_all = False
+
payload_file = input("YA-LFI Wizard | Enter the path to payload list you want to use [Builtins: [1]all_os.txt , [2]linux.txt , [3]windows.txt]:").strip().lower()
payload_count , payload_path = count_payloads(payload_file)
payloads_per_thread = payload_count//threads_count
+    req_timeout = input("YA-LFI Wizard | Enter the timeout value for each request in seconds [10 seconds by default] :").strip()
+    try:
+        req_timeout = int(req_timeout)
+    except ValueError:
+        print("[X] REQUEST TIMEOUT VALUE MUST BE AN INTEGER, USING THE DEFAULT OF 10 SECONDS")
+        req_timeout = 10
+
proxy_file = input("YA-LFI Wizard | Enter the path for the proxy file if you want to use them, leave blank or enter no if you dont want to use proxies").strip().lower()
if proxy_file:
load_proxies(proxy_file)
@@ -356,7 +511,7 @@ def load_authentication(auth_path , headers , cookies):
auth_file = input("YA-LFI Wizard | Enter the path for auth headers and cookies if you want to use them, leave blank or enter no if you dont want to scan without auth :").strip().lower()
if auth_file:
- headers , cookies = load_authentication(auth_file , headers , cookies)
+        headers,cookies,url_schema_logins,special_cookies,special_headers,special_url_schema_logins = load_authentication(auth_file , headers , cookies)
save_file_path = input("YA-LFI Wizard | Enter the path for file where to save results, leave blank or enter no if you dont want to scan without auth :").strip().lower()
@@ -374,7 +529,7 @@ def load_authentication(auth_path , headers , cookies):
payloads_per_thread = payload_count//threads_count
for x in range(threads_count):
- threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,current_target,False,cookies,headers,save_file_path,to_extract)).start()
+ threading.Thread(target=use_payload, args=(x,payloads_per_thread,payload_path,current_target,False,cookies,headers,save_file_path,to_extract,url_schema_logins,special_cookies,special_headers,special_url_schema_logins , to_test_all,req_timeout)).start()
case _:
print("[X] NOT A VALID OPTION PLEASE RE LAUNCH THE PROGRAM AND SELECT AN AVAILABLE OPTION")
diff --git a/README.md b/README.md
index 79a2fcb..9d73435 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
### Yet another - local file inclusion scanner
##### By: Apollyon
-##### Based on: LFIScanner by R3LI4NT
+
## Commands
@@ -19,6 +19,8 @@
| -h / --help | Request help |
| -u / --url | Target Website |
| -ulist / --url_list | Target multiple websites from file |
+| -ta / --test_all | Test all parameters of the given URL |
+| -to / --timeout | Set the timeout for requests |
| -wiz / --wizard | Wizard for new users |
| -p / --payload | Payload file |
| -e / --extract | Extract content |
@@ -67,12 +69,19 @@ Windows wordlist
```
python LFIscanner.py -u https://example.com?param= -p windows.txt
```
-WARNING: DO NOT RUN WITHOUT A PARAMETER IN THE URL
![image](https://github.com/user-attachments/assets/4e07bcd8-21a2-43e4-8551-8006460f8ce7)
![image](https://github.com/user-attachments/assets/be6ae5a0-376b-4a95-899b-3f4d47c933fd)
+## Using with Tor
+
+If you want to use YA-LFI with Tor, do the following:
+- Run the Tor service
+- Add socks5://127.0.0.1:9050 to the proxy list
+- Run YA-LFI with the proxies flag (see the example below)
+
+Tor listens for SOCKS connections on port 9050 by default, so if you have changed that, change the port as well.
+You can also try @azuk4r's fork of YA-LFI [here](https://github.com/azuk4r/YA-LFI), which tries to implement Tor circuit rotation by default.
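+
+For example, assuming your proxy list is saved as proxies.txt and contains the single line socks5://127.0.0.1:9050:
+```
+python LFIscanner.py -u https://example.com?param= -p linux.txt -pr proxies.txt
+```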
## Other amazing third party wordlists
@@ -109,3 +118,7 @@ WARNING: DO NOT RUN WITHOUT A PARAMETER IN THE URL
?conf={payload}
```
[Source](https://book.hacktricks.xyz/pentesting-web/file-inclusion)
+
+## Credits
+##### Based on LFIScanner by R3LI4NT
+##### Special thanks to @azuk4r for contributing ideas and testing the tool in its early stages
diff --git a/auth.json b/auth.json
index 70caad1..4896692 100644
--- a/auth.json
+++ b/auth.json
@@ -1,6 +1,23 @@
{
- "how_to_use":"You can write auth cookies to the cookies field such as {'username':'bob' , 'pass':123 , 'special_thing':special_thing}. For headers, you can edit the array as like this {'special_header_1':special_stuff , 'special_header_2':'special_stuff_more'}. You can leave this blank if you dont want to use any. These cookies and headers will be used for every request",
- "use_your_own":"This is just a template and you can use your own file, given they are configured in a similar manner. You need not include these two comments in your own file",
+ "how_to_use":"You can specify auth cookies, headers and schema login credentials for all normal requests here. We have also provided examples below. These credentials will be used for all requests to non special URLs. To create specific authentication stuff for special URLs, use the special login flows",
+
+ "cookies_example":{"username":"bob" , "pass":123 , "special_thing":"special_thing"},
+ "auth_headers_example":{"special_header_1":"special stuff" , "special_header_2":"special stuff more"},
+ "url_schema_login_example":["my_username" , "my_secret_password"],
+
"cookies":{},
- "auth_headers":{}
+ "auth_headers":{},
+ "url_schema_login":[],
+
+ "using_special_login_flows":"You can specify auth cookies, headers and schema login credentials for specific sites here. We have also provided examples below. These credentials will be only used for the specified URLs",
+
+ "special_cookies_example":{"test.com":{"special_cookie_one":"something special" , "special_cookie_two":"something even more special"} , "example.com":{"real_auth":1234}},
+ "special_auth_headers_example":{"test.com":{"special_header":"something special" , "even_special_header":"something even more special"} , "example.com":{"real_auth_header":1234}},
+ "special_url_schema_login_example":{"test.com":["real_username" , "totally_real_password"] , "example.com":["not_real_username" , "not_real_password"]},
+
+ "special_cookies":{},
+ "special_auth_headers":{},
+ "special_url_schema_login":{},
+
+ "use_your_own":"Dont worry about the comments and example creds they are ignored by the program. This is just a template and you can use your own file, given they are configured in a similar manner. You need not include these two comments in your own file"
}
\ No newline at end of file