forked from liftoff/pyminifier
-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
SOURCE: https://github.com/AlexandrDragunkin/pyminifier =v2.2.1 update …
…@2019 (origineel =v2.2 @ https://github.com/liftoff/pyminifier) - - - +(minor)Changes ; incl. PR liftoff#92 'fix a ZeroDivisionError when cumulative_size is zero' : liftoff@f593e6f (04 Aug 2017) incl. PR liftoff#57 'Fix reduce_operators() with check whether prev_tok is not None' : liftoff@3a95839 (12 Feb 2016) - - -
- Loading branch information
Showing
5 changed files
with
904 additions
and
29 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -5,16 +5,17 @@ | |
# For license information see LICENSE.txt | ||
|
||
# Meta | ||
__version__ = '2.2' | ||
__version_info__ = (2, 2) | ||
__license__ = "GPLv3" # See LICENSE.txt | ||
__version__ = '2.2.1' | ||
__version_info__ = (2, 2, 1) | ||
__license__ = "GPLv3" # See LICENSE.txt | ||
__author__ = 'Dan McDougall <[email protected]>' | ||
|
||
# TODO: Add the ability to mark variables, functions, classes, and methods for non-obfuscation. | ||
# TODO: Add the ability to selectively obfuscate identifiers inside strings (for metaprogramming stuff). | ||
# TODO: Add the ability to use a config file instead of just command line args. | ||
# TODO: Add the ability to save a file that allows for de-obfuscation later (or at least the ability to debug). | ||
# TODO: Separate out the individual functions of minification so that they can be chosen selectively like the obfuscation functions. | ||
# TODO: A conflict file entry in the windows operating system | ||
|
||
__doc__ = """\ | ||
**Python Minifier:** Reduces the size of (minifies) Python code for use on | ||
|
@@ -66,7 +67,11 @@ | |
""" | ||
|
||
# Import built-in modules | ||
import os, sys, re, io | ||
import os | ||
import sys | ||
import re | ||
import io | ||
|
||
from optparse import OptionParser | ||
from collections import Iterable | ||
|
||
|
@@ -86,32 +91,41 @@ | |
except ImportError: | ||
pass | ||
|
||
# define the name of the operating system 'nt'- windows | ||
os_name = os.name | ||
|
||
# Regexes | ||
multiline_indicator = re.compile('\\\\(\s*#.*)?\n') | ||
|
||
# The test.+() functions below are for testing pyminifier... | ||
|
||
|
||
def test_decorator(f): | ||
"""Decorator that does nothing""" | ||
return f | ||
|
||
|
||
def test_reduce_operators():
    """Test the case where an operator such as an open paren starts a line"""
    # NOTE(review): this function is input data for pyminifier's own tests;
    # the trailing comment below is deliberately part of the fixture, since
    # reduce_operators() must keep the line's indentation intact.
    (a, b) = 1, 2  # The indentation level should be preserved
    pass
|
||
|
||
def test_empty_functions():
    # NOTE(review): fixture for the minifier — after docstring removal this
    # body is empty, so minification should collapse it to a one-line `pass`.
    """
    This is a test function.
    This should be replaced with 'def test_empty_functions(): pass'
    """
|
||
|
||
class test_class(object):
    # NOTE(review): fixture exercising docstring removal and decorator
    # handling at a non-zero indentation level; the short-form docstring
    # below is intentional test material — do not "improve" it.
    "Testing indented decorators"

    @test_decorator
    def test_function(self):
        # Intentionally empty: only the decorated-method shape matters here.
        pass
|
||
|
||
def test_function(): | ||
""" | ||
This function encapsulates the edge cases to prevent them from invading the | ||
|
@@ -120,8 +134,8 @@ def test_function(): | |
# This tests method obfuscation: | ||
method_obfuscate = test_class() | ||
method_obfuscate.test_function() | ||
foo = ("The # character in this string should " # This comment | ||
"not result in a syntax error") # ...and this one should go away | ||
foo = ("The # character in this string should " # This comment | ||
"not result in a syntax error") # ...and this one should go away | ||
test_multi_line_list = [ | ||
'item1', | ||
'item2', | ||
|
@@ -135,13 +149,14 @@ def test_function(): | |
# It may seem strange but the code below tests our docstring removal code. | ||
test_string_inside_operators = imaginary_function( | ||
"This string was indented but the tokenizer won't see it that way." | ||
) # To understand how this could mess up docstring removal code see the | ||
) # To understand how this could mess up docstring removal code see the | ||
# minification.minification.remove_comments_and_docstrings() function | ||
# starting at this line: | ||
# "elif token_type == tokenize.STRING:" | ||
# This tests remove_extraneous_spaces(): | ||
this_line_has_leading_indentation = '''<--That extraneous space should be | ||
removed''' # But not these spaces | ||
this_line_has_leading_indentation = '''<--That extraneous space should be | ||
removed''' # But not these spaces | ||
|
||
|
||
def is_iterable(obj): | ||
""" | ||
|
@@ -152,6 +167,7 @@ def is_iterable(obj): | |
return False | ||
return isinstance(obj, Iterable) | ||
|
||
|
||
def pyminify(options, files): | ||
""" | ||
Given an *options* object (from `optparse.OptionParser` or similar), | ||
|
@@ -177,7 +193,7 @@ def pyminify(options, files): | |
sys.exit(1) | ||
# Make our .pyz: | ||
compression.zip_pack(files, options) | ||
return None # Make sure we don't do anything else | ||
return None # Make sure we don't do anything else | ||
# Read in our prepend text (if any) | ||
prepend = None | ||
if options.prepend: | ||
|
@@ -195,8 +211,8 @@ def pyminify(options, files): | |
# obfuscation is stated) | ||
if options.use_nonlatin and not any(obfuscations): | ||
options.obfuscate = True | ||
if len(files) > 1: # We're dealing with more than one file | ||
name_generator = None # So we can tell if we need to obfuscate | ||
if len(files) > 1: # We're dealing with more than one file | ||
name_generator = None # So we can tell if we need to obfuscate | ||
if any(obfuscations): | ||
# Put together that will be used for all obfuscation functions: | ||
identifier_length = int(options.replacement_length) | ||
|
@@ -212,19 +228,22 @@ def pyminify(options, files): | |
else: | ||
name_generator = obfuscate.obfuscation_machine( | ||
identifier_length=identifier_length) | ||
table =[{}] | ||
cumulative_size = 0 # For size reduction stats | ||
cumulative_new = 0 # Ditto | ||
table = [{}] | ||
cumulative_size = 0 # For size reduction stats | ||
cumulative_new = 0 # Ditto | ||
for sourcefile in files: | ||
# Record how big the file is so we can compare afterwards | ||
filesize = os.path.getsize(sourcefile) | ||
cumulative_size += filesize | ||
# Get the module name from the path | ||
module = os.path.split(sourcefile)[1] | ||
module = ".".join(module.split('.')[:-1]) | ||
source = open(sourcefile).read() | ||
if os_name in ('nt',): | ||
source = open(sourcefile, encoding="utf8").read() | ||
else: | ||
source = open(sourcefile).read() | ||
tokens = token_utils.listified_tokenizer(source) | ||
if not options.nominify: # Perform minification | ||
if not options.nominify: # Perform minification | ||
source = minification.minify(tokens, options) | ||
# Have to re-tokenize for obfucation (it is quick): | ||
tokens = token_utils.listified_tokenizer(source) | ||
|
@@ -238,7 +257,7 @@ def pyminify(options, files): | |
table=table | ||
) | ||
# Convert back to text | ||
result = '' | ||
result = '# -*- coding: utf-8 -*-\n' | ||
if prepend: | ||
result += prepend | ||
result += token_utils.untokenize(tokens) | ||
|
@@ -257,16 +276,21 @@ def pyminify(options, files): | |
os.mkdir(options.destdir) | ||
# Need the path where the script lives for the next steps: | ||
filepath = os.path.split(sourcefile)[1] | ||
path = options.destdir + '/' + filepath # Put everything in destdir | ||
f = open(path, 'w') | ||
path = options.destdir + '/' + filepath # Put everything in destdir | ||
if os_name in ('nt',): | ||
f = open(path, 'w', encoding='utf-8') | ||
else: | ||
f = open(path, 'w') | ||
f.write(result) | ||
f.close() | ||
new_filesize = os.path.getsize(path) | ||
cumulative_new += new_filesize | ||
percent_saved = round((float(new_filesize) / float(filesize)) * 100, 2) if float(filesize)!=0 else 0 | ||
print(( | ||
percent_saved = round((float(new_filesize) / float(filesize)) * 100, 2) if float(filesize) != 0 else 0 | ||
print((( | ||
"{sourcefile} ({filesize}) reduced to {new_filesize} bytes " | ||
"({percent_saved}% of original size)").format(**locals())) | ||
"({percent_saved}% of original size)").format(**locals()))) | ||
#p_saved = round( | ||
# (float(cumulative_new) / float(cumulative_size) * 100), 2) | ||
if cumulative_size: | ||
p_saved = round( | ||
(float(cumulative_new) / float(cumulative_size) * 100), 2) | ||
|
@@ -279,11 +303,14 @@ def pyminify(options, files): | |
module = os.path.split(_file)[1] | ||
module = ".".join(module.split('.')[:-1]) | ||
filesize = os.path.getsize(_file) | ||
source = open(_file).read() | ||
if os_name in ('nt',): | ||
source = open(_file, encoding='utf-8').read() | ||
else: | ||
source = open(_file).read() | ||
# Convert the tokens from a tuple of tuples to a list of lists so we can | ||
# update in-place. | ||
tokens = token_utils.listified_tokenizer(source) | ||
if not options.nominify: # Perform minification | ||
if not options.nominify: # Perform minification | ||
source = minification.minify(tokens, options) | ||
# Convert back to tokens in case we're obfuscating | ||
tokens = token_utils.listified_tokenizer(source) | ||
|
@@ -316,9 +343,18 @@ def pyminify(options, files): | |
f.write(result) | ||
f.close() | ||
new_filesize = os.path.getsize(options.outfile) | ||
percent_saved = round(float(new_filesize)/float(filesize) * 100, 2) | ||
percent_saved = round(float(new_filesize) / float(filesize) * 100, 2) | ||
print(( | ||
"{_file} ({filesize}) reduced to {new_filesize} bytes " | ||
"({percent_saved}% of original size)".format(**locals()))) | ||
else: | ||
print(result) | ||
try: | ||
import pprint | ||
pprint.pprint(result) | ||
except Exception as inst: | ||
print(inst) | ||
pass | ||
|
||
|
||
|
||
|
Oops, something went wrong.