flake8 on tools via autopep8 (cloud-custodian#1099)
JohnTheodore authored and kapilt committed Apr 26, 2017
1 parent a2c0b1a commit a1b2acd
Showing 18 changed files with 108 additions and 411 deletions.
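The commit message above summarizes the approach: the tools/ tree was mechanically reformatted so that the flake8 check added to .travis.yml (first hunk below) passes cleanly. The exact autopep8 invocation is not recorded in the diff; the following is only a minimal sketch, assuming autopep8's documented fix_code API with default fixes, of the kind of pass that produces whitespace, comment, and blank-line changes like those in the hunks that follow.

    import os
    import autopep8  # pip install autopep8

    # Rewrite every Python file under tools/ in place with autopep8's default
    # PEP 8 fixes (operator spacing, comment spacing, blank-line counts, etc.).
    for root, _, files in os.walk('tools'):
        for name in files:
            if not name.endswith('.py'):
                continue
            path = os.path.join(root, name)
            with open(path) as fh:
                original = fh.read()
            fixed = autopep8.fix_code(original)
            if fixed != original:
                with open(path, 'w') as fh:
                    fh.write(fixed)

Running flake8 tools afterwards (the new CI command in the first hunk) reports anything autopep8 could not fix automatically, such as the lambda assignment addressed by hand in c7nsentry.py further down.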
2 changes: 1 addition & 1 deletion .travis.yml
@@ -12,7 +12,7 @@ install:
- pip install --upgrade coveralls
- python setup.py develop
script:
- flake8 tools/c7n_mailer
- flake8 tools
- make sphinx && py.test -v -n auto --cov=c7n tests tools/c7n_*
after_success:
coveralls
2 changes: 1 addition & 1 deletion tools/c7n_salactus/c7n_salactus/cli.py
@@ -35,7 +35,7 @@ def debug(f):
def _f(*args, **kw):
try:
f(*args, **kw)
except (SystemExit, KeyboardInterrupt) as e:
except (SystemExit, KeyboardInterrupt):
raise
except:
import traceback, sys, pdb
4 changes: 2 additions & 2 deletions tools/c7n_salactus/c7n_salactus/db.py
@@ -20,8 +20,8 @@ def accounts(self, accounts=()):
for k in self.data['bucket-size'].keys():
a, b = k.split(':')
accounts.setdefault(a, []).append(k)
return [Account(a, [Bucket(b, self.data) for b in buckets])
for a, buckets in accounts.items()]
return [Account(aa, [Bucket(bb, self.data) for bb in buckets])
for aa, buckets in accounts.items()]

def buckets(self, accounts=()):
if accounts:
22 changes: 10 additions & 12 deletions tools/c7n_salactus/c7n_salactus/worker.py
@@ -70,7 +70,7 @@

# Minimum size of the bucket before partitioning
PARTITION_BUCKET_SIZE_THRESHOLD = 100000
#PARTITION_BUCKET_SIZE_THRESHOLD = 20000
# PARTITION_BUCKET_SIZE_THRESHOLD = 20000

# Page size for keys found during partition
PARTITION_KEYSET_THRESHOLD = 500
@@ -83,7 +83,7 @@
('NextKeyMarker', 'NextVersionIdMarker')),
False: ('Contents', 'list_objects_v2',
('NextContinuationToken',))
}
}

connection = redis.Redis(host=REDIS_HOST)
# Increase timeouts to assist with non local regions, also
@@ -210,7 +210,7 @@ def bucket_key_count(client, bucket):
hour=0, minute=0, second=0, microsecond=0) - timedelta(1),
EndTime=datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0),
Period=60*60*24,
Period=60 * 60 * 24,
Statistics=['Minimum'])
response = client.get_metric_statistics(**params)
if not response['Datapoints']:
@@ -298,7 +298,6 @@ class CharSet(object):
ascii_lower_digits = set(string.ascii_lowercase + string.digits)
ascii_alphanum = set(string.ascii_letters + string.digits)


punctuation = set(string.punctuation)

@classmethod
@@ -478,7 +477,7 @@ def detect_partition_strategy(account_info, bucket, delimiters=('/', '-'), prefi
process_bucket_iterator, [account_info, bucket], prefixes)


@job('bucket-partition', timeout=3600*12, connection=connection)
@job('bucket-partition', timeout=3600 * 12, connection=connection)
def process_bucket_partitions(
account_info, bucket, prefix_set=('',), partition='/',
strategy=None, limit=4):
@@ -547,8 +546,8 @@ def statm(prefix):
if len(prefix_queue) > PARTITION_QUEUE_THRESHOLD:
log.info("Partition add friends, %s", statm(prefix))
for s_prefix_set in chunks(
prefix_queue[PARTITION_QUEUE_THRESHOLD-1:],
PARTITION_QUEUE_THRESHOLD-1):
prefix_queue[PARTITION_QUEUE_THRESHOLD - 1:],
PARTITION_QUEUE_THRESHOLD - 1):

for s in list(s_prefix_set):
if strategy.is_depth_exceeded(prefix):
@@ -562,13 +561,13 @@ def statm(prefix):
account_info, bucket,
prefix_set=s_prefix_set, partition=partition,
strategy=strategy, limit=limit)
prefix_queue = prefix_queue[:PARTITION_QUEUE_THRESHOLD-1]
prefix_queue = prefix_queue[:PARTITION_QUEUE_THRESHOLD - 1]

if keyset:
invoke(process_keyset, account_info, bucket, {contents_key: keyset})


@job('bucket-page-iterator', timeout=3600*24, connection=connection)
@job('bucket-page-iterator', timeout=3600 * 24, connection=connection)
def process_bucket_iterator(account_info, bucket,
prefix="", delimiter="", **continuation):
"""Bucket pagination
@@ -595,16 +594,15 @@ def process_bucket_iterator(account_info, bucket,
invoke(process_keyset, account_info, bucket, page)


@job('bucket-keyset-scan', timeout=3600*12, connection=connection)
@job('bucket-keyset-scan', timeout=3600 * 12, connection=connection)
def process_keyset(account_info, bucket, key_set):
session = get_session(account_info)
s3 = session.client('s3', region_name=bucket['region'], config=s3config)
processor = EncryptExtantKeys(keyconfig)
remediation_count = 0
denied_count = 0
contents_key, _, _ = BUCKET_OBJ_DESC[bucket['versioned']]
processor = (bucket['versioned'] and processor.process_version
or processor.process_key)
processor = (bucket['versioned'] and processor.process_version or processor.process_key)
connection.hincrby(
'keys-scanned', bucket_id(account_info, bucket['name']),
len(key_set.get(contents_key, [])))
5 changes: 2 additions & 3 deletions tools/c7n_salactus/setup.py
@@ -19,8 +19,8 @@
version='0.1.4',
description="Cloud Custodian - Salactus S3",
classifiers=[
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
],
url="https://github.com/capitalone/cloud-custodian",
license="Apache-2.0",
@@ -30,4 +30,3 @@
'c7n-salactus = c7n_salactus.cli:cli']},
install_requires=["c7n", "click", "rq", "redis"],
)

19 changes: 10 additions & 9 deletions tools/c7n_sentry/c7n_sentry/c7nsentry.py
@@ -101,7 +101,7 @@ def process_log_event(event, context):
# Grab the actual error log payload
serialized = event['awslogs'].pop('data')
data = json.loads(zlib.decompress(
base64.b64decode(serialized), 16+zlib.MAX_WBITS))
base64.b64decode(serialized), 16 + zlib.MAX_WBITS))
msg = get_sentry_message(config, data)
if msg is None:
return
@@ -151,7 +151,7 @@ def process_log_group(config):
event_count = 0
log.debug("Querying log events with %s", params)
for p in paginator.paginate(**params):
#log.debug("Searched streams\n %s", ", ".join(
# log.debug("Searched streams\n %s", ", ".join(
# [s['logStreamName'] for s in p['searchedLogStreams']]))
for e in p['events']:
event_count += 1
@@ -180,7 +180,7 @@ def send_sentry_message(sentry_dsn, msg):
auth_header_keys = [
('sentry_timestamp', time.time()),
('sentry_client', client),
('sentry_version', '7'), # try 7?
('sentry_version', '7'), # try 7?
('sentry_key', key),
('sentry_secret', secret)]
auth_header = "Sentry %s" % ', '.join(
@@ -250,7 +250,7 @@ def get_sentry_message(config, data, log_client=None, is_lambda=True):
sentry_msg = {
'event_id': uuid.uuid4().hex,
'timestamp': datetime.fromtimestamp(
data['logEvents'][0]['timestamp']/1000).isoformat(),
data['logEvents'][0]['timestamp'] / 1000).isoformat(),
'user': {
'id': config['account_id'],
'username': config['account_name']},
@@ -271,7 +271,7 @@ def get_sentry_message(config, data, log_client=None, is_lambda=True):
sentry_msg['breadcrumbs'] = [
{'category': 'policy',
'message': e['message'],
'timestamp': e['timestamp']/1000} for e in breadcrumbs]
'timestamp': e['timestamp'] / 1000} for e in breadcrumbs]
return sentry_msg


@@ -393,7 +393,7 @@ def process_account(a):
log.info("creating account project %s", a['name'])
spost(endpoint + "teams/%s/%s/projects/" % (
options.sentry_org, team_name),
json={'name': a['name']})
json={'name': a['name']})

bagger = partial(
Bag,
@@ -429,7 +429,6 @@ def process_account(a):

return [process_account(a) for a in accounts]


with ThreadPoolExecutor(max_workers=3) as w:
futures = {}
for a in accounts:
@@ -453,7 +452,9 @@ def deploy(options):

def deploy_one(region_name, account, policy, sentry_dsn):
from c7n.mu import LambdaManager
session_factory = lambda: boto3.Session(region_name=region_name)

def session_factory():
return boto3.Session(region_name=region_name)
log_group_name = '/aws/lambda/custodian-{}'.format(policy['name'])
arn = 'arn:aws:logs:{}:{}:log-group:{}:*'.format(
region_name, account['account_id'], log_group_name)
@@ -484,7 +485,7 @@ def setup_parser():
common_parser(cmd_orgreplay)
cmd_orgreplay.set_defaults(command=orgreplay)
cmd_orgreplay.add_argument('--profile')
#cmd_orgreplay.add_argument('--role')
# cmd_orgreplay.add_argument('--role')
cmd_orgreplay.add_argument('--start')
cmd_orgreplay.add_argument('--end')
cmd_orgreplay.add_argument('--sentry-org', default="c7n")
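One change in the deploy_one hunk above is not a pure formatting fix: the inline session_factory lambda is replaced with a def, the standard remedy for flake8's E731 warning ("do not assign a lambda expression, use a def"). A minimal standalone sketch of the pattern, with illustrative names and region rather than the repository's own:

    import boto3

    # Flagged by flake8 (E731): a function created this way is anonymously
    # named '<lambda>' in tracebacks and repr() output.
    # session_factory = lambda: boto3.Session(region_name='us-east-1')

    # Preferred form, as applied in deploy_one above: a def gives the factory
    # a real name and a place for a docstring.
    def session_factory(region_name='us-east-1'):
        """Return a boto3 session pinned to one region (example region only)."""
        return boto3.Session(region_name=region_name)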
5 changes: 2 additions & 3 deletions tools/c7n_sentry/setup.py
@@ -19,8 +19,8 @@
version='0.1',
description="Cloud Custodian - Sentry",
classifiers=[
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
],
url="https://github.com/capitalone/cloud-custodian",
license="Apache-2.0",
@@ -30,4 +30,3 @@
'c7n-sentry = c7n_sentry.c7nsentry:main']},
install_requires=["c7n"],
)

3 changes: 2 additions & 1 deletion tools/c7n_sentry/test_sentry.py
@@ -31,7 +31,7 @@ def test_get_sentry_message(self):
project='custodian',
account_name='c7n-test',
account_id='9111411911411',
)
)
sentry_msg = c7nsentry.get_sentry_message(
config, {'logGroup': '/cloud-custodian/',
'logStream': 'night-policy', 'logEvents': [{
@@ -45,6 +45,7 @@ def test_preserve_full_message(self):
"FinalDBSnapshotIdentifier is not a valid identifier",
error['value'])


msg = """2016-07-07 19:14:24,160 - ERROR - custodian.output - Error while executing policy\nTraceback (most recent call last):\n File \"/usr/local/custodian/lib/python2.7/site-packages/c7n/policy.py\", line 191, in poll\n resources = self.resource_manager.resources()\n File \"/usr/local/custodian/lib/python2.7/site-packages/c7n/query.py\", line 141, in resources\n resources = self.augment(resources)\n File \"/usr/local/custodian/lib/python2.7/site-packages/c7n/resources/s3.py\", line 95, in augment\n results = filter(None, results)\n File \"/usr/local/custodian/lib/python2.7/site-packages/concurrent/futures/_base.py\", line 581, in result_iterator\n yield future.result()\n File \"/usr/local/custodian/lib/python2.7/site-packages/concurrent/futures/_base.py\", line 405, in result\n return self.__get_result()\n File \"/usr/local/custodian/lib/python2.7/site-packages/concurrent/futures/thread.py\", line 55, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/usr/local/custodian/lib/python2.7/site-packages/c7n/resources/s3.py\", line 126, in assemble_bucket\n v = method(Bucket=b['Name'])\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/client.py\", line 258, in _api_call\n return self._make_api_call(operation_name, kwargs)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/client.py\", line 537, in _make_api_call\n operation_model, request_dict)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/endpoint.py\", line 117, in make_request\n return self._send_request(request_dict, operation_model)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/endpoint.py\", line 146, in _send_request\n success_response, exception):\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/endpoint.py\", line 219, in _needs_retry\n caught_exception=caught_exception)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/hooks.py\", line 227, in emit\n return self._emit(event_name, kwargs)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/hooks.py\", line 210, in _emit\n response = handler(**kwargs)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/retryhandler.py\", line 183, in __call__\n if self._checker(attempts, response, caught_exception):\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/retryhandler.py\", line 251, in __call__\n caught_exception)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/retryhandler.py\", line 274, in _should_retry\n return self._checker(attempt_number, response, caught_exception)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/retryhandler.py\", line 314, in __call__\n caught_exception)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/retryhandler.py\", line 223, in __call__\n attempt_number, caught_exception)\n File \"/usr/local/custodian/lib/python2.7/site-packages/botocore/retryhandler.py\", line 356, in _check_caught_exception\n raise caught_exception\nSSLError: EOF occurred in violation of protocol (_ssl.c:765)"""


12 changes: 9 additions & 3 deletions tools/c7n_sphinxext/c7n_sphinxext/c7n_schema.py
@@ -63,14 +63,20 @@ def run(self):
try:
module = importlib.import_module(module_name)
except ImportError:
raise SphinxError("Unable to generate reference docs for %s, couldn't import module '%s'" % (model_name, module_name))
raise SphinxError(
"Unable to generate reference docs for %s, couldn't import module '%s'" %
(model_name, module_name))

model = getattr(module, model_name, None)
if model is None:
raise SphinxError("Unable to generate reference docs for %s, no model '%s' in %s" % (model_name, model_name, module_name))
raise SphinxError(
"Unable to generate reference docs for %s, no model '%s' in %s" %
(model_name, model_name, module_name))

if not hasattr(model, 'schema'):
raise SphinxError("Unable to generate reference docs for %s, model '%s' does not have a 'schema' attribute" % (model_name, model_name))
raise SphinxError(
"Unable to generate reference docs for %s, model '%s' does not\
have a 'schema' attribute" % (model_name, model_name))

schema = reformat_schema(model)

4 changes: 2 additions & 2 deletions tools/c7n_sphinxext/setup.py
@@ -19,8 +19,8 @@
version='1.0',
description="Cloud Custodian - Sphinx Extensions",
classifiers=[
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
],
url="https://github.com/capitalone/cloud-custodian",
license="Apache-2.0",
28 changes: 14 additions & 14 deletions tools/c7n_traildb/traildb.py
@@ -93,6 +93,7 @@ def _init(self):
# response text,
# request text,
# user text,

def insert(self, records):
self.cursor.executemany(
"insert into events values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
@@ -111,9 +112,9 @@ def reduce_records(x, y):
return y


#STOP = 42
# STOP = 42
#
#def store_records(output, q):
# def store_records(output, q):
# db = TrailDB(output)
# while True:
# results = q.get()
@@ -171,13 +172,13 @@ def process_records(records,
r.get('requestID', ''),
r.get('sourceIPAddress', ''),
uid,
# TODO make this optional, for now omit for size
# json.dumps(r['requestParameters']),
# json.dumps(r['responseElements']),
# json.dumps(r['userIdentity']),
# TODO make this optional, for now omit for size
# json.dumps(r['requestParameters']),
# json.dumps(r['responseElements']),
# json.dumps(r['userIdentity']),
r.get('errorCode', None),
r.get('errorMessage', None)
))
))
if data_dir:
if not user_records:
return
@@ -202,7 +203,7 @@ def process_bucket(
# PyPy has some memory leaks.... :-(
pool = Pool(maxtasksperchild=10)
t = time.time()
object_count = object_size = idx = 0
object_count = object_size = 0

log.info("Processing:%d cloud-trail %s" % (
cpu_count(),
@@ -223,7 +224,7 @@
trail_bucket=bucket_name)
db = TrailDB(output)

bsize = math.ceil(1000/float(cpu_count()))
bsize = math.ceil(1000 / float(cpu_count()))
for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
objects = page.get('Contents', ())
object_count += len(objects)
@@ -236,7 +237,7 @@
results = map(object_processor, chunks(objects, bsize))

st = time.time()
log.info("Loaded page time:%0.2fs", st-pt)
log.info("Loaded page time:%0.2fs", st - pt)

for r in results:
for fpath in r:
@@ -248,10 +249,10 @@
l = t
t = time.time()

log.info("Stored page time:%0.2fs", t-st)
log.info("Stored page time:%0.2fs", t - st)
log.info(
"Processed paged time:%0.2f size:%s count:%s" % (
t-l, object_size, object_count))
t - l, object_size, object_count))
if objects:
log.info('Last Page Key: %s', objects[-1]['Key'])

@@ -295,7 +296,6 @@ def main():
parser = setup_parser()
options = parser.parse_args()


if options.tmpdir and not os.path.exists(options.tmpdir):
os.makedirs(options.tmpdir)
prefix = get_bucket_path(options)
@@ -309,7 +309,7 @@ def main():
options.source,
options.not_source,
options.tmpdir
)
)


if __name__ == '__main__':
[Diffs for the remaining changed files were not loaded in this view.]
