Skip to content

Commit

Permalink
update azure-storage to 0.33
Browse files Browse the repository at this point in the history
  • Loading branch information
michiya committed Oct 9, 2016
1 parent 33811df commit b484781
Show file tree
Hide file tree
Showing 4 changed files with 92 additions and 78 deletions.
21 changes: 7 additions & 14 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ the standard Python logging APIs to Microsoft Azure Storage.
Dependencies
------------

* azure-storage
* azure-storage 0.33 or newer

Installation
------------
Expand Down Expand Up @@ -57,7 +57,7 @@ property of a table entity along with some system-defined properties
| XXXXX | XXXXXXXXX | YYYY-MM-DD ... | log message |
+--------------+-----------+----------------+-------------+

* *class* azure_storage_logging.handlers.TableStorageHandler(*account_name=None, account_key=None, protocol='https', table='logs', batch_size=0, extra_properties=None, partition_key_formatter=None, row_key_formatter=None*)
* *class* azure_storage_logging.handlers.TableStorageHandler(*account_name=None, account_key=None, protocol='https', table='logs', batch_size=0, extra_properties=None, partition_key_formatter=None, row_key_formatter=None, is_emulated=False*)

Returns a new instance of the **TableStorageHandler** class.
The instance is initialized with the name and the key of your
Expand Down Expand Up @@ -154,7 +154,7 @@ and it pushes log messages to specified Azure storage queue.
You can pop log messages from the queue in other applications
using Azure Storage client libraries.

* *class* azure_storage_logging.handlers.QueueStorageHandler(*account_name=None, account_key=None, protocol='https', queue='logs', message_ttl=None, visibility_timeout=None, base64_encoding=False*)
* *class* azure_storage_logging.handlers.QueueStorageHandler(*account_name=None, account_key=None, protocol='https', queue='logs', message_ttl=None, visibility_timeout=None, base64_encoding=False, is_emulated=False*)

Returns a new instance of the **QueueStorageHandler** class.
The instance is initialized with the name and the key of your
Expand Down Expand Up @@ -198,7 +198,7 @@ The **BlobStorageRotatingFileHandler** class is a subclass of
log file rotation and stores the outdated one in an Azure blob storage
container when the current file reaches a certain size.

* *class* azure_storage_logging.handlers.BlobStorageRotatingFileHandler(*filename, mode='a', maxBytes=0, encoding=None, delay=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0*)
* *class* azure_storage_logging.handlers.BlobStorageRotatingFileHandler(*filename, mode='a', maxBytes=0, encoding=None, delay=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0*, is_emulated=False)

Returns a new instance of the **BlobStorageRotatingFileHandler**
class. The instance is initialized with the name and the key of your
Expand Down Expand Up @@ -265,7 +265,7 @@ The **BlobStorageTimedRotatingFileHandler** class is a subclass of
log file rotation and stores the outdated one to Azure blob storage
container at certain timed intervals.

* *class* azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler(*filename, when='h', interval=1, encoding=None, delay=False, utc=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0*)
* *class* azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler(*filename, when='h', interval=1, encoding=None, delay=False, utc=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0*, is_emulated=False)

Returns a new instance of the **BlobStorageTimedRotatingFileHandler**
class. The instance is initialized with the name and the key of your
Expand Down Expand Up @@ -410,15 +410,8 @@ three different types of storage from the logger:
Notice
------

* Follow the instructions below if you want to use this package with
Azure storage emulator that is bundled with Microsoft Azure SDK:

* If your application is not going to run on Azure compute
emulator, set ``EMULATED`` environment variable as ``True`` at first.

* specify nothing for the *account_name* and the *account_key*,
and specify ``http`` for the *protocol* at initialization of
the logging handlers.
* Set *is_emulated* to ``True`` when initializing the logging handlers
  if you want to use this package with the Azure storage emulator.

License
-------
Expand Down
85 changes: 50 additions & 35 deletions azure_storage_logging/handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
from azure.storage.blob import BlockBlobService
from azure.storage.blob.models import ContentSettings
from azure.storage.queue import QueueService
from azure.storage.table import TableService
from azure.storage.table import TableBatch, TableService

_PY3 = sys.version_info[0] == 3

Expand All @@ -48,8 +48,12 @@ def __init__(self,
zip_compression=False,
max_connections=1,
max_retries=5,
retry_wait=1.0):
self.service = BlockBlobService(account_name, account_key, protocol)
retry_wait=1.0,
is_emulated=False):
self.service = BlockBlobService(account_name=account_name,
account_key=account_key,
is_emulated=is_emulated,
protocol=protocol)
self.container_created = False
hostname = gethostname()
self.meta = {'hostname': hostname.replace('_', '-'),
Expand Down Expand Up @@ -82,7 +86,7 @@ def put_file_into_storage(self, dirName, fileName):
suffix, content_type = '', 'text/plain'
self.service.create_blob_from_path(container_name=self.container,
blob_name=fileName+suffix,
file_path=fileName,
file_path=file_path,
content_settings=ContentSettings(content_type=content_type),
max_connections=self.max_connections
) # max_retries and retry_wait no longer arguments in azure 0.33
Expand Down Expand Up @@ -113,7 +117,8 @@ def __init__(self,
zip_compression=False,
max_connections=1,
max_retries=5,
retry_wait=1.0):
retry_wait=1.0,
is_emulated=False):
meta = {'hostname': gethostname(), 'process': os.getpid()}
RotatingFileHandler.__init__(self,
filename % meta,
Expand All @@ -123,14 +128,15 @@ def __init__(self,
encoding=encoding,
delay=delay)
_BlobStorageFileHandler.__init__(self,
account_name,
account_key,
protocol,
container,
zip_compression,
max_connections,
max_retries,
retry_wait)
account_name=account_name,
account_key=account_key,
protocol=protocol,
container=container,
zip_compression=zip_compression,
max_connections=max_connections,
max_retries=max_retries,
retry_wait=retry_wait,
is_emulated=is_emulated)

def doRollover(self):
"""
Expand Down Expand Up @@ -172,7 +178,8 @@ def __init__(self,
zip_compression=False,
max_connections=1,
max_retries=5,
retry_wait=1.0):
retry_wait=1.0,
is_emulated=False):
meta = {'hostname': gethostname(), 'process': os.getpid()}
TimedRotatingFileHandler.__init__(self,
filename % meta,
Expand All @@ -183,14 +190,15 @@ def __init__(self,
delay=delay,
utc=utc)
_BlobStorageFileHandler.__init__(self,
account_name,
account_key,
protocol,
container,
zip_compression,
max_connections,
max_retries,
retry_wait)
account_name=account_name,
account_key=account_key,
protocol=protocol,
container=container,
zip_compression=zip_compression,
max_connections=max_connections,
max_retries=max_retries,
retry_wait=retry_wait,
is_emulated=is_emulated)

def emit(self, record):
"""
Expand Down Expand Up @@ -233,13 +241,15 @@ def __init__(self,
message_ttl=None,
visibility_timeout=None,
base64_encoding=False,
is_emulated=False,
):
"""
Initialize the handler.
"""
logging.Handler.__init__(self)
self.service = QueueService(account_name=account_name,
account_key=account_key,
is_emulated=is_emulated,
protocol=protocol)
self.meta = {'hostname': gethostname(), 'process': os.getpid()}
self.queue = _formatName(queue, self.meta)
Expand Down Expand Up @@ -272,6 +282,10 @@ def emit(self, record):
def _encode_text(self, text):
if self.base64_encoding:
text = b64encode(text.encode('utf-8')).decode('ascii')
# fallback for the breaking change in azure-storage 0.33
elif sys.version_info < (3,):
if not isinstance(text, unicode):
text = text.decode('utf-8')
return text


Expand All @@ -290,13 +304,15 @@ def __init__(self,
extra_properties=None,
partition_key_formatter=None,
row_key_formatter=None,
is_emulated=False,
):
"""
Initialize the handler.
"""
logging.Handler.__init__(self)
self.service = TableService(account_name=account_name,
account_key=account_key,
is_emulated=is_emulated,
protocol=protocol)
self.meta = {'hostname': gethostname(), 'process': os.getpid()}
self.table = _formatName(table, self.meta)
Expand Down Expand Up @@ -327,10 +343,10 @@ def __init__(self,
self.extra_property_formatters[extra] = f
self.extra_property_names[extra] = self._getFormatName(extra)
# the storage emulator doesn't support batch operations
if batch_size <= 1 or self.service.use_local_storage:
self.batch = False
if batch_size <= 1 or is_emulated:
self.batch = None
else:
self.batch = True
self.batch = TableBatch()
if batch_size > TableStorageHandler.MAX_BATCH_SIZE:
self.batch_size = TableStorageHandler.MAX_BATCH_SIZE
else:
Expand Down Expand Up @@ -369,8 +385,6 @@ def emit(self, record):
try:
if not self.ready:
self.service.create_table(self.table)
if self.batch:
self.service.begin_batch()
self.ready = True
# generate partition key for the entity
record.hostname = self.meta['hostname']
Expand All @@ -394,12 +408,13 @@ def emit(self, record):
copy.rowno = self.rowno
row_key = self.row_key_formatter.format(copy)
# add entity to the table
self.service.insert_or_replace_entity(self.table,
partition_key,
row_key,
entity)
# commit the ongoing batch if it reaches the high mark
if self.batch:
entity['PartitionKey'] = partition_key
entity['RowKey'] = row_key
if not self.batch:
self.service.insert_or_replace_entity(self.table, entity)
else:
self.batch.insert_or_replace_entity(entity)
# commit the ongoing batch if it reaches the high mark
self.rowno += 1
if self.rowno >= self.batch_size:
self.flush()
Expand All @@ -414,10 +429,10 @@ def flush(self):
"""
if self.batch and self.rowno > 0:
try:
self.service.commit_batch()
self.service.commit_batch(self.table, self.batch)
finally:
self.rowno = 0
self.service.begin_batch()
self.batch = TableBatch()

def setFormatter(self, fmt):
"""
Expand Down
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Logging',
]

Expand Down
Loading

0 comments on commit b484781

Please sign in to comment.