From b4847816e4c458a9fe51313729b87953deeccb16 Mon Sep 17 00:00:00 2001 From: Michiya Takahashi Date: Mon, 10 Oct 2016 00:46:50 +0900 Subject: [PATCH] update azure-storage to 0.33 --- README.rst | 21 +++----- azure_storage_logging/handlers.py | 85 ++++++++++++++++++------------- setup.py | 1 + tests/tests.py | 63 ++++++++++++----------- 4 files changed, 92 insertions(+), 78 deletions(-) diff --git a/README.rst b/README.rst index fb0755e..3225626 100644 --- a/README.rst +++ b/README.rst @@ -13,7 +13,7 @@ the standard Python logging APIs to Microsoft Azure Storage. Dependencies ------------ -* azure-storage +* azure-storage 0.33 or newer Installation ------------ @@ -57,7 +57,7 @@ property of a table entity along with some system-defined properties | XXXXX | XXXXXXXXX | YYYY-MM-DD ... | log message | +--------------+-----------+----------------+-------------+ -* *class* azure_storage_logging.handlers.TableStorageHandler(*account_name=None, account_key=None, protocol='https', table='logs', batch_size=0, extra_properties=None, partition_key_formatter=None, row_key_formatter=None*) +* *class* azure_storage_logging.handlers.TableStorageHandler(*account_name=None, account_key=None, protocol='https', table='logs', batch_size=0, extra_properties=None, partition_key_formatter=None, row_key_formatter=None, is_emulated=False*) Returns a new instance of the **TableStorageHandler** class. The instance is initialized with the name and the key of your @@ -154,7 +154,7 @@ and it pushes log messages to specified Azure storage queue. You can pop log messages from the queue in other applications using Azure Storage client libraries. 
-* *class* azure_storage_logging.handlers.QueueStorageHandler(*account_name=None, account_key=None, protocol='https', queue='logs', message_ttl=None, visibility_timeout=None, base64_encoding=False*) +* *class* azure_storage_logging.handlers.QueueStorageHandler(*account_name=None, account_key=None, protocol='https', queue='logs', message_ttl=None, visibility_timeout=None, base64_encoding=False, is_emulated=False*) Returns a new instance of the **QueueStorageHandler** class. The instance is initialized with the name and the key of your @@ -198,7 +198,7 @@ The **BlobStorageRotatingFileHandler** class is a subclass of log file rotation and stores the outdated one in Azure blob storage container when the current file reaches a certain size. -* *class* azure_storage_logging.handlers.BlobStorageRotatingFileHandler(*filename, mode='a', maxBytes=0, encoding=None, delay=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0*) +* *class* azure_storage_logging.handlers.BlobStorageRotatingFileHandler(*filename, mode='a', maxBytes=0, encoding=None, delay=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0, is_emulated=False*) Returns a new instance of the **BlobStorageRotatingFileHandler** class. The instance is initialized with the name and the key of your @@ -265,7 +265,7 @@ The **BlobStorageTimedRotatingFileHandler** class is a subclass of log file rotation and stores the outdated one to Azure blob storage container at certain timed intervals. 
-* *class* azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler(*filename, when='h', interval=1, encoding=None, delay=False, utc=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0*) +* *class* azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler(*filename, when='h', interval=1, encoding=None, delay=False, utc=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0, is_emulated=False*) Returns a new instance of the **BlobStorageTimedRotatingFileHandler** class. The instance is initialized with the name and the key of your @@ -410,15 +410,8 @@ three different types of storage from the logger: Notice ------ -* Follow the instructions below if you want to use this package with - Azure storage emulator that is bundled with Microsoft Azure SDK: - - * If your application is not going to run on Azure compute - emulator, set ``EMULATED`` environment variable as ``True`` at first. - - * specify nothing for the *account_name* and the *account_key*, - and specify ``http`` for the *protocol* at initialization of - the logging handlers. +* Set *is_emulated* to ``True`` at initialization of the logging handlers + if you want to use this package with Azure storage emulator. 
License ------- diff --git a/azure_storage_logging/handlers.py b/azure_storage_logging/handlers.py index a432a27..cc98694 100644 --- a/azure_storage_logging/handlers.py +++ b/azure_storage_logging/handlers.py @@ -25,7 +25,7 @@ from azure.storage.blob import BlockBlobService from azure.storage.blob.models import ContentSettings from azure.storage.queue import QueueService -from azure.storage.table import TableService +from azure.storage.table import TableBatch, TableService _PY3 = sys.version_info[0] == 3 @@ -48,8 +48,12 @@ def __init__(self, zip_compression=False, max_connections=1, max_retries=5, - retry_wait=1.0): - self.service = BlockBlobService(account_name, account_key, protocol) + retry_wait=1.0, + is_emulated=False): + self.service = BlockBlobService(account_name=account_name, + account_key=account_key, + is_emulated=is_emulated, + protocol=protocol) self.container_created = False hostname = gethostname() self.meta = {'hostname': hostname.replace('_', '-'), @@ -82,7 +86,7 @@ def put_file_into_storage(self, dirName, fileName): suffix, content_type = '', 'text/plain' self.service.create_blob_from_path(container_name=self.container, blob_name=fileName+suffix, - file_path=fileName, + file_path=file_path, content_settings=ContentSettings(content_type=content_type), max_connections=self.max_connections ) # max_retries and retry_wait no longer arguments in azure 0.33 @@ -113,7 +117,8 @@ def __init__(self, zip_compression=False, max_connections=1, max_retries=5, - retry_wait=1.0): + retry_wait=1.0, + is_emulated=False): meta = {'hostname': gethostname(), 'process': os.getpid()} RotatingFileHandler.__init__(self, filename % meta, @@ -123,14 +128,15 @@ def __init__(self, encoding=encoding, delay=delay) _BlobStorageFileHandler.__init__(self, - account_name, - account_key, - protocol, - container, - zip_compression, - max_connections, - max_retries, - retry_wait) + account_name=account_name, + account_key=account_key, + protocol=protocol, + container=container, + 
zip_compression=zip_compression, + max_connections=max_connections, + max_retries=max_retries, + retry_wait=retry_wait, + is_emulated=is_emulated) def doRollover(self): """ @@ -172,7 +178,8 @@ def __init__(self, zip_compression=False, max_connections=1, max_retries=5, - retry_wait=1.0): + retry_wait=1.0, + is_emulated=False): meta = {'hostname': gethostname(), 'process': os.getpid()} TimedRotatingFileHandler.__init__(self, filename % meta, @@ -183,14 +190,15 @@ def __init__(self, delay=delay, utc=utc) _BlobStorageFileHandler.__init__(self, - account_name, - account_key, - protocol, - container, - zip_compression, - max_connections, - max_retries, - retry_wait) + account_name=account_name, + account_key=account_key, + protocol=protocol, + container=container, + zip_compression=zip_compression, + max_connections=max_connections, + max_retries=max_retries, + retry_wait=retry_wait, + is_emulated=is_emulated) def emit(self, record): """ @@ -233,6 +241,7 @@ def __init__(self, message_ttl=None, visibility_timeout=None, base64_encoding=False, + is_emulated=False, ): """ Initialize the handler. @@ -240,6 +249,7 @@ def __init__(self, logging.Handler.__init__(self) self.service = QueueService(account_name=account_name, account_key=account_key, + is_emulated=is_emulated, protocol=protocol) self.meta = {'hostname': gethostname(), 'process': os.getpid()} self.queue = _formatName(queue, self.meta) @@ -272,6 +282,10 @@ def emit(self, record): def _encode_text(self, text): if self.base64_encoding: text = b64encode(text.encode('utf-8')).decode('ascii') + # fallback for the breaking change in azure-storage 0.33 + elif sys.version_info < (3,): + if not isinstance(text, unicode): + text = text.decode('utf-8') return text @@ -290,6 +304,7 @@ def __init__(self, extra_properties=None, partition_key_formatter=None, row_key_formatter=None, + is_emulated=False, ): """ Initialize the handler. 
@@ -297,6 +312,7 @@ def __init__(self, logging.Handler.__init__(self) self.service = TableService(account_name=account_name, account_key=account_key, + is_emulated=is_emulated, protocol=protocol) self.meta = {'hostname': gethostname(), 'process': os.getpid()} self.table = _formatName(table, self.meta) @@ -327,10 +343,10 @@ def __init__(self, self.extra_property_formatters[extra] = f self.extra_property_names[extra] = self._getFormatName(extra) # the storage emulator doesn't support batch operations - if batch_size <= 1 or self.service.use_local_storage: - self.batch = False + if batch_size <= 1 or is_emulated: + self.batch = None else: - self.batch = True + self.batch = TableBatch() if batch_size > TableStorageHandler.MAX_BATCH_SIZE: self.batch_size = TableStorageHandler.MAX_BATCH_SIZE else: @@ -369,8 +385,6 @@ def emit(self, record): try: if not self.ready: self.service.create_table(self.table) - if self.batch: - self.service.begin_batch() self.ready = True # generate partition key for the entity record.hostname = self.meta['hostname'] @@ -394,12 +408,13 @@ def emit(self, record): copy.rowno = self.rowno row_key = self.row_key_formatter.format(copy) # add entitiy to the table - self.service.insert_or_replace_entity(self.table, - partition_key, - row_key, - entity) - # commit the ongoing batch if it reaches the high mark - if self.batch: + entity['PartitionKey'] = partition_key + entity['RowKey'] = row_key + if not self.batch: + self.service.insert_or_replace_entity(self.table, entity) + else: + self.batch.insert_or_replace_entity(entity) + # commit the ongoing batch if it reaches the high mark self.rowno += 1 if self.rowno >= self.batch_size: self.flush() @@ -414,10 +429,10 @@ def flush(self): """ if self.batch and self.rowno > 0: try: - self.service.commit_batch() + self.service.commit_batch(self.table, self.batch) finally: self.rowno = 0 - self.service.begin_batch() + self.batch = TableBatch() def setFormatter(self, fmt): """ diff --git a/setup.py b/setup.py 
index e80a6fc..16f40e1 100644 --- a/setup.py +++ b/setup.py @@ -12,6 +12,7 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', 'Topic :: System :: Logging', ] diff --git a/tests/tests.py b/tests/tests.py index e7c6d73..1216579 100644 --- a/tests/tests.py +++ b/tests/tests.py @@ -14,7 +14,7 @@ from threading import current_thread from tempfile import mkdtemp -from azure.storage.blob import BlobService +from azure.storage.blob import BlockBlobService from azure.storage.queue import QueueService from azure.storage.table import TableService @@ -31,7 +31,6 @@ _EMULATED = not ACCOUNT_NAME and not ACCOUNT_KEY if _EMULATED: - os.environ.update({'EMULATED': 'True'}) ACCOUNT_NAME = None ACCOUNT_KEY = None @@ -62,7 +61,7 @@ 'rotation': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'level': 'DEBUG', 'class': 'azure_storage_logging.handlers.BlobStorageRotatingFileHandler', 'filename': os.path.join(_LOGFILE_TMPDIR, 'rotation.log'), @@ -73,7 +72,7 @@ 'rotation_with_parallel_upload': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'level': 'DEBUG', 'class': 'azure_storage_logging.handlers.BlobStorageRotatingFileHandler', 'filename': os.path.join(_LOGFILE_TMPDIR, 'rotation_with_parallel_upload.log'), @@ -85,7 +84,7 @@ 'rotation_with_zip_compression': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'level': 'DEBUG', 'class': 'azure_storage_logging.handlers.BlobStorageRotatingFileHandler', 'filename': os.path.join(_LOGFILE_TMPDIR, 'zip_compression_at_rotation.log'), @@ -98,7 +97,7 @@ 'timed_rotation': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'level': 'DEBUG', 'class': 
'azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler', 'formatter': 'verbose', @@ -111,7 +110,7 @@ 'timed_rotation_with_zip_compression': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'level': 'DEBUG', 'class': 'azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler', 'formatter': 'verbose', @@ -126,7 +125,7 @@ 'queue': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'queue': 'queue-storage-handler-test', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.QueueStorageHandler', @@ -135,7 +134,7 @@ 'message_ttl': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'queue': 'queue-storage-handler-test', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.QueueStorageHandler', @@ -145,7 +144,7 @@ 'visibility_timeout': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'queue': 'queue-storage-handler-test', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.QueueStorageHandler', @@ -155,7 +154,7 @@ 'base64_encoding': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'queue': 'queue-storage-handler-test', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.QueueStorageHandler', @@ -166,7 +165,7 @@ 'table': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'table': 'TableStorageHandlerTest', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.TableStorageHandler', @@ -175,7 +174,7 @@ 'batch': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'table': 'TableStorageHandlerTest', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.TableStorageHandler', @@ -186,7 +185,7 @@ 
'extra_properties': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'table': 'TableStorageHandlerTest', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.TableStorageHandler', @@ -204,7 +203,7 @@ 'custom_keys': { 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY, - 'protocol': 'https', + 'is_emulated': _EMULATED, 'table': 'TableStorageHandlerTest', 'level': 'INFO', 'class': 'azure_storage_logging.handlers.TableStorageHandler', @@ -312,7 +311,9 @@ def _get_container_name(self, handler_name): return container def setUp(self): - self.service = BlobService(ACCOUNT_NAME, ACCOUNT_KEY) + self.service = BlockBlobService(account_name=ACCOUNT_NAME, + account_key=ACCOUNT_KEY, + is_emulated=_EMULATED) # ensure that there's no log file in the container before each test containers = [c.name for c in self.service.list_containers()] for handler in LOGGING['handlers']: @@ -350,7 +351,7 @@ def _test_rotation(self, logger_name): '%Y-%m-%d_%H-%M-%S') self.assertGreater(rotated_at, started_at) self.assertLessEqual(rotated_at, datetime.utcnow()) - self.assertEqual(blob.properties.content_type, 'text/plain') + self.assertEqual(blob.properties.content_settings.content_type, 'text/plain') self.assertAlmostEqual(blob.properties.content_length, max_bytes, delta=1000) @@ -393,7 +394,7 @@ def test_rotation_with_zip_compression(self): '%Y-%m-%d_%H-%M-%S') self.assertGreater(rotated_at, started_at) self.assertLessEqual(rotated_at, datetime.utcnow()) - self.assertEqual(blob.properties.content_type, 'application/zip') + self.assertEqual(blob.properties.content_settings.content_type, 'application/zip') self.assertLess(blob.properties.content_length, max_bytes // 2) # confirm that the blob is a zip file @@ -447,9 +448,9 @@ def test_timed_rotation(self): blobs = iter(self.service.list_blobs(container, prefix=basename)) blob = next(blobs) self.assertTrue(blob.name.startswith(basename)) - 
self.assertEqual(blob.properties.content_type, 'text/plain') - blob_text = self.service.get_blob(container, blob.name) - self.assertRegex(blob_text.decode('utf-8'), log_text_1) + self.assertEqual(blob.properties.content_settings.content_type, 'text/plain') + blob_text = self.service.get_blob_to_text(container, blob.name) + self.assertRegex(blob_text.content, log_text_1) # confirm that there's no more blob in the container with self.assertRaises(StopIteration): @@ -482,7 +483,7 @@ def test_timed_rotation_with_zip_compression(self): blob = next(blobs) self.assertTrue(blob.name.startswith(basename)) self.assertTrue(blob.name.endswith('.zip')) - self.assertEqual(blob.properties.content_type, 'application/zip') + self.assertEqual(blob.properties.content_settings.content_type, 'application/zip') # confirm that the blob is a zip file zipfile_path = os.path.join(_LOGFILE_TMPDIR, blob.name) @@ -511,7 +512,9 @@ def test_timed_rotation_with_zip_compression(self): class QueueStorageHandlerTest(_TestCase): def setUp(self): - self.service = QueueService(ACCOUNT_NAME, ACCOUNT_KEY) + self.service = QueueService(account_name=ACCOUNT_NAME, + account_key=ACCOUNT_KEY, + is_emulated=_EMULATED) # ensure that there's no message on the queue before each test queues = set() for cfg in LOGGING['handlers'].values(): @@ -538,7 +541,7 @@ def test_logging(self): text_expected = "INFO %s" % log_text if _get_handler_config_value(handler_name, 'base64_encoding'): text_expected = _base64_encode(text_expected) - self.assertEqual(message.message_text, text_expected) + self.assertEqual(message.content, text_expected) # confirm that there's no more message in the queue with self.assertRaises(StopIteration): @@ -561,7 +564,7 @@ def test_message_ttl(self): text_expected = 'INFO %s' % log_text if _get_handler_config_value(handler_name, 'base64_encoding'): text_expected = _base64_encode(text_expected) - self.assertEqual(message.message_text, text_expected) + self.assertEqual(message.content, text_expected) 
# confirm that there's no more message in the queue with self.assertRaises(StopIteration): @@ -598,7 +601,7 @@ def test_visibility_timeout(self): text_expected = 'INFO %s' % log_text if _get_handler_config_value(handler_name, 'base64_encoding'): text_expected = _base64_encode(text_expected) - self.assertEqual(message.message_text, text_expected) + self.assertEqual(message.content, text_expected) # confirm that there's no more message in the queue with self.assertRaises(StopIteration): @@ -621,7 +624,7 @@ def test_base64_encoding(self): text_expected = "INFO %s" % log_text if _get_handler_config_value(handler_name, 'base64_encoding'): text_expected = _base64_encode(text_expected) - self.assertEqual(message.message_text, text_expected) + self.assertEqual(message.content, text_expected) # confirm that there's no more message in the queue with self.assertRaises(StopIteration): @@ -658,13 +661,15 @@ def _get_row_key_formatter_name(self, handler_name): return self._get_formatter_name(handler_name, 'row_key_formatter') def setUp(self): - self.service = TableService(ACCOUNT_NAME, ACCOUNT_KEY) + self.service = TableService(account_name=ACCOUNT_NAME, + account_key=ACCOUNT_KEY, + is_emulated=_EMULATED) # ensure that there's no entity in the table before each test tables = set() for cfg in LOGGING['handlers'].values(): if 'table' in cfg: tables.add(cfg['table']) - for table in self.service.query_tables(): + for table in self.service.list_tables(): if table.name in tables: for entity in self.service.query_entities(table.name): self.service.delete_entity(table.name,