Merge "change dirty logs to work off a whitelist"
diff --git a/etc/whitelist.yaml b/etc/whitelist.yaml
deleted file mode 100644
index 2d8b741..0000000
--- a/etc/whitelist.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
-n-cpu:
- - module: "nova.virt.libvirt.driver"
- message: "During wait destroy, instance disappeared"
- - module: "glanceclient.common.http"
- message: "Request returned failure status"
- - module: "nova.openstack.common.periodic_task"
- message: "Error during ComputeManager\\.update_available_resource: \
- 'NoneType' object is not iterable"
- - module: "nova.compute.manager"
- message: "Possibly task preempted"
- - module: "nova.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "nova.network.api"
- message: "Failed storing info cache"
- - module: "nova.compute.manager"
- message: "Error while trying to clean up image"
- - module: "nova.virt.libvirt.driver"
- message: "Error injecting data into image.*\\(Unexpected error while \
- running command"
- - module: "nova.compute.manager"
- message: "Instance failed to spawn"
- - module: "nova.compute.manager"
- message: "Error: Unexpected error while running command"
- - module: "nova.virt.libvirt.driver"
- message: "Error from libvirt during destroy"
- - module: "nova.virt.libvirt.vif"
- message: "Failed while unplugging vif"
- - module: "nova.openstack.common.loopingcal"
- message: "in fixed duration looping call"
- - module: "nova.virt.libvirt.driver"
- message: "Getting disk size of instance"
- - module: "nova.virt.libvirt.driver"
- message: "No such file or directory: '/opt/stack/data/nova/instances"
- - module: "nova.virt.libvirt.driver"
- message: "Nova requires libvirt version 0\\.9\\.11 or greater"
- - module: "nova.compute.manager"
- message: "error during stop\\(\\) in sync_power_state"
- - module: "nova.compute.manager"
- message: "Instance failed network setup after 1 attempt"
- - module: "nova.compute.manager"
- message: "Periodic sync_power_state task had an error"
- - module: "nova.virt.driver"
- message: "Info cache for instance .* could not be found"
-
-g-api:
- - module: "glance.store.sheepdog"
- message: "Error in store configuration: Unexpected error while \
- running command"
- - module: "swiftclient"
- message: "Container HEAD failed: .*404 Not Found"
- - module: "glance.api.middleware.cache"
- message: "however the registry did not contain metadata for that image"
- - module: "oslo.messaging.notify._impl_messaging"
- message: ".*"
-
-ceilometer-acompute:
- - module: "ceilometer.compute.pollsters.disk"
- message: "Unable to read from monitor: Connection reset by peer"
- - module: "ceilometer.compute.pollsters.disk"
- message: "Requested operation is not valid: domain is not running"
- - module: "ceilometer.compute.pollsters.net"
- message: "Requested operation is not valid: domain is not running"
- - module: "ceilometer.compute.pollsters.disk"
- message: "Domain not found: no domain with matching uuid"
- - module: "ceilometer.compute.pollsters.net"
- message: "Domain not found: no domain with matching uuid"
- - module: "ceilometer.compute.pollsters.net"
- message: "No module named libvirt"
- - module: "ceilometer.compute.pollsters.net"
- message: "Unable to write to monitor: Broken pipe"
- - module: "ceilometer.compute.pollsters.cpu"
- message: "Domain not found: no domain with matching uuid"
- - module: "ceilometer.compute.pollsters.net"
- message: ".*"
- - module: "ceilometer.compute.pollsters.disk"
- message: ".*"
-
-ceilometer-acentral:
- - module: "ceilometer.central.manager"
- message: "403 Forbidden"
- - module: "ceilometer.central.manager"
- message: "get_samples\\(\\) got an unexpected keyword argument 'resources'"
-
-ceilometer-alarm-evaluator:
- - module: "ceilometer.alarm.service"
- message: "alarm evaluation cycle failed"
- - module: "ceilometer.alarm.evaluator.threshold"
- message: ".*"
-
-ceilometer-api:
- - module: "wsme.api"
- message: ".*"
-
-h-api:
- - module: "root"
- message: "Returning 400 to user: The server could not comply with \
- the request since it is either malformed or otherwise incorrect"
- - module: "root"
- message: "Unexpected error occurred serving API: Request limit \
- exceeded: Template exceeds maximum allowed size"
- - module: "root"
- message: "Unexpected error occurred serving API: The Stack \
- .*could not be found"
-
-h-eng:
- - module: "heat.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "heat.openstack.common.rpc.common"
- message: "The Stack .* could not be found"
-
-n-api:
- - module: "glanceclient.common.http"
- message: "Request returned failure status"
- - module: "nova.api.openstack"
- message: "Caught error: Quota exceeded for"
- - module: "nova.compute.api"
- message: "ServerDiskConfigTest"
- - module: "nova.compute.api"
- message: "ServersTest"
- - module: "nova.compute.api"
- message: "\\{u'kernel_id'.*u'ramdisk_id':"
- - module: "nova.api.openstack.wsgi"
- message: "takes exactly 4 arguments"
- - module: "nova.api.openstack"
- message: "Caught error: Instance .* could not be found"
- - module: "nova.api.metadata.handler"
- message: "Failed to get metadata for instance id:"
-
-n-cond:
- - module: "nova.notifications"
- message: "Failed to send state update notification"
- - module: "nova.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "nova.openstack.common.rpc.common"
- message: "but the actual state is deleting to caller"
- - module: "nova.openstack.common.rpc.common"
- message: "Traceback \\(most recent call last"
- - module: "nova.openstack.common.threadgroup"
- message: "Service with host .* topic conductor exists."
-
-n-sch:
- - module: "nova.scheduler.filter_scheduler"
- message: "Error from last host: "
-
-n-net:
- - module: "nova.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "nova.openstack.common.rpc.common"
- message: "'NoneType' object has no attribute '__getitem__'"
- - module: "nova.openstack.common.rpc.common"
- message: "Instance .* could not be found"
-
-c-api:
- - module: "cinder.api.middleware.fault"
- message: "Caught error: Volume .* could not be found"
- - module: "cinder.api.middleware.fault"
- message: "Caught error: Snapshot .* could not be found"
- - module: "cinder.api.openstack.wsgi"
- message: "argument must be a string or a number, not 'NoneType'"
- - module: "cinder.volume.api"
- message: "Volume status must be available to reserve"
-
-c-vol:
- - module: "cinder.brick.iscsi.iscsi"
- message: "Failed to create iscsi target for volume id"
- - module: "cinder.brick.local_dev.lvm"
- message: "stat failed: No such file or directory"
- - module: "cinder.brick.local_dev.lvm"
- message: "LV stack-volumes.*in use: not deactivating"
- - module: "cinder.brick.local_dev.lvm"
- message: "Can't remove open logical volume"
-
-ceilometer-collector:
- - module: "stevedore.extension"
- message: ".*"
- - module: "ceilometer.collector.dispatcher.database"
- message: "duplicate key value violates unique constraint"
- - module: "ceilometer.collector.dispatcher.database"
- message: "Failed to record metering data: QueuePool limit"
- - module: "ceilometer.dispatcher.database"
- message: "\\(DataError\\) integer out of range"
- - module: "ceilometer.collector.dispatcher.database"
- message: "Failed to record metering data: .* integer out of range"
- - module: "ceilometer.collector.dispatcher.database"
- message: "Failed to record metering data: .* integer out of range"
- - module: "ceilometer.openstack.common.db.sqlalchemy.session"
- message: "DB exception wrapped"
-
-q-agt:
- - module: "neutron.agent.linux.ovs_lib"
- message: "Unable to execute.*Exception:"
-
-q-dhcp:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.agent.dhcp_agent"
- message: "Unable to enable dhcp"
- - module: "neutron.agent.dhcp_agent"
- message: "Network .* RPC info call failed"
-
-q-l3:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.agent.l3_agent"
- message: "Failed synchronizing routers"
-
-q-vpn:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
-
-q-lbaas:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
- message: "Error upating stats"
- - module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
- message: "Unable to destroy device for pool"
-
-q-svc:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "neutron.openstack.common.rpc.common"
- message: "(Network|Pool|Subnet|Agent|Port) .* could not be found"
- - module: "neutron.api.v2.resource"
- message: ".* failed"
- - module: ".*"
- message: ".*"
-
-s-proxy:
- - module: "proxy-server"
- message: "Timeout talking to memcached"
diff --git a/tools/check_logs.py b/tools/check_logs.py
index edf95a1..e28c230 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -30,9 +30,30 @@

dump_all_errors = True
-# As logs are made clean, add to this set
+# As logs are made clean, remove them from this set
-must_be_clean = set(['c-sch', 'g-reg', 'ceilometer-alarm-notifier',
- 'ceilometer-collector', 'horizon', 'n-crt', 'n-obj',
- 'q-vpn'])
+allowed_dirty = set([
+ 'c-api',
+ 'ceilometer-acentral',
+ 'ceilometer-acompute',
+ 'ceilometer-alarm-evaluator',
+ 'ceilometer-anotification',
+ 'ceilometer-api',
+ 'c-vol',
+ 'g-api',
+ 'h-api',
+ 'h-eng',
+ 'ir-cond',
+ 'n-api',
+ 'n-cpu',
+ 'n-net',
+ 'n-sch',
+ 'q-agt',
+ 'q-dhcp',
+ 'q-lbaas',
+ 'q-meta',
+ 'q-metering',
+ 'q-svc',
+ 'q-vpn',
+ 's-proxy'])


def process_files(file_specs, url_specs, whitelists):
@@ -69,12 +90,12 @@
break
if not whitelisted or dump_all_errors:
if print_log_name:
- print("Log File: %s" % name)
+ print("Log File Has Errors: %s" % name)
print_log_name = False
if not whitelisted:
had_errors = True
print("*** Not Whitelisted ***"),
- print(line)
+ print(line.rstrip())
return had_errors


@@ -135,8 +156,8 @@
return 0
failed = False
for log in logs_with_errors:
- if log in must_be_clean:
- print("FAILED: %s" % log)
+ if log not in allowed_dirty:
+ print("Log: %s not allowed to have ERRORS or TRACES" % log)
failed = True
if failed:
return 1
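The new pass/fail rule, as a runnable sketch (the service names below
are illustrative): a log containing unwhitelisted ERRORs or TRACEs
fails the job only if it is missing from allowed_dirty.

    allowed_dirty = {'n-cpu', 'g-api', 's-proxy'}
    logs_with_errors = ['n-cpu', 'n-cond']

    failed = False
    for log in logs_with_errors:
        if log not in allowed_dirty:
            # n-cond is not in the set, so the whole job fails
            print("Log: %s not allowed to have ERRORS or TRACES" % log)
            failed = True

    # Mirrors the script's exit status: 1 on failure, 0 otherwise
    exit_code = 1 if failed else 0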