# Whitelist of messages that are logged at ERROR level but do not indicate an
# unhandled failure. As John Dickinson put it: "This isn't an error. It's
# logged at an error level because it may hint at other problems, and it's
# something an operator needs to know about, but it is not an unhandled
# failure condition. (Think of it similarly to how in swift a server being
# down is an 'error', but it's something that swift seamlessly works around.)"
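#
# Format (a sketch of the schema, assuming the log-checker semantics implied
# by the escaped regex metacharacters below): each top-level key names a
# service's log, "module" identifies the logger that emitted the line, and
# "message" is a regular expression searched against the logged message:
#
#   <service>:
#       - module: "<logger name>"
#         message: "<regex an ERROR line is allowed to match>"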
n-cpu:
    - module: "nova.virt.libvirt.driver"
      message: "During wait destroy, instance disappeared"
    - module: "glanceclient.common.http"
      message: "Request returned failure status"
    - module: "nova.openstack.common.periodic_task"
      message: "Error during ComputeManager\\.update_available_resource: \
        'NoneType' object is not iterable"
    - module: "nova.compute.manager"
      message: "Possibly task preempted"
    - module: "nova.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "nova.network.api"
      message: "Failed storing info cache"
    - module: "nova.compute.manager"
      message: "Error while trying to clean up image"
    - module: "nova.virt.libvirt.driver"
      message: "Error injecting data into image.*\\(Unexpected error while \
        running command"
    - module: "nova.compute.manager"
      message: "Instance failed to spawn"
    - module: "nova.compute.manager"
      message: "Error: Unexpected error while running command"
    - module: "nova.virt.libvirt.driver"
      message: "Error from libvirt during destroy"
    - module: "nova.virt.libvirt.vif"
      message: "Failed while unplugging vif"
    - module: "nova.openstack.common.loopingcall"
      message: "in fixed duration looping call"
    - module: "nova.virt.libvirt.driver"
      message: "Getting disk size of instance"
    - module: "nova.virt.libvirt.driver"
      message: "No such file or directory: '/opt/stack/data/nova/instances"
    - module: "nova.virt.libvirt.driver"
      message: "Nova requires libvirt version 0\\.9\\.11 or greater"
    - module: "nova.compute.manager"
      message: "error during stop\\(\\) in sync_power_state"
    - module: "nova.compute.manager"
      message: "Instance failed network setup after 1 attempt"
    - module: "nova.compute.manager"
      message: "Periodic sync_power_state task had an error"

g-api:
    - module: "glance.store.sheepdog"
      message: "Error in store configuration: Unexpected error while \
        running command"
    - module: "swiftclient"
      message: "Container HEAD failed: .*404 Not Found"
    - module: "glance.api.middleware.cache"
      message: "however the registry did not contain metadata for that image"
    - module: "oslo.messaging.notify._impl_messaging"
      message: ".*"

ceilometer-acompute:
    - module: "ceilometer.compute.pollsters.disk"
      message: "Unable to read from monitor: Connection reset by peer"
    - module: "ceilometer.compute.pollsters.disk"
      message: "Requested operation is not valid: domain is not running"
    - module: "ceilometer.compute.pollsters.net"
      message: "Requested operation is not valid: domain is not running"
    - module: "ceilometer.compute.pollsters.disk"
      message: "Domain not found: no domain with matching uuid"
    - module: "ceilometer.compute.pollsters.net"
      message: "Domain not found: no domain with matching uuid"
    - module: "ceilometer.compute.pollsters.net"
      message: "No module named libvirt"
    - module: "ceilometer.compute.pollsters.net"
      message: "Unable to write to monitor: Broken pipe"
    - module: "ceilometer.compute.pollsters.cpu"
      message: "Domain not found: no domain with matching uuid"
    - module: "ceilometer.compute.pollsters.net"
      message: ".*"
    - module: "ceilometer.compute.pollsters.disk"
      message: ".*"

ceilometer-alarm-evaluator:
    - module: "ceilometer.alarm.service"
      message: "alarm evaluation cycle failed"
    - module: "ceilometer.alarm.evaluator.threshold"
      message: ".*"

ceilometer-api:
    - module: "wsme.api"
      message: ".*"

h-api:
    - module: "root"
      message: "Returning 400 to user: The server could not comply with \
        the request since it is either malformed or otherwise incorrect"
    - module: "root"
      message: "Unexpected error occurred serving API: Request limit \
        exceeded: Template exceeds maximum allowed size"
    - module: "root"
      message: "Unexpected error occurred serving API: The Stack \
        .*could not be found"

h-eng:
    - module: "heat.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "heat.openstack.common.rpc.common"
      message: "The Stack .* could not be found"

n-api:
    - module: "glanceclient.common.http"
      message: "Request returned failure status"
    - module: "nova.api.openstack"
      message: "Caught error: Quota exceeded for"
    - module: "nova.compute.api"
      message: "ServerDiskConfigTest"
    - module: "nova.compute.api"
      message: "ServersTest"
    - module: "nova.compute.api"
      message: "\\{u'kernel_id'.*u'ramdisk_id':"
    - module: "nova.api.openstack.wsgi"
      message: "takes exactly 4 arguments"
    - module: "nova.api.openstack"
      message: "Caught error: Instance .* could not be found"
    - module: "nova.api.metadata.handler"
      message: "Failed to get metadata for instance id:"

n-cond:
    - module: "nova.notifications"
      message: "Failed to send state update notification"
    - module: "nova.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "nova.openstack.common.rpc.common"
      message: "but the actual state is deleting to caller"
    - module: "nova.openstack.common.rpc.common"
      message: "Traceback \\(most recent call last"

n-sch:
    - module: "nova.scheduler.filter_scheduler"
      message: "Error from last host: "

n-net:
    - module: "nova.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "nova.openstack.common.rpc.common"
      message: "'NoneType' object has no attribute '__getitem__'"
    - module: "nova.openstack.common.rpc.common"
      message: "Instance .* could not be found"

c-api:
    - module: "cinder.api.middleware.fault"
      message: "Caught error: Volume .* could not be found"
    - module: "cinder.api.middleware.fault"
      message: "Caught error: Snapshot .* could not be found"
    - module: "cinder.api.openstack.wsgi"
      message: "argument must be a string or a number, not 'NoneType'"
    - module: "cinder.volume.api"
      message: "Volume status must be available to reserve"

c-vol:
    - module: "cinder.brick.iscsi.iscsi"
      message: "Failed to create iscsi target for volume id"
    - module: "cinder.brick.local_dev.lvm"
      message: "stat failed: No such file or directory"
    - module: "cinder.brick.local_dev.lvm"
      message: "LV stack-volumes.*in use: not deactivating"
    - module: "cinder.brick.local_dev.lvm"
      message: "Can't remove open logical volume"

ceilometer-collector:
    - module: "stevedore.extension"
      message: ".*"
    - module: "ceilometer.collector.dispatcher.database"
      message: "duplicate key value violates unique constraint"
    - module: "ceilometer.collector.dispatcher.database"
      message: "Failed to record metering data: QueuePool limit"
    - module: "ceilometer.dispatcher.database"
      message: "\\(DataError\\) integer out of range"
    - module: "ceilometer.collector.dispatcher.database"
      message: "Failed to record metering data: .* integer out of range"
    - module: "ceilometer.openstack.common.db.sqlalchemy.session"
      message: "DB exception wrapped"

q-agt:
    - module: "neutron.agent.linux.ovs_lib"
      message: "Unable to execute.*Exception:"

q-dhcp:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.agent.dhcp_agent"
      message: "Unable to enable dhcp"
    - module: "neutron.agent.dhcp_agent"
      message: "Network .* RPC info call failed"

q-l3:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.agent.l3_agent"
      message: "Failed synchronizing routers"

q-vpn:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"

q-lbaas:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
      # "upating" (sic) mirrors the misspelling in the log message this
      # pattern has to match.
      message: "Error upating stats"
    - module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
      message: "Unable to destroy device for pool"

q-svc:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "neutron.openstack.common.rpc.common"
      message: "(Network|Pool|Subnet|Agent|Port) .* could not be found"
    - module: "neutron.api.v2.resource"
      message: ".* failed"
    - module: ".*"
      message: ".*"

s-proxy:
    - module: "proxy-server"
      message: "Timeout talking to memcached"