1acacd3ad4
These log errors have hit the large-ops jobs several times. There is one
in ceilometer-acentral and one in nova-conductor.

Change-Id: Idc30110085e95c615958fc5f90e86417855e6d7a
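
Each top-level key below is a devstack screen/log name (n-cpu, g-api, and so
on) and maps to a list of module/message pairs. The escaped metacharacters
and the ".*" entries indicate that both fields are treated as regular
expressions: an error line is ignored when its logger module and its message
both match an entry for that service. A minimal Python sketch of such a
matcher follows; the function names, the file path, and the loading code are
illustrative assumptions, not the actual gate-check implementation.

    # Illustrative sketch only: names and paths here are assumed, not the
    # real gate checker.
    import re

    import yaml


    def load_whitelist(path):
        """Load the mapping of service name -> list of module/message regexes."""
        with open(path) as f:
            return yaml.safe_load(f)


    def is_whitelisted(whitelist, service, module, message):
        """Return True if an error from `service` matches a whitelist entry.

        Both fields are regexes: literal metacharacters are escaped in the
        YAML (e.g. "stop\\(\\)") and ".*" acts as a catch-all.
        """
        for entry in whitelist.get(service, []):
            if (re.search(entry["module"], module) and
                    re.search(entry["message"], message)):
                return True
        return False


    if __name__ == "__main__":
        wl = load_whitelist("whitelist.yaml")  # assumed path
        print(is_whitelisted(wl, "n-cpu",
                             "nova.compute.manager", "Possibly task preempted"))
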
n-cpu:
    - module: "nova.virt.libvirt.driver"
      message: "During wait destroy, instance disappeared"
    - module: "glanceclient.common.http"
      message: "Request returned failure status"
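    # A trailing backslash inside a double-quoted scalar escapes the line
    # break, so each two-line entry below forms a single regex.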
- module: "nova.openstack.common.periodic_task"
|
|
message: "Error during ComputeManager\\.update_available_resource: \
|
|
'NoneType' object is not iterable"
|
|
- module: "nova.compute.manager"
|
|
message: "Possibly task preempted"
|
|
- module: "nova.openstack.common.rpc.amqp"
|
|
message: "Exception during message handling"
|
|
- module: "nova.network.api"
|
|
message: "Failed storing info cache"
|
|
- module: "nova.compute.manager"
|
|
message: "Error while trying to clean up image"
|
|
- module: "nova.virt.libvirt.driver"
|
|
message: "Error injecting data into image.*\\(Unexpected error while \
|
|
running command"
|
|
- module: "nova.compute.manager"
|
|
message: "Instance failed to spawn"
|
|
- module: "nova.compute.manager"
|
|
message: "Error: Unexpected error while running command"
|
|
- module: "nova.virt.libvirt.driver"
|
|
message: "Error from libvirt during destroy"
|
|
- module: "nova.virt.libvirt.vif"
|
|
message: "Failed while unplugging vif"
|
|
- module: "nova.openstack.common.loopingcal"
|
|
message: "in fixed duration looping call"
|
|
- module: "nova.virt.libvirt.driver"
|
|
message: "Getting disk size of instance"
|
|
- module: "nova.virt.libvirt.driver"
|
|
message: "No such file or directory: '/opt/stack/data/nova/instances"
|
|
- module: "nova.virt.libvirt.driver"
|
|
message: "Nova requires libvirt version 0\\.9\\.11 or greater"
|
|
- module: "nova.compute.manager"
|
|
message: "error during stop\\(\\) in sync_power_state"
|
|
- module: "nova.compute.manager"
|
|
message: "Instance failed network setup after 1 attempt"
|
|
- module: "nova.compute.manager"
|
|
message: "Periodic sync_power_state task had an error"
|
|
- module: "nova.virt.driver"
|
|
message: "Info cache for instance .* could not be found"
|
|
|
|
g-api:
    - module: "glance.store.sheepdog"
      message: "Error in store configuration: Unexpected error while \
                running command"
    - module: "swiftclient"
      message: "Container HEAD failed: .*404 Not Found"
    - module: "glance.api.middleware.cache"
      message: "however the registry did not contain metadata for that image"
    - module: "oslo.messaging.notify._impl_messaging"
      message: ".*"

ceilometer-acompute:
    - module: "ceilometer.compute.pollsters.disk"
      message: "Unable to read from monitor: Connection reset by peer"
    - module: "ceilometer.compute.pollsters.disk"
      message: "Requested operation is not valid: domain is not running"
    - module: "ceilometer.compute.pollsters.net"
      message: "Requested operation is not valid: domain is not running"
    - module: "ceilometer.compute.pollsters.disk"
      message: "Domain not found: no domain with matching uuid"
    - module: "ceilometer.compute.pollsters.net"
      message: "Domain not found: no domain with matching uuid"
    - module: "ceilometer.compute.pollsters.net"
      message: "No module named libvirt"
    - module: "ceilometer.compute.pollsters.net"
      message: "Unable to write to monitor: Broken pipe"
    - module: "ceilometer.compute.pollsters.cpu"
      message: "Domain not found: no domain with matching uuid"
    - module: "ceilometer.compute.pollsters.net"
      message: ".*"
    - module: "ceilometer.compute.pollsters.disk"
      message: ".*"

ceilometer-acentral:
    - module: "ceilometer.central.manager"
      message: "403 Forbidden"
    - module: "ceilometer.central.manager"
      message: "get_samples\\(\\) got an unexpected keyword argument 'resources'"

ceilometer-alarm-evaluator:
    - module: "ceilometer.alarm.service"
      message: "alarm evaluation cycle failed"
    - module: "ceilometer.alarm.evaluator.threshold"
      message: ".*"

ceilometer-api:
    - module: "wsme.api"
      message: ".*"

h-api:
    - module: "root"
      message: "Returning 400 to user: The server could not comply with \
                the request since it is either malformed or otherwise incorrect"
    - module: "root"
      message: "Unexpected error occurred serving API: Request limit \
                exceeded: Template exceeds maximum allowed size"
    - module: "root"
      message: "Unexpected error occurred serving API: The Stack \
                .*could not be found"

h-eng:
    - module: "heat.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "heat.openstack.common.rpc.common"
      message: "The Stack .* could not be found"

n-api:
    - module: "glanceclient.common.http"
      message: "Request returned failure status"
    - module: "nova.api.openstack"
      message: "Caught error: Quota exceeded for"
    - module: "nova.compute.api"
      message: "ServerDiskConfigTest"
    - module: "nova.compute.api"
      message: "ServersTest"
    - module: "nova.compute.api"
      message: "\\{u'kernel_id'.*u'ramdisk_id':"
    - module: "nova.api.openstack.wsgi"
      message: "takes exactly 4 arguments"
    - module: "nova.api.openstack"
      message: "Caught error: Instance .* could not be found"
    - module: "nova.api.metadata.handler"
      message: "Failed to get metadata for instance id:"

n-cond:
    - module: "nova.notifications"
      message: "Failed to send state update notification"
    - module: "nova.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "nova.openstack.common.rpc.common"
      message: "but the actual state is deleting to caller"
    - module: "nova.openstack.common.rpc.common"
      message: "Traceback \\(most recent call last"
    - module: "nova.openstack.common.threadgroup"
      message: "Service with host .* topic conductor exists."

n-sch:
    - module: "nova.scheduler.filter_scheduler"
      message: "Error from last host: "

n-net:
    - module: "nova.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "nova.openstack.common.rpc.common"
      message: "'NoneType' object has no attribute '__getitem__'"
    - module: "nova.openstack.common.rpc.common"
      message: "Instance .* could not be found"

c-api:
    - module: "cinder.api.middleware.fault"
      message: "Caught error: Volume .* could not be found"
    - module: "cinder.api.middleware.fault"
      message: "Caught error: Snapshot .* could not be found"
    - module: "cinder.api.openstack.wsgi"
      message: "argument must be a string or a number, not 'NoneType'"
    - module: "cinder.volume.api"
      message: "Volume status must be available to reserve"

c-vol:
    - module: "cinder.brick.iscsi.iscsi"
      message: "Failed to create iscsi target for volume id"
    - module: "cinder.brick.local_dev.lvm"
      message: "stat failed: No such file or directory"
    - module: "cinder.brick.local_dev.lvm"
      message: "LV stack-volumes.*in use: not deactivating"
    - module: "cinder.brick.local_dev.lvm"
      message: "Can't remove open logical volume"

ceilometer-collector:
    - module: "stevedore.extension"
      message: ".*"
    - module: "ceilometer.collector.dispatcher.database"
      message: "duplicate key value violates unique constraint"
    - module: "ceilometer.collector.dispatcher.database"
      message: "Failed to record metering data: QueuePool limit"
    - module: "ceilometer.dispatcher.database"
      message: "\\(DataError\\) integer out of range"
    - module: "ceilometer.collector.dispatcher.database"
      message: "Failed to record metering data: .* integer out of range"
    - module: "ceilometer.openstack.common.db.sqlalchemy.session"
      message: "DB exception wrapped"

q-agt:
    - module: "neutron.agent.linux.ovs_lib"
      message: "Unable to execute.*Exception:"

q-dhcp:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.agent.dhcp_agent"
      message: "Unable to enable dhcp"
    - module: "neutron.agent.dhcp_agent"
      message: "Network .* RPC info call failed"

q-l3:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.agent.l3_agent"
      message: "Failed synchronizing routers"

q-vpn:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"

q-lbaas:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
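    # "upating" below is left as-is: it appears to mirror a misspelling in
    # the message the haproxy agent manager actually logs, so correcting it
    # here would break the match.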
- module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
|
|
message: "Error upating stats"
|
|
- module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
|
|
message: "Unable to destroy device for pool"
|
|
|
|
q-svc:
    - module: "neutron.common.legacy"
      message: "Skipping unknown group key: firewall_driver"
    - module: "neutron.openstack.common.rpc.amqp"
      message: "Exception during message handling"
    - module: "neutron.openstack.common.rpc.common"
      message: "(Network|Pool|Subnet|Agent|Port) .* could not be found"
    - module: "neutron.api.v2.resource"
      message: ".* failed"
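    # catch-all: ignores every remaining error in the q-svc log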
- module: ".*"
|
|
message: ".*"
|
|
|
|
s-proxy:
    - module: "proxy-server"
      message: "Timeout talking to memcached"