For a complete list of all available configuration options for each OpenStack Compute service, run bin/nova-<servicename> --help.
Configuration option=Default value | (Type) Description |
enable_new_services=True | (BoolOpt)Services to be added to the available pool on create |
enabled_apis=['ec2', 'osapi_compute', 'metadata'] | (ListOpt)a list of APIs to enable by default |
enabled_ssl_apis=[] | (ListOpt)a list of APIs with enabled SSL |
instance_name_template=instance-%08x | (StrOpt)Template string to be used to generate instance names |
multi_instance_display_name_template=%(name)s-%(uuid)s | (StrOpt)When creating multiple instances with a single request using the os-multiple-create API extension, this template will be used to build the display name for each instance. The benefit is that the instances end up with different hostnames. To restore legacy behavior of every instance having the same name, set this option to "%(name)s". Valid keys for the template are: name, uuid, count. |
non_inheritable_image_properties=['cache_in_nova', 'bittorrent'] | (ListOpt)These are image properties which a snapshot should not inherit from an instance |
null_kernel=nokernel | (StrOpt)kernel image that indicates not to use a kernel, but to use a raw disk image instead |
osapi_compute_ext_list=[] | (ListOpt)Specify list of extensions to load when using osapi_compute_extension option with nova.api.openstack.compute.contrib.select_extensions |
osapi_compute_extension=['nova.api.openstack.compute.contrib.standard_extensions'] | (MultiStrOpt)osapi compute extension to load |
osapi_compute_link_prefix=None | (StrOpt)Base URL that will be presented to users in links to the OpenStack Compute API |
osapi_compute_listen=0.0.0.0 | (StrOpt)IP address on which the OpenStack Compute API listens |
osapi_compute_listen_port=8774 | (IntOpt)Port on which the OpenStack Compute API listens |
osapi_compute_workers=None | (IntOpt)Number of workers for OpenStack API service |
osapi_hide_server_address_states=['building'] | (ListOpt)List of instance states that should hide network info |
servicegroup_driver=db | (StrOpt)The driver for servicegroup service (valid options are: db, zk, mc) |
snapshot_name_template=snapshot-%s | (StrOpt)Template string to be used to generate snapshot names |
use_forwarded_for=False | (BoolOpt)Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. |
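For example, a minimal nova.conf excerpt based on the API options above might look like the following (the values shown are illustrative, not the defaults):

    enabled_apis=osapi_compute,metadata
    osapi_compute_listen=0.0.0.0
    osapi_compute_listen_port=8774
    osapi_compute_workers=4
    instance_name_template=instance-%08x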
Configuration option=Default value | (Type) Description |
api_rate_limit=True | (BoolOpt)whether to rate limit the api |
auth_strategy=noauth | (StrOpt)The strategy to use for auth: noauth or keystone. |
Configuration option=Default value | (Type) Description |
default_availability_zone=nova | (StrOpt)default compute node availability_zone |
default_schedule_zone=None | (StrOpt)availability zone to use when user doesn't specify one |
internal_service_availability_zone=internal | (StrOpt)availability_zone to show internal services under |
Configuration option=Default value | (Type) Description |
db_backend=sqlalchemy | (StrOpt)The backend to use for db |
db_backend=sqlalchemy | (StrOpt)The backend to use for bare-metal database |
deploy_kernel=None | (StrOpt)Default kernel image ID used in deployment phase |
deploy_ramdisk=None | (StrOpt)Default ramdisk image ID used in deployment phase |
driver=nova.virt.baremetal.pxe.PXE | (StrOpt)Baremetal driver back-end (pxe or tilera) |
driver=nova.cells.rpc_driver.CellsRPCDriver | (StrOpt)Cells communication driver to use |
instance_type_extra_specs=[] | (ListOpt)a list of additional capabilities corresponding to instance_type_extra_specs for this compute host to advertise. Valid entries are name=value pairs, for example: "key1:val1, key2:val2" |
ipmi_power_retry=5 | (IntOpt)maximum number of retries for IPMI operations |
net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template | (StrOpt)Template file for injected network config |
power_manager=nova.virt.baremetal.ipmi.IPMI | (StrOpt)Baremetal power management method |
pxe_append_params=None | (StrOpt)additional append parameters for baremetal PXE boot |
pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template | (StrOpt)Template file for PXE configuration |
pxe_deploy_timeout=0 | (IntOpt)Timeout for PXE deployments. Default: 0 (unlimited) |
sql_connection=sqlite:////<nova install directory>/nova/openstack/common/db/$sqlite_db | (StrOpt)The SQLAlchemy connection string used to connect to the database |
sql_connection=sqlite:///$state_path/baremetal_$sqlite_db | (StrOpt)The SQLAlchemy connection string used to connect to the bare-metal database |
terminal=shellinaboxd | (StrOpt)path to baremetal terminal program |
terminal_cert_dir=None | (StrOpt)path to baremetal terminal SSL cert(PEM) |
terminal_pid_dir=$state_path/baremetal/console | (StrOpt)path to directory stores pidfiles of baremetal_terminal |
tftp_root=/tftpboot | (StrOpt)Baremetal compute node's tftp root path |
use_unsafe_iscsi=False | (BoolOpt)Do not set this outside of dev/test environments. If a node does not have a fixed PXE IP address, volumes are exported with a globally open ACL |
vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver | (StrOpt)Baremetal VIF driver. |
virtual_power_host_key=None | (StrOpt)ssh key for virtual power host_user |
virtual_power_host_pass= | (StrOpt)password for virtual power host_user |
virtual_power_host_user= | (StrOpt)user to execute virtual power commands as |
virtual_power_ssh_host= | (StrOpt)IP address or hostname of the virtual power host |
virtual_power_ssh_port=22 | (IntOpt)Port to use for SSH to the virtual power host |
virtual_power_type=virsh | (StrOpt)base command to use for virtual power (vbox, virsh) |
Configuration option=Default value | (Type) Description |
ca_file=cacert.pem | (StrOpt)Filename of root CA |
ca_path=$state_path/CA | (StrOpt)Where we keep our root CA |
cert_manager=nova.cert.manager.CertManager | (StrOpt)full class name for the Manager for cert |
cert_topic=cert | (StrOpt)the topic cert nodes listen on |
crl_file=crl.pem | (StrOpt)Filename of root Certificate Revocation List |
key_file=private/cakey.pem | (StrOpt)Filename of private key |
keys_path=$state_path/keys | (StrOpt)Where we keep our keys |
project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s | (StrOpt)Subject for certificate for projects, %s for project, timestamp |
use_project_ca=False | (BoolOpt)Should we use a CA for each project? |
user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s | (StrOpt)Subject for certificate for users, %s for project, user, timestamp |
Configuration option=Default value | (Type) Description |
call_timeout=60 | (IntOpt)Seconds to wait for response from a call to a cell. |
capabilities=['hypervisor=xenserver;kvm', 'os=linux;windows'] | (ListOpt)Key/Multi-value list with the capabilities of the cell |
driver=nova.virt.baremetal.pxe.PXE | (StrOpt)Baremetal driver back-end (pxe or tilera) |
driver=nova.cells.rpc_driver.CellsRPCDriver | (StrOpt)Cells communication driver to use |
enable=False | (BoolOpt)Enable cell functionality |
instance_update_num_instances=1 | (IntOpt)Number of instances to update per periodic task run |
instance_updated_at_threshold=3600 | (IntOpt)Number of seconds after an instance was updated or deleted to continue to update cells |
manager=nova.cells.manager.CellsManager | (StrOpt)Manager for cells |
manager=nova.conductor.manager.ConductorManager | (StrOpt)full class name for the Manager for conductor |
max_hop_count=10 | (IntOpt)Maximum number of hops for cells routing. |
name=nova | (StrOpt)name of this cell |
reserve_percent=10.0 | (FloatOpt)Percentage of cell capacity to hold in reserve. Affects both memory and disk utilization |
scheduler=nova.cells.scheduler.CellsScheduler | (StrOpt)Cells scheduler to use |
topic=cells | (StrOpt)the topic cells nodes listen on |
topic=conductor | (StrOpt)the topic conductor nodes listen on |
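As a sketch, a child cell might enable cell functionality with a fragment such as the one below; the [cells] section placement and the values are assumptions for illustration only:

    [cells]
    enable=True
    name=child-cell-1
    capabilities=hypervisor=kvm,os=linux
    reserve_percent=10.0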
Configuration option=Default value | (Type) Description |
bindir=$pybasedir/bin | (StrOpt)Directory where nova binaries are installed |
compute_topic=compute | (StrOpt)the topic compute nodes listen on |
console_topic=console | (StrOpt)the topic console proxy nodes listen on |
consoleauth_topic=consoleauth | (StrOpt)the topic console auth proxy nodes listen on |
disable_process_locking=False | (BoolOpt)Whether to disable inter-process locks |
host=<hostname> | (StrOpt)Name of this node; defaults to the system hostname. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. However, the node name must be valid within an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address |
lock_path=None | (StrOpt)Directory to use for lock files. Default to a temp directory |
memcached_servers=None | (ListOpt)Memcached servers or None for in process cache. |
my_ip=<host IP address> | (StrOpt)IP address of this host (defaults to the address detected on the local machine) |
notification_driver=[] | (MultiStrOpt)Driver or drivers to handle sending notifications |
notification_topics=['notifications'] | (ListOpt)AMQP topic used for openstack notifications |
notify_api_faults=False | (BoolOpt)If set, send api.fault notifications on caught exceptions in the API service. |
notify_on_any_change=False | (BoolOpt)If set, send compute.instance.update notifications on instance state changes. Valid values are False for no notifications, True for notifications on any instance changes. |
notify_on_state_change=None | (StrOpt)If set, send compute.instance.update notifications on instance state changes. Valid values are None for no notifications, "vm_state" for notifications on VM state changes, or "vm_and_task_state" for notifications on VM and task state changes. |
pybasedir=<nova install directory> | (StrOpt)Directory where the nova python module is installed |
report_interval=10 | (IntOpt)seconds between nodes reporting state to datastore |
rootwrap_config=/etc/nova/rootwrap.conf | (StrOpt)Path to the rootwrap configuration file to use for running commands as root |
service_down_time=60 | (IntOpt)maximum time since last check-in for up service |
state_path=$pybasedir | (StrOpt)Top-level directory for maintaining nova's state |
tempdir=None | (StrOpt)Explicitly specify the temporary working directory |
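To illustrate how several of these common options fit together, a hedged nova.conf fragment might set the node identity and notification behaviour like this (the hostname, IP, and notification driver value are example assumptions):

    host=compute01
    my_ip=10.0.0.31
    state_path=/var/lib/nova
    rootwrap_config=/etc/nova/rootwrap.conf
    notification_driver=nova.openstack.common.notifier.rpc_notifier
    notify_on_state_change=vm_and_task_state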
Configuration option=Default value | (Type) Description |
base_dir_name=_base | (StrOpt)Where cached images are stored under $instances_path. This is NOT the full path, just a folder name. For per-compute-host cached images, set to _base_$my_ip |
checksum_interval_seconds=3600 | (IntOpt)How frequently to checksum base images |
compute_api_class=nova.compute.api.API | (StrOpt)The full class name of the compute API class to use |
compute_driver=None | (StrOpt)Driver to use for controlling virtualization. Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver, baremetal.BareMetalDriver, vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver |
compute_manager=nova.compute.manager.ComputeManager | (StrOpt)full class name for the Manager for compute |
compute_stats_class=nova.compute.stats.Stats | (StrOpt)Class that will manage stats for the local compute host |
console_host=<hostname> | (StrOpt)Console proxy host to use to connect to instances on this host (defaults to the local hostname). |
console_manager=nova.console.manager.ConsoleProxyManager | (StrOpt)full class name for the Manager for console proxy |
default_instance_type=m1.small | (StrOpt)default instance type to use, testing only |
default_notification_level=INFO | (StrOpt)Default notification level for outgoing notifications |
default_publisher_id=$host | (StrOpt)Default publisher_id for outgoing notifications |
enable_instance_password=True | (BoolOpt)Allows use of instance password during server creation |
heal_instance_info_cache_interval=60 | (IntOpt)Number of seconds between instance info_cache self healing updates |
host_state_interval=120 | (IntOpt)Interval in seconds for querying the host status |
image_cache_manager_interval=2400 | (IntOpt)Number of seconds to wait between runs of the image cache manager |
image_info_filename_pattern=$instances_path/$base_dir_name/%(image)s.info | (StrOpt)Allows image information files to be stored in non-standard locations |
instance_build_timeout=0 | (IntOpt)Amount of time in seconds an instance can be in BUILD before going into ERROR status. Set to 0 to disable. |
instance_usage_audit=False | (BoolOpt)Generate periodic compute.instance.exists notifications |
instance_usage_audit_period=month | (StrOpt)time period to generate instance usages for. Time period must be hour, day, month or year |
instances_path=$state_path/instances | (StrOpt)where instances are stored on disk |
reboot_timeout=0 | (IntOpt)Automatically hard reboot an instance if it has been stuck in a rebooting state longer than N seconds. Set to 0 to disable. |
reclaim_instance_interval=0 | (IntOpt)Interval in seconds for reclaiming deleted instances |
resize_confirm_window=0 | (IntOpt)Automatically confirm resizes after N seconds. Set to 0 to disable. |
resume_guests_state_on_host_boot=False | (BoolOpt)Whether to start guests that were running before the host rebooted |
running_deleted_instance_action=log | (StrOpt)Action to take if a running deleted instance is detected. Valid options are 'noop', 'log' and 'reap'. Set to 'noop' to disable. |
running_deleted_instance_poll_interval=1800 | (IntOpt)Number of seconds to wait between runs of the cleanup task. |
running_deleted_instance_timeout=0 | (IntOpt)Number of seconds after being deleted when a running instance should be considered eligible for cleanup. |
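For instance, an operator who wants periodic cleanup of deleted-but-still-running instances together with usage auditing might combine these options as follows (values are illustrative):

    instance_usage_audit=True
    instance_usage_audit_period=month
    running_deleted_instance_action=reap
    running_deleted_instance_poll_interval=1800
    resize_confirm_window=300
    resume_guests_state_on_host_boot=True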
Configuration option=Default value | (Type) Description |
manager=nova.cells.manager.CellsManager | (StrOpt)Manager for cells |
manager=nova.conductor.manager.ConductorManager | (StrOpt)full class name for the Manager for conductor |
topic=cells | (StrOpt)the topic cells nodes listen on |
topic=conductor | (StrOpt)the topic conductor nodes listen on |
use_local=False | (BoolOpt)Perform nova-conductor operations locally |
Configuration option=Default value | (Type) Description |
config_drive_cdrom=False | (BoolOpt)Attaches the Config Drive image as a cdrom drive instead of a disk drive |
config_drive_format=iso9660 | (StrOpt)Config drive format. One of iso9660 (default) or vfat |
config_drive_inject_password=False | (BoolOpt)Sets the admin password in the config drive image |
config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 | (StrOpt)List of metadata versions to skip placing into the config drive |
config_drive_tempdir=None | (StrOpt)Where to put temporary files associated with config drive creation |
force_config_drive=None | (StrOpt)Set to force injection to take place on a config drive (if set, valid options are: always) |
mkisofs_cmd=genisoimage | (StrOpt)Name and optionally path of the tool used for ISO image creation |
Configuration option=Default value | (Type) Description |
console_public_hostname=<hostname> | (StrOpt)Publicly visible name for this console host (defaults to the local hostname) |
console_token_ttl=600 | (IntOpt)How many seconds before deleting tokens |
consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager | (StrOpt)Manager for console auth |
Configuration option=Default value | (Type) Description |
db_backend=sqlalchemy | (StrOpt)The backend to use for db |
db_backend=sqlalchemy | (StrOpt)The backend to use for bare-metal database |
db_check_interval=60 | (IntOpt)Seconds between getting fresh cell info from db. |
db_driver=nova.db | (StrOpt)driver to use for database access |
dbapi_use_tpool=False | (BoolOpt)Enable the experimental use of thread pooling for all DB API calls |
sql_connection=sqlite:////<nova install directory>/nova/openstack/common/db/$sqlite_db | (StrOpt)The SQLAlchemy connection string used to connect to the database |
sql_connection=sqlite:///$state_path/baremetal_$sqlite_db | (StrOpt)The SQLAlchemy connection string used to connect to the bare-metal database |
sql_connection_debug=0 | (IntOpt)Verbosity of SQL debugging information. 0=None, 100=Everything |
sql_connection_trace=False | (BoolOpt)Add python stack traces to SQL as comment strings |
sql_idle_timeout=3600 | (IntOpt)timeout before idle sql connections are reaped |
sql_max_overflow=None | (IntOpt)If set, use this value for max_overflow with sqlalchemy |
sql_max_pool_size=5 | (IntOpt)Maximum number of SQL connections to keep open in a pool |
sql_max_retries=10 | (IntOpt)maximum db connection retries during startup. (setting -1 implies an infinite retry count) |
sql_min_pool_size=1 | (IntOpt)Minimum number of SQL connections to keep open in a pool |
sql_retry_interval=10 | (IntOpt)interval between retries of opening a sql connection |
sqlite_db=nova.sqlite | (StrOpt)the filename to use with sqlite |
sqlite_synchronous=True | (BoolOpt)If passed, use synchronous mode for sqlite |
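A typical deployment replaces the SQLite default with a MySQL connection string; a hedged example (the database host and password are placeholders) is:

    sql_connection=mysql://nova:NOVA_DBPASS@controller/nova
    sql_max_pool_size=10
    sql_idle_timeout=3600
    sql_max_retries=10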
Configuration option=Default value | (Type) Description |
ec2_dmz_host=$my_ip | (StrOpt)the internal ip of the ec2 api server |
ec2_host=$my_ip | (StrOpt)the ip of the ec2 api server |
ec2_listen=0.0.0.0 | (StrOpt)IP address for EC2 API to listen |
ec2_listen_port=8773 | (IntOpt)port for ec2 api to listen |
ec2_path=/services/Cloud | (StrOpt)the path prefix used to call the ec2 api server |
ec2_port=8773 | (IntOpt)the port of the ec2 api server |
ec2_private_dns_show_ip=False | (BoolOpt)Return the IP address as private dns hostname in describe instances |
ec2_scheme=http | (StrOpt)the protocol to use when connecting to the ec2 api server (http, https) |
ec2_strict_validation=True | (BoolOpt)Validate security group names according to EC2 specification |
ec2_timestamp_expiry=300 | (IntOpt)Time in seconds before ec2 timestamp expires |
ec2_workers=None | (IntOpt)Number of workers for EC2 API service |
keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens | (StrOpt)URL to get token from ec2 request. |
lockout_attempts=5 | (IntOpt)Number of failed auths before lockout. |
lockout_minutes=15 | (IntOpt)Number of minutes to lockout if triggered. |
lockout_window=15 | (IntOpt)Number of minutes for lockout window. |
region_list=[] | (ListOpt)list of region=fqdn pairs separated by commas |
Configuration option=Default value | (Type) Description |
fping_path=/usr/sbin/fping | (StrOpt)Full path to fping. |
Configuration option=Default value | (Type) Description |
allowed_direct_url_schemes=[] | (ListOpt)A list of URL schemes that can be downloaded directly via the direct_url. Currently supported schemes: [file]. |
glance_api_insecure=False | (BoolOpt)Allow insecure SSL (HTTPS) requests to glance |
glance_api_servers=['$glance_host:$glance_port'] | (ListOpt)A list of the glance api servers available to nova. Prefix with https:// for ssl-based glance api servers. ([hostname|ip]:port) |
glance_host=$my_ip | (StrOpt)default glance hostname or ip |
glance_num_retries=0 | (IntOpt)Number retries when downloading an image from glance |
glance_port=9292 | (IntOpt)default glance port |
glance_protocol=http | (StrOpt)Default protocol to use when connecting to glance. Set to https for SSL. |
osapi_glance_link_prefix=None | (StrOpt)Base URL that will be presented to users in links to glance resources |
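For example, pointing nova at two glance API servers could look like this illustrative fragment (the hostnames are assumptions):

    glance_api_servers=10.0.0.20:9292,10.0.0.21:9292
    glance_protocol=http
    glance_num_retries=2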
Configuration option=Default value | (Type) Description |
instances_path_share= | (StrOpt)The name of a Windows share mapped to the "instances_path" directory, used by the resize feature to copy files to the target host. If left blank, an administrative share is used, looking for the same "instances_path" used locally |
limit_cpu_features=False | (BoolOpt)Required for live migration among hosts with different CPU features |
qemu_img_cmd=qemu-img.exe | (StrOpt)qemu-img is used to convert between different image types |
vswitch_name=None | (StrOpt)External virtual switch Name, if not provided, the first external virtual switch is used |
Configuration option=Default value | (Type) Description |
block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC | (StrOpt)Migration flags to be set for block migration |
checksum_base_images=False | (BoolOpt)Write a checksum for files in _base to disk |
default_ephemeral_format=None | (StrOpt)The default format an ephemeral_volume will be formatted with on creation. |
disk_cachemodes=[] | (ListOpt)Specific cachemodes to use for different disk types e.g: ["file=directsync","block=none"] |
force_raw_images=True | (BoolOpt)Force backing images to raw format |
inject_password=True | (BoolOpt)Whether baremetal compute injects password or not |
libvirt_cpu_mode=None | (StrOpt)Set to "host-model" to clone the host CPU feature flags; to "host-passthrough" to use the host CPU model exactly; to "custom" to use a named CPU model; to "none" to not set any CPU model. If libvirt_type="kvm|qemu", it will default to "host-model", otherwise it will default to "none" |
libvirt_cpu_model=None | (StrOpt)Set to a named libvirt CPU model (see names listed in /usr/share/libvirt/cpu_map.xml). Only has effect if libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu" |
libvirt_disk_prefix=None | (StrOpt)Override the default disk prefix for the devices attached to a server, which is dependent on libvirt_type. (valid options are: sd, xvd, uvd, vd) |
libvirt_images_type=default | (StrOpt)VM Images format. Acceptable values are: raw, qcow2, lvm, default. If default is specified, then use_cow_images flag is used instead of this one. |
libvirt_images_volume_group=None | (StrOpt)LVM Volume Group that is used for VM images, when you specify libvirt_images_type=lvm. |
libvirt_inject_key=True | (BoolOpt)Inject the ssh public key at boot time |
libvirt_inject_partition=1 | (IntOpt)The partition to inject to : -2 => disable, -1 => inspect (libguestfs only), 0 => not partitioned, >0 => partition number |
libvirt_inject_password=False | (BoolOpt)Inject the admin password at boot time, without an agent. |
libvirt_lvm_snapshot_size=1000 | (IntOpt)The amount of storage (in megabytes) to allocate for LVM snapshot copy-on-write blocks. |
libvirt_nonblocking=True | (BoolOpt)Use a separate OS thread pool to make libvirt calls non-blocking |
libvirt_snapshot_compression=False | (BoolOpt)Compress snapshot images when possible. This currently applies exclusively to qcow2 images |
libvirt_snapshots_directory=$instances_path/snapshots | (StrOpt)Location where libvirt driver will store snapshots before uploading them to image service |
libvirt_sparse_logical_volumes=False | (BoolOpt)Create sparse logical volumes (with virtualsize) if this flag is set to True. |
libvirt_type=kvm | (StrOpt)Libvirt domain type (valid options are: kvm, lxc, qemu, uml, xen) |
libvirt_uri= | (StrOpt)Override the default libvirt URI (which is dependent on libvirt_type) |
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver | (StrOpt)The libvirt VIF driver to configure the VIFs. |
libvirt_volume_drivers=['iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver', 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver', 'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver', 'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver', 'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver', 'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver', 'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver', 'glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver', 'fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver', 'scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver'] | (ListOpt)Libvirt handlers for remote volumes. |
libvirt_wait_soft_reboot_seconds=120 | (IntOpt)Number of seconds to wait for instance to shut down after soft reboot request is made. We fall back to hard reboot if instance does not shutdown within this window. |
preallocate_images=none | (StrOpt)VM image preallocation mode: "none" => no storage provisioning is done up front, "space" => storage is fully allocated at instance start |
remove_unused_base_images=True | (BoolOpt)Should unused base images be removed? |
remove_unused_kernels=False | (BoolOpt)Should unused kernel images be removed? This is only safe to enable if all compute nodes have been updated to support this option. This will be enabled by default in the future. |
remove_unused_original_minimum_age_seconds=86400 | (IntOpt)Unused unresized base images younger than this will not be removed |
remove_unused_resized_minimum_age_seconds=3600 | (IntOpt)Unused resized base images younger than this will not be removed |
rescue_image_id=None | (StrOpt)Rescue ami image |
rescue_kernel_id=None | (StrOpt)Rescue aki image |
rescue_ramdisk_id=None | (StrOpt)Rescue ari image |
rescue_timeout=0 | (IntOpt)Automatically unrescue an instance after N seconds. Set to 0 to disable. |
snapshot_image_format=None | (StrOpt)Snapshot image format (valid options are : raw, qcow2, vmdk, vdi). Defaults to same as source image |
timeout_nbd=10 | (IntOpt)time to wait for an NBD device to come up |
use_cow_images=True | (BoolOpt)Whether to use cow images |
use_usb_tablet=True | (BoolOpt)Sync virtual and real mouse cursors in Windows VMs |
virt_mkfs=['default=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s'] | (MultiStrOpt)mkfs commands for ephemeral device. The format is <os_type>=<mkfs command> |
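A common KVM-based configuration might combine these hypervisor options roughly as follows (a sketch only; adjust to your hypervisor and storage layout):

    compute_driver=libvirt.LibvirtDriver
    libvirt_type=kvm
    libvirt_cpu_mode=host-model
    libvirt_images_type=qcow2
    use_cow_images=True
    libvirt_inject_password=True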
Configuration option=Default value | (Type) Description |
fixed_range_v6=fd00::/48 | (StrOpt)Fixed IPv6 address block |
gateway_v6=None | (StrOpt)Default IPv6 gateway |
ipv6_backend=rfc2462 | (StrOpt)Backend to use for IPv6 generation |
use_ipv6=False | (BoolOpt)use ipv6 |
Configuration option=Default value | (Type) Description |
kombu_ssl_ca_certs= | (StrOpt)SSL certification authority file (valid only if SSL enabled) |
kombu_ssl_certfile= | (StrOpt)SSL cert file (valid only if SSL enabled) |
kombu_ssl_keyfile= | (StrOpt)SSL key file (valid only if SSL enabled) |
kombu_ssl_version= | (StrOpt)SSL version to use (valid only if SSL enabled) |
Configuration option=Default value | (Type) Description |
ldap_dns_base_dn=ou=hosts,dc=example,dc=org | (StrOpt)Base DN for DNS entries in ldap |
ldap_dns_password=password | (StrOpt)password for ldap DNS |
ldap_dns_servers=['dns.example.org'] | (MultiStrOpt)DNS Servers for ldap dns driver |
ldap_dns_soa_expiry=86400 | (StrOpt)Expiry interval (in seconds) for the ldap dns driver Start of Authority |
[email protected] | (StrOpt)Hostmaster for the ldap dns driver Start of Authority |
ldap_dns_soa_minimum=7200 | (StrOpt)Minimum interval (in seconds) for the ldap dns driver Start of Authority |
ldap_dns_soa_refresh=1800 | (StrOpt)Refresh interval (in seconds) for the ldap dns driver Start of Authority |
ldap_dns_soa_retry=3600 | (StrOpt)Retry interval (in seconds) for the ldap dns driver Start of Authority |
ldap_dns_url=ldap://ldap.example.com:389 | (StrOpt)URL for ldap server which will store dns entries |
ldap_dns_user=uid=admin,ou=people,dc=example,dc=org | (StrOpt)user for ldap DNS |
Configuration option=Default value | (Type) Description |
live_migration_bandwidth=0 | (IntOpt)Maximum bandwidth to be used during migration, in Mbps |
live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER | (StrOpt)Migration flags to be set for live migration |
live_migration_retry_count=30 | (IntOpt)Number of 1 second retries needed in live_migration |
live_migration_uri=qemu+tcp://%s/system | (StrOpt)Migration target URI (any included "%s" is replaced with the migration target hostname) |
Configuration option=Default value | (Type) Description |
debug=False | (BoolOpt)Print debugging output (set logging level to DEBUG instead of default WARNING level). |
default_log_levels=['amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'suds=INFO', 'keystone=INFO', 'eventlet.wsgi.server=WARN'] | (ListOpt)list of logger=LEVEL pairs |
fatal_deprecations=False | (BoolOpt)make deprecations fatal |
fatal_exception_format_errors=False | (BoolOpt)make exception message format errors fatal |
instance_format=[instance: %(uuid)s] | (StrOpt)If an instance is passed with the log message, format it like this |
instance_uuid_format=[instance: %(uuid)s] | (StrOpt)If an instance UUID is passed with the log message, format it like this |
log_config=None | (StrOpt)If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files. |
log_date_format=%Y-%m-%d %H:%M:%S | (StrOpt)Format string for %%(asctime)s in log records. Default: %(default)s |
log_dir=None | (StrOpt)(Optional) The base directory used for relative --log-file paths |
log_file=None | (StrOpt)(Optional) Name of log file to output to. If no default is set, logging will go to stdout. |
log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s | (StrOpt)A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. Default: %(default)s |
logfile_mode=0644 | (StrOpt)Default file mode used when creating log files |
logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s | (StrOpt)format string to use for log messages with context |
logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d | (StrOpt)data to append to log format when level is DEBUG |
logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s | (StrOpt)format string to use for log messages without context |
logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | (StrOpt)prefix each line of exception output with this format |
publish_errors=False | (BoolOpt)publish error events |
syslog_log_facility=LOG_USER | (StrOpt)syslog facility to receive log lines |
use_stderr=True | (BoolOpt)Log output to standard error |
use_syslog=False | (BoolOpt)Use syslog for logging. |
verbose=False | (BoolOpt)Print more verbose output (set logging level to INFO instead of default WARNING level). |
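For example, to log to files under /var/log/nova with INFO-level verbosity, a fragment such as the following could be used (the paths are illustrative):

    verbose=True
    debug=False
    log_dir=/var/log/nova
    use_syslog=False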
Configuration option=Default value | (Type) Description |
metadata_host=$my_ip | (StrOpt)the ip for the metadata api server |
metadata_listen=0.0.0.0 | (StrOpt)IP address for metadata api to listen |
metadata_listen_port=8775 | (IntOpt)port for metadata api to listen |
metadata_manager=nova.api.manager.MetadataManager | (StrOpt)OpenStack metadata service manager |
metadata_port=8775 | (IntOpt)the port of the metadata API server |
metadata_workers=None | (IntOpt)Number of workers for metadata service |
Configuration option=Default value | (Type) Description |
allow_same_net_traffic=True | (BoolOpt)Whether to allow network traffic from same network |
auto_assign_floating_ip=False | (BoolOpt)Whether to automatically assign a floating IP to each VM |
cnt_vpn_clients=0 | (IntOpt)Number of addresses reserved for vpn clients |
create_unique_mac_address_attempts=5 | (IntOpt)Number of attempts to create unique mac address |
default_access_ip_network_name=None | (StrOpt)Name of network to use to set access ips for instances |
default_floating_pool=nova | (StrOpt)Default pool for floating ips |
defer_iptables_apply=False | (BoolOpt)Whether to batch up the application of IPTables rules during a host restart and apply all at the end of the init phase |
dhcp_domain=novalocal | (StrOpt)domain to use for building the hostnames |
dhcp_lease_time=120 | (IntOpt)Lifetime of a DHCP lease in seconds |
dhcpbridge=$bindir/nova-dhcpbridge | (StrOpt)location of nova-dhcpbridge |
dhcpbridge_flagfile=['/etc/nova/nova-dhcpbridge.conf'] | (MultiStrOpt)location of flagfiles for dhcpbridge |
dns_server=[] | (MultiStrOpt)if set, uses specific dns server for dnsmasq. Can be specified multiple times. |
dns_update_periodic_interval=-1 | (IntOpt)Number of seconds to wait between runs of updates to DNS entries. |
dnsmasq_config_file= | (StrOpt)Override the default dnsmasq settings with this file |
firewall_driver=None | (StrOpt)Firewall driver (defaults to hypervisor specific iptables driver) |
fixed_ip_disassociate_timeout=600 | (IntOpt)Seconds after which a deallocated ip is disassociated |
fixed_range=10.0.0.0/8 | (StrOpt)DEPRECATED - Fixed IP address block. If set to an empty string, the subnet range(s) will be automatically determined and configured. |
flat_injected=False | (BoolOpt)Whether to attempt to inject network setup into guest |
flat_interface=None | (StrOpt)FlatDhcp will bridge into this interface if set |
flat_network_bridge=None | (StrOpt)Bridge for simple network instances |
flat_network_dns=8.8.4.4 | (StrOpt)Dns for simple network |
floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver | (StrOpt)full class name for the DNS Manager for floating IPs |
force_dhcp_release=False | (BoolOpt)If True, send a dhcp release on instance termination |
force_snat_range=[] | (MultiStrOpt)Traffic to this range will always be snatted to the fallback ip, even if it would normally be bridged out of the node. Can be specified multiple times. |
forward_bridge_interface=['all'] | (MultiStrOpt)An interface that bridges can forward to. If this is set to all then all traffic will be forwarded. Can be specified multiple times. |
gateway=None | (StrOpt)Default IPv4 gateway |
injected_network_template=$pybasedir/nova/virt/interfaces.template | (StrOpt)Template file for injected network |
injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template | (StrOpt)Template file for injected network |
injected_network_template=$pybasedir/nova/virt/interfaces.template | (StrOpt)Template file for injected network |
injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template | (StrOpt)Template file for injected network |
instance_dns_domain= | (StrOpt)DNS zone (domain) to use for instance IPs |
instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver | (StrOpt)full class name for the DNS Manager for instance IPs |
iptables_bottom_regex= | (StrOpt)Regular expression to match iptables rules that should always be at the bottom. |
iptables_drop_action=DROP | (StrOpt)The table that iptables jumps to when a packet is to be dropped. |
iptables_top_regex= | (StrOpt)Regular expression to match iptables rules that should always be at the top. |
l3_lib=nova.network.l3.LinuxNetL3 | (StrOpt)Indicates underlying L3 management library |
linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver | (StrOpt)Driver used to create ethernet devices. |
linuxnet_ovs_integration_bridge=br-int | (StrOpt)Name of Open vSwitch bridge used with linuxnet |
multi_host=False | (BoolOpt)Default value for multi_host in networks. Also, if set, some rpc network calls will be sent directly to host. |
network_api_class=nova.network.api.API | (StrOpt)The full class name of the network API class to use |
network_device_mtu=None | (StrOpt)MTU setting for vlan |
network_driver=nova.network.linux_net | (StrOpt)Driver to use for network creation |
network_manager=nova.network.manager.VlanManager | (StrOpt)full class name for the Manager for network |
network_size=256 | (IntOpt)Number of addresses in each private subnet |
network_topic=network | (StrOpt)the topic network nodes listen on |
networks_path=$state_path/networks | (StrOpt)Location to keep network config files |
num_networks=1 | (IntOpt)Number of networks to support |
public_interface=eth0 | (StrOpt)Interface for public IP addresses |
routing_source_ip=$my_ip | (StrOpt)Public IP of network host |
security_group_api=nova | (StrOpt)The full class name of the security API class |
security_group_handler=nova.network.sg.NullSecurityGroupHandler | (StrOpt)The full class name of the security group handler class |
send_arp_for_ha=False | (BoolOpt)send gratuitous ARPs for HA setup |
send_arp_for_ha_count=3 | (IntOpt)send this many gratuitous ARPs for HA setup |
service_quantum_metadata_proxy=False | (BoolOpt)Set flag to indicate Quantum will proxy metadata requests and resolve instance ids. |
share_dhcp_address=False | (BoolOpt)If True in multi_host mode, all compute hosts share the same dhcp address. |
teardown_unused_network_gateway=False | (BoolOpt)If True, unused gateway devices (VLAN and bridge) are deleted in VLAN network mode with multi hosted networks |
update_dns_entries=False | (BoolOpt)If True, when a DNS entry must be updated, it sends a fanout cast to all network hosts to update their DNS entries in multi host mode |
use_network_dns_servers=False | (BoolOpt)if set, uses the dns1 and dns2 from the network reference as DNS servers. |
use_quantum_default_nets=False | (StrOpt)Control for checking for default networks |
use_single_default_gateway=False | (BoolOpt)Use single default gateway. Only first nic of vm will get default gateway from dhcp server |
vlan_interface=None | (StrOpt)vlans will bridge into this interface if set |
vlan_start=100 | (IntOpt)First VLAN for private networks |
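As an illustration, a FlatDHCP-style nova-network setup might use a fragment like the one below; the manager class and interface names are assumptions chosen for the example:

    network_manager=nova.network.manager.FlatDHCPManager
    flat_interface=eth1
    flat_network_bridge=br100
    public_interface=eth0
    force_dhcp_release=True
    dhcp_domain=novalocal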
Configuration option=Default value | (Type) Description |
periodic_enable=True | (BoolOpt)enable periodic tasks |
periodic_fuzzy_delay=60 | (IntOpt)range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0) |
run_external_periodic_tasks=True | (BoolOpt)Some periodic tasks can be run in a separate process. Should we run them here? |
Configuration option=Default value | (Type) Description |
allow_instance_snapshots=True | (BoolOpt)Permit instance snapshot operations. |
allow_migrate_to_same_host=False | (BoolOpt)Allow migrate machine to the same host. Useful when testing in single-host environments. |
allow_resize_to_same_host=False | (BoolOpt)Allow destination machine to match source for resize. Useful when testing in single-host environments. |
max_age=0 | (IntOpt)number of seconds between subsequent usage refreshes |
osapi_compute_unique_server_name_scope= | (StrOpt)When set, compute API will consider duplicate hostnames invalid within the specified scope, regardless of case. Should be empty, "project" or "global". |
osapi_max_limit=1000 | (IntOpt)the maximum number of items returned in a single response from a collection resource |
osapi_max_request_body_size=114688 | (IntOpt)the maximum body size for each OSAPI request, in bytes |
password_length=12 | (IntOpt)Length of generated instance admin passwords |
policy_default_rule=default | (StrOpt)Rule checked when requested rule is not found |
policy_file=policy.json | (StrOpt)JSON file representing policy |
reservation_expire=86400 | (IntOpt)number of seconds until a reservation expires |
until_refresh=0 | (IntOpt)count of reservations until usage is refreshed |
Configuration option=Default value | (Type) Description |
powervm_img_local_path=/tmp | (StrOpt)Local directory to download glance images to. Make sure this path can fit your biggest image in glance |
powervm_img_remote_path=/home/padmin | (StrOpt)PowerVM image remote path where images will be moved. Make sure this path can fit your biggest image in glance |
powervm_mgr=None | (StrOpt)PowerVM manager host or ip |
powervm_mgr_passwd=None | (StrOpt)PowerVM manager user password |
powervm_mgr_type=ivm | (StrOpt)PowerVM manager type (ivm, hmc) |
powervm_mgr_user=None | (StrOpt)PowerVM manager user name |
Configuration option=Default value | (Type) Description |
qpid_heartbeat=60 | (IntOpt)Seconds between connection keepalive heartbeats |
qpid_hostname=localhost | (StrOpt)Qpid broker hostname |
qpid_hosts=['$qpid_hostname:$qpid_port'] | (ListOpt)Qpid HA cluster host:port pairs |
qpid_password= | (StrOpt)Password for qpid connection |
qpid_port=5672 | (IntOpt)Qpid broker port |
qpid_protocol=tcp | (StrOpt)Transport to use, either 'tcp' or 'ssl' |
qpid_sasl_mechanisms= | (StrOpt)Space separated list of SASL mechanisms to use for auth |
qpid_tcp_nodelay=True | (BoolOpt)Disable Nagle algorithm |
qpid_username= | (StrOpt)Username for qpid connection |
Configuration option=Default value | (Type) Description |
quantum_admin_auth_url=http://localhost:5000/v2.0 | (StrOpt)auth url for connecting to quantum in admin context |
quantum_admin_password=None | (StrOpt)password for connecting to quantum in admin context |
quantum_admin_tenant_name=None | (StrOpt)tenant name for connecting to quantum in admin context |
quantum_admin_username=None | (StrOpt)username for connecting to quantum in admin context |
quantum_api_insecure=False | (BoolOpt)if set, ignore any SSL validation issues |
quantum_auth_strategy=keystone | (StrOpt)auth strategy for connecting to quantum in admin context |
quantum_default_tenant_id=default | (StrOpt)Default tenant id when creating quantum networks |
quantum_extension_sync_interval=600 | (IntOpt)Number of seconds before querying quantum for extensions |
quantum_metadata_proxy_shared_secret= | (StrOpt)Shared secret used to validate proxied Quantum metadata requests |
quantum_ovs_bridge=br-int | (StrOpt)Name of Integration Bridge used by Open vSwitch |
quantum_region_name=None | (StrOpt)region name for connecting to quantum in admin context |
quantum_url=http://127.0.0.1:9696 | (StrOpt)URL for connecting to quantum |
quantum_url_timeout=30 | (IntOpt)timeout value for connecting to quantum in seconds |
Configuration option=Default value | (Type) Description |
bandwidth_poll_interval=600 | (IntOpt)interval to pull bandwidth usage info |
enable_network_quota=False | (BoolOpt)Enables or disables quotas on tenant networks |
quota_cores=20 | (IntOpt)number of instance cores allowed per project |
quota_driver=nova.quota.DbQuotaDriver | (StrOpt)default driver to use for quota checks |
quota_fixed_ips=-1 | (IntOpt)number of fixed ips allowed per project (this should be at least the number of instances allowed) |
quota_floating_ips=10 | (IntOpt)number of floating ips allowed per project |
quota_injected_file_content_bytes=10240 | (IntOpt)number of bytes allowed per injected file |
quota_injected_file_path_bytes=255 | (IntOpt)number of bytes allowed per injected file path |
quota_injected_files=5 | (IntOpt)number of injected files allowed |
quota_instances=10 | (IntOpt)number of instances allowed per project |
quota_key_pairs=100 | (IntOpt)number of key pairs per user |
quota_metadata_items=128 | (IntOpt)number of metadata items allowed per instance |
quota_ram=51200 | (IntOpt)megabytes of instance ram allowed per project |
quota_security_group_rules=20 | (IntOpt)number of security rules per security group |
quota_security_groups=10 | (IntOpt)number of security groups per project |
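For example, to raise the per-project limits for a larger tenant, a fragment such as the following could be applied (the numbers are illustrative):

    quota_instances=20
    quota_cores=40
    quota_ram=102400
    quota_floating_ips=20
    quota_security_groups=20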
Configuration option=Default value | (Type) Description |
rabbit_durable_queues=False | (BoolOpt)use durable queues in RabbitMQ |
rabbit_ha_queues=False | (BoolOpt)use H/A queues in RabbitMQ (x-ha-policy: all). You need to wipe the RabbitMQ database when changing this option. |
rabbit_host=localhost | (StrOpt)The RabbitMQ broker address where a single node is used |
rabbit_hosts=['$rabbit_host:$rabbit_port'] | (ListOpt)RabbitMQ HA cluster host:port pairs |
rabbit_max_retries=0 | (IntOpt)maximum retries with trying to connect to RabbitMQ (the default of 0 implies an infinite retry count) |
rabbit_password=guest | (StrOpt)the RabbitMQ password |
rabbit_port=5672 | (IntOpt)The RabbitMQ broker port where a single node is used |
rabbit_retry_backoff=2 | (IntOpt)how long to back off between retries when connecting to RabbitMQ |
rabbit_retry_interval=1 | (IntOpt)how frequently to retry connecting with RabbitMQ |
rabbit_use_ssl=False | (BoolOpt)connect over SSL for RabbitMQ |
rabbit_userid=guest | (StrOpt)the RabbitMQ userid |
rabbit_virtual_host=/ | (StrOpt)the RabbitMQ virtual host |
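A minimal RabbitMQ connection fragment might look like this (the broker hostname and credentials are placeholders):

    rabbit_host=controller
    rabbit_port=5672
    rabbit_userid=nova
    rabbit_password=RABBIT_PASS
    rabbit_virtual_host=/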
Configuration option=Default value | (Type) Description |
amqp_rpc_single_reply_queue=False | (BoolOpt)Enable a fast single reply queue if using AMQP based RPC like RabbitMQ or Qpid. |
control_exchange=openstack | (StrOpt)AMQP exchange to connect to if using RabbitMQ or Qpid |
matchmaker_heartbeat_freq=300 | (IntOpt)Heartbeat frequency |
matchmaker_heartbeat_ttl=600 | (IntOpt)Heartbeat time-to-live. |
matchmaker_ringfile=/etc/nova/matchmaker_ring.json | (StrOpt)Matchmaker ring file (JSON) |
rpc_backend=nova.openstack.common.rpc.impl_kombu | (StrOpt)The messaging module to use, defaults to kombu. |
rpc_cast_timeout=30 | (IntOpt)Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. |
rpc_conn_pool_size=30 | (IntOpt)Size of RPC connection pool |
rpc_driver_queue_base=cells.intercell | (StrOpt)Base queue name to use when communicating between cells. Various topics by message type will be appended to this. |
rpc_response_timeout=60 | (IntOpt)Seconds to wait for a response from call or multicall |
rpc_thread_pool_size=64 | (IntOpt)Size of RPC thread pool |
Configuration option=Default value | (Type) Description |
buckets_path=$state_path/buckets | (StrOpt)path to s3 buckets |
image_decryption_dir=/tmp | (StrOpt)parent dir for tempdir used for image decryption |
s3_access_key=notchecked | (StrOpt)access key to use for s3 server for images |
s3_affix_tenant=False | (BoolOpt)whether to affix the tenant id to the access key when downloading from s3 |
s3_host=$my_ip | (StrOpt)hostname or ip for openstack to use when accessing the s3 api |
s3_listen=0.0.0.0 | (StrOpt)IP address for S3 API to listen |
s3_listen_port=3333 | (IntOpt)port for s3 api to listen |
s3_port=3333 | (IntOpt)port used when accessing the s3 api |
s3_secret_key=notchecked | (StrOpt)secret key to use for s3 server for images |
s3_use_ssl=False | (BoolOpt)whether to use ssl when talking to s3 |
Configuration option=Default value | (Type) Description |
cpu_allocation_ratio=16.0 | (FloatOpt)Virtual CPU to Physical CPU allocation ratio |
disk_allocation_ratio=1.0 | (FloatOpt)virtual disk to physical disk allocation ratio |
isolated_hosts=[] | (ListOpt)Host reserved for specific images |
isolated_images=[] | (ListOpt)Images to run on isolated host |
max_instances_per_host=50 | (IntOpt)Ignore hosts that have too many instances |
max_io_ops_per_host=8 | (IntOpt)Ignore hosts that have too many builds/resizes/snaps/migrations |
ram_allocation_ratio=1.5 | (FloatOpt)virtual ram to physical ram allocation ratio |
ram_weight_multiplier=1.0 | (FloatOpt)Multiplier used for weighing ram. Negative numbers mean to stack vs spread. |
reserved_host_disk_mb=0 | (IntOpt)Amount of disk in MB to reserve for the host |
reserved_host_memory_mb=512 | (IntOpt)Amount of memory in MB to reserve for the host |
scheduler_available_filters=['nova.scheduler.filters.all_filters'] | (MultiStrOpt)Filter classes available to the scheduler which may be specified more than once. An entry of "nova.scheduler.filters.standard_filters" maps to all filters included with nova. |
scheduler_default_filters=['RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter'] | (ListOpt)Which filter class names to use for filtering hosts when not specified in the request. |
scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler | (StrOpt)Default driver to use for the scheduler |
scheduler_host_manager=nova.scheduler.host_manager.HostManager | (StrOpt)The scheduler host manager class to use |
scheduler_host_subset_size=1 | (IntOpt)New instances will be scheduled on a host chosen randomly from a subset of the N best hosts. This property defines the subset size that a host is chosen from. A value of 1 chooses the first host returned by the weighing functions. This value must be at least 1. Any value less than 1 will be ignored, and 1 will be used instead |
scheduler_json_config_location= | (StrOpt)Absolute path to scheduler configuration JSON file. |
scheduler_manager=nova.scheduler.manager.SchedulerManager | (StrOpt)full class name for the Manager for scheduler |
scheduler_max_attempts=3 | (IntOpt)Maximum number of attempts to schedule an instance |
scheduler_retries=10 | (IntOpt)How many retries when no cells are available. |
scheduler_retry_delay=2 | (IntOpt)How often to retry in seconds when no cells are available. |
scheduler_topic=scheduler | (StrOpt)the topic scheduler nodes listen on |
scheduler_weight_classes=['nova.scheduler.weights.all_weighers'] | (ListOpt)Which weight class names to use for weighing hosts |
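For instance, a scheduler tuned to spread instances and overcommit CPU conservatively might look like the following sketch (the filter list and ratios are example choices, not recommendations):

    scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
    scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,CoreFilter
    scheduler_host_subset_size=3
    cpu_allocation_ratio=8.0
    ram_allocation_ratio=1.0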
Configuration option=Default value | (Type) Description |
agent_enabled=True | (BoolOpt)enable spice guest agent support |
enabled=False | (BoolOpt)enable spice related features |
html5proxy_base_url=http://127.0.0.1:6080/spice_auto.html | (StrOpt)location of spice html5 console proxy, in the form "http://127.0.0.1:6080/spice_auto.html" |
keymap=en-us | (StrOpt)keymap for spice |
server_listen=127.0.0.1 | (StrOpt)IP address on which instance spice server should listen |
server_proxyclient_address=127.0.0.1 | (StrOpt)the address to which proxy clients (like nova-spicehtml5proxy) should connect |
Configuration option=Default value | (Type) Description |
allowed_rpc_exception_modules=['nova.openstack.common.exception', 'nova.exception', 'cinder.exception', 'exceptions'] | (ListOpt)Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call. |
backdoor_port=None | (IntOpt)port for eventlet backdoor to listen |
fake_call=False | (BoolOpt)If True, skip using the queue and make local calls |
fake_network=False | (BoolOpt)If passed, use fake network devices and addresses |
fake_rabbit=False | (BoolOpt)If passed, use a fake RabbitMQ provider |
monkey_patch=False | (BoolOpt)Whether to log monkey patching |
monkey_patch_modules=['nova.api.ec2.cloud:nova.openstack.common.notifier.api.notify_decorator', 'nova.compute.api:nova.openstack.common.notifier.api.notify_decorator'] | (ListOpt)List of modules/decorators to monkey patch |
Configuration option=Default value | (Type) Description |
tile_pdu_ip=10.0.100.1 | (StrOpt)ip address of tilera pdu |
tile_pdu_mgr=/tftpboot/pdu_mgr | (StrOpt)management script for tilera pdu |
tile_pdu_off=2 | (IntOpt)power status of tilera PDU is OFF |
tile_pdu_on=1 | (IntOpt)power status of tilera PDU is ON |
tile_pdu_status=9 | (IntOpt)power status of tilera PDU |
tile_power_wait=9 | (IntOpt)wait time in seconds before checking the result of Tilera power operations |
Configuration option=Default value | (Type) Description |
attestation_api_url=/OpenAttestationWebServices/V1.0 | (StrOpt)attestation web API URL |
attestation_auth_blob=None | (StrOpt)attestation authorization blob - must change |
attestation_auth_timeout=60 | (IntOpt)Attestation status cache valid period length |
attestation_port=8443 | (StrOpt)attestation server port |
attestation_server=None | (StrOpt)attestation server hostname |
attestation_server_ca_file=None | (StrOpt)attestation server Cert file for Identity verification |
Configuration option=Default value | (Type) Description |
integration_bridge=br-int | (StrOpt)Name of Integration Bridge |
use_linked_clone=True | (BoolOpt)Whether to use linked clone |
vmwareapi_api_retry_count=10 | (IntOpt)The number of times we retry on failures, e.g., socket error, etc. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver. |
vmwareapi_cluster_name=None | (StrOpt)Name of a VMware Cluster ComputeResource. Used only if compute_driver is vmwareapi.VMwareVCDriver. |
vmwareapi_host_ip=None | (StrOpt)URL for connection to VMware ESX/VC host. Required if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver. |
vmwareapi_host_password=None | (StrOpt)Password for connection to VMware ESX/VC host. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver. |
vmwareapi_host_username=None | (StrOpt)Username for connection to VMware ESX/VC host. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver. |
vmwareapi_task_poll_interval=5.0 | (FloatOpt)The interval used for polling of remote tasks. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver. |
vmwareapi_vlan_interface=vmnic0 | (StrOpt)Physical ethernet adapter name for vlan networking |
vmwareapi_wsdl_loc=None | (StrOpt)VIM Service WSDL location, for example http://<server>/vimService.wsdl. Needed to work around a bug in the default WSDL shipped with vSphere ESX 4.1. Refer to the VMware readme for setup. |
Configuration option=Default value | (Type) Description |
novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html | (StrOpt)location of vnc console proxy, in the form "http://127.0.0.1:6080/vnc_auto.html" |
vnc_enabled=True | (BoolOpt)enable vnc related features |
vnc_keymap=en-us | (StrOpt)keymap for vnc |
vnc_password=None | (StrOpt)VNC password |
vnc_port=5900 | (IntOpt)VNC starting port |
vnc_port_total=10000 | (IntOpt)Total number of VNC ports |
vncserver_listen=127.0.0.1 | (StrOpt)IP address on which instance vncservers should listen |
vncserver_proxyclient_address=127.0.0.1 | (StrOpt)the address to which proxy clients (like nova-xvpvncproxy) should connect |
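On a compute node, the VNC proxy options are typically combined as in this illustrative fragment (the addresses are placeholders):

    vnc_enabled=True
    vncserver_listen=0.0.0.0
    vncserver_proxyclient_address=10.0.0.31
    novncproxy_base_url=http://controller:6080/vnc_auto.html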
Configuration option=Default value | (Type) Description |
block_device_creation_timeout=10 | (IntOpt)Time to wait for a block device to be created |
cinder_api_insecure=False | (BoolOpt)Allow insecure SSL requests to cinder |
cinder_catalog_info=volume:cinder:publicURL | (StrOpt)Info to match when looking for cinder in the service catalog. Format is : separated values of the form: <service_type>:<service_name>:<endpoint_type> |
cinder_cross_az_attach=True | (BoolOpt)Allow attach between instance and volume in different availability zones. |
cinder_endpoint_template=None | (StrOpt)Override service catalog lookup with template for cinder endpoint e.g. http://localhost:8776/v1/%(project_id)s |
cinder_http_retries=3 | (IntOpt)Number of cinderclient retries on failed http calls |
force_volumeutils_v1=False | (BoolOpt)Force volumeutils v1 |
iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal | (StrOpt)iSCSI IQN prefix used in baremetal volume connections. |
os_region_name=None | (StrOpt)region name of this node |
volume_api_class=nova.volume.cinder.API | (StrOpt)The full class name of the volume API class to use |
volume_attach_retry_count=10 | (IntOpt)The number of times to retry to attach a volume |
volume_attach_retry_interval=5 | (IntOpt)Interval between volume attachment attempts, in seconds |
volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver | (StrOpt)Baremetal volume driver. |
volume_usage_poll_interval=0 | (IntOpt)Interval in seconds for gathering volume usages |
Configuration option=Default value | (Type) Description |
boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template | (StrOpt)Template for cloudpipe instance boot script |
dmz_cidr=[] | (ListOpt)A list of DMZ ranges that should be accepted |
dmz_mask=255.255.255.0 | (StrOpt)Netmask to push into openvpn config |
dmz_net=10.0.0.0 | (StrOpt)Network to push into openvpn config |
vpn_image_id=0 | (StrOpt)image id used when starting up a cloudpipe vpn server |
vpn_instance_type=m1.tiny | (StrOpt)Instance type for vpn instances |
vpn_ip=$my_ip | (StrOpt)Public IP for the cloudpipe VPN servers |
vpn_key_suffix=-vpn | (StrOpt)Suffix to add to project name for vpn key and secgroups |
vpn_start=1000 | (IntOpt)First Vpn port for private networks |
Configuration option=Default value | (Type) Description |
api_paste_config=api-paste.ini | (StrOpt)File name for the paste.deploy config for nova-api |
ssl_ca_file=None | (StrOpt)CA certificate file to use to verify connecting clients |
ssl_cert_file=None | (StrOpt)SSL certificate of API server |
ssl_key_file=None | (StrOpt)SSL private key of API server |
tcp_keepidle=600 | (IntOpt)Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X. |
wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f | (StrOpt)A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds. |
Configuration option=Default value | (Type) Description |
agent_resetnetwork_timeout=60 | (IntOpt)number of seconds to wait for agent reply to resetnetwork request |
agent_timeout=30 | (IntOpt)number of seconds to wait for agent reply |
agent_version_timeout=300 | (IntOpt)number of seconds to wait for agent to be fully operational |
cache_images=all | (StrOpt)Cache glance images locally. `all` will cache all images, `some` will only cache images that have the image_property `cache_in_nova=True`, and `none` turns off caching entirely |
console_driver=nova.console.xvp.XVPConsoleProxy | (StrOpt)Driver to use for the console proxy |
console_vmrc_error_retries=10 | (IntOpt)number of retries for retrieving VMRC information |
console_vmrc_port=443 | (IntOpt)port for VMware VMRC connections |
console_xvp_conf=/etc/xvp.conf | (StrOpt)generated XVP conf file |
console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template | (StrOpt)XVP conf template |
console_xvp_log=/var/log/xvp.log | (StrOpt)XVP log file |
console_xvp_multiplex_port=5900 | (IntOpt)port for XVP to multiplex VNC connections on |
console_xvp_pid=/var/run/xvp.pid | (StrOpt)XVP master process pid file |
default_os_type=linux | (StrOpt)Default OS type |
iqn_prefix=iqn.2010-10.org.openstack | (StrOpt)IQN Prefix |
max_kernel_ramdisk_size=16777216 | (IntOpt)Maximum size in bytes of kernel or ramdisk images |
sr_matching_filter=other-config:i18n-key=local-storage | (StrOpt)Filter for finding the SR to be used to install guest instances on. The default value is the Local Storage in default XenServer/XCP installations. To select an SR with a different matching criteria, you could set it to other-config:my_favorite_sr=true. On the other hand, to fall back on the Default SR, as displayed by XenCenter, set this flag to: default-sr:true |
stub_compute=False | (BoolOpt)Stub calls to compute worker for tests |
target_host=None | (StrOpt)iSCSI Target Host |
target_port=3260 | (StrOpt)iSCSI Target Port, 3260 Default |
use_join_force=True | (BoolOpt)To use for hosts with different CPUs |
xen_hvmloader_path=/usr/lib/xen/boot/hvmloader | (StrOpt)Location where the Xen hvmloader is kept |
xenapi_agent_path=usr/sbin/xe-update-networking | (StrOpt)Specifies the path in which the xenapi guest agent should be located. If the agent is present, network configuration is not injected into the image. Used if compute_driver=xenapi.XenAPIDriver and flat_injected=True |
xenapi_check_host=True | (BoolOpt)Ensure compute service is running on host XenAPI connects to. |
xenapi_connection_concurrent=5 | (IntOpt)Maximum number of concurrent XenAPI connections. Used only if compute_driver=xenapi.XenAPIDriver |
xenapi_connection_password=None | (StrOpt)Password for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver |
xenapi_connection_url=None | (StrOpt)URL for connection to XenServer/Xen Cloud Platform. Required if compute_driver=xenapi.XenAPIDriver |
xenapi_connection_username=root | (StrOpt)Username for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver |
xenapi_disable_agent=False | (BoolOpt)Disable XenAPI agent. Reduces the amount of time it takes nova to detect that a VM has started, when that VM does not have the agent installed |
xenapi_image_upload_handler=nova.virt.xenapi.imageupload.glance.GlanceStore | (StrOpt)Object Store Driver used to handle image uploads. |
xenapi_login_timeout=10 | (IntOpt)Timeout in seconds for XenAPI login. |
xenapi_num_vbd_unplug_retries=10 | (IntOpt)Maximum number of retries to unplug VBD |
xenapi_ovs_integration_bridge=xapi1 | (StrOpt)Name of Integration Bridge used by Open vSwitch |
xenapi_remap_vbd_dev=False | (BoolOpt)Used to enable the remapping of VBD dev (Works around an issue in Ubuntu Maverick) |
xenapi_remap_vbd_dev_prefix=sd | (StrOpt)Specify prefix to remap VBD dev to (ex. /dev/xvdb -> /dev/sdb) |
xenapi_running_timeout=60 | (IntOpt)number of seconds to wait for instance to go to running state |
xenapi_sparse_copy=True | (BoolOpt)Whether to use sparse_copy for copying data on a resize down (False will use standard dd). This speeds up resizes down considerably since large runs of zeros won't have to be rsynced |
xenapi_sr_base_path=/var/run/sr-mount | (StrOpt)Base path to the storage repository |
xenapi_torrent_base_url=None | (StrOpt)Base URL for torrent files. |
xenapi_torrent_download_stall_cutoff=600 | (IntOpt)Number of seconds a download can remain at the same progress percentage w/o being considered a stall |
xenapi_torrent_images=none | (StrOpt)Whether or not to download images via Bit Torrent (all|some|none). |
xenapi_torrent_listen_port_end=6891 | (IntOpt)End of port range to listen on |
xenapi_torrent_listen_port_start=6881 | (IntOpt)Beginning of port range to listen on |
xenapi_torrent_max_last_accessed=86400 | (IntOpt)Cached torrent files not accessed within this number of seconds can be reaped |
xenapi_torrent_max_seeder_processes_per_host=1 | (IntOpt)Maximum number of seeder processes to run concurrently within a given dom0. (-1 = no limit) |
xenapi_torrent_seed_chance=1.0 | (FloatOpt)Probability that peer will become a seeder. (1.0 = 100%) |
xenapi_torrent_seed_duration=3600 | (IntOpt)Number of seconds after downloading an image via BitTorrent that it should be seeded for other peers. |
xenapi_vhd_coalesce_max_attempts=5 | (IntOpt)Max number of times to poll for VHD to coalesce. Used only if compute_driver=xenapi.XenAPIDriver |
xenapi_vhd_coalesce_poll_interval=5.0 | (FloatOpt)The interval used for polling of coalescing vhds. Used only if compute_driver=xenapi.XenAPIDriver |
xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver | (StrOpt)The XenAPI VIF driver using XenServer Network APIs. |
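As a sketch, connecting nova-compute to a XenServer host might use a fragment such as the following (the URL and password are placeholders):

    compute_driver=xenapi.XenAPIDriver
    xenapi_connection_url=https://xenserver.example.com
    xenapi_connection_username=root
    xenapi_connection_password=XEN_PASS
    xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver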
Configuration option=Default value | (Type) Description |
xvpvncproxy_base_url=http://127.0.0.1:6081/console | (StrOpt)location of nova xvp vnc console proxy, in the form "http://127.0.0.1:6081/console" |
xvpvncproxy_host=0.0.0.0 | (StrOpt)Address that the XCP VNC proxy should bind to |
xvpvncproxy_port=6081 | (IntOpt)Port that the XCP VNC proxy should bind to |
Configuration option=Default value | (Type) Description |
rpc_zmq_bind_address=* | (StrOpt)ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address. |
rpc_zmq_contexts=1 | (IntOpt)Number of ZeroMQ contexts, defaults to 1 |
rpc_zmq_host=<hostname> | (StrOpt)Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova. |
rpc_zmq_ipc_dir=/var/run/openstack | (StrOpt)Directory for holding IPC sockets |
rpc_zmq_matchmaker=nova.openstack.common.rpc.matchmaker.MatchMakerLocalhost | (StrOpt)MatchMaker driver |
rpc_zmq_port=9501 | (IntOpt)ZeroMQ receiver listening port |
rpc_zmq_topic_backlog=None | (IntOpt)Maximum number of ingress messages to locally buffer per topic. Default is unlimited. |
Configuration option=Default value | (Type) Description |
address=None | (StrOpt)The ZooKeeper addresses for servicegroup service in the format of host1:port,host2:port,host3:port |
recv_timeout=4000 | (IntOpt)recv_timeout parameter for the zk session |
sg_prefix=/servicegroups | (StrOpt)The prefix used in ZooKeeper to store ephemeral nodes |
sg_retry_interval=5 | (IntOpt)Number of seconds to wait until retrying to join the session |