def job_status(name=None):
    '''
    Return the current status, enabled or disabled, of the job.

    :param name: The name of the job to return status for
    :return: Return true if enabled or false if disabled.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.job_status jobname
    '''
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')

    server = _connect()
    if not job_exists(name):
        raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))

    return server.get_job_info(name)['buildable']
def get_job_config(name=None): ''' Return the current job configuration for the provided job. :param name: The name of the job to return the configuration for. :return: The configuration for the job specified. CLI Example: .. code-block:: bash salt '*' jenkins.get_job_config jobname ''' if not name: raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) job_info = server.get_job_config(name) return job_info
def plugin_installed(name): ''' .. versionadded:: 2016.11.0 Return if the plugin is installed for the provided plugin name. :param name: The name of the parameter to confirm installation. :return: True if plugin exists, False if plugin does not exist. CLI Example: .. code-block:: bash salt '*' jenkins.plugin_installed pluginName ''' server = _connect() plugins = server.get_plugins() exists = [plugin for plugin in plugins.keys() if name in plugin] if exists: return True else: return False
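# Sketch (illustration only, not part of the module): the list comprehension
# above keeps any plugin key for which ``name in plugin`` is truthy, and the
# function returns whether that list is non-empty. With hypothetical string
# keys that behaves as a substring match:
def _contains_plugin(plugin_keys, name):
    '''Return True if ``name`` occurs in any of the given plugin keys.'''
    return any(name in key for key in plugin_keys)

assert _contains_plugin(['git', 'git-client', 'credentials'], 'git') is True
assert _contains_plugin(['git', 'git-client', 'credentials'], 'svn') is False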
def record_set_create_or_update(name, zone_name, resource_group, record_type, **kwargs): ''' .. versionadded:: Fluorine Creates or updates a record set within a DNS zone. :param name: The name of the record set, relative to the name of the zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of DNS record in this record set. Record sets of type SOA can be updated but not created (they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' CLI Example: .. code-block:: bash salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A arecords='[{ipv4_address: 10.0.0.1}]' ttl=300 ''' dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_set_model = __utils__['azurearm.create_object_model']('dns', 'RecordSet', **kwargs) except TypeError as exc: result = {'error': 'The object model could not be built. ({0})'.format(str(exc))} return result try: record_set = dnsconn.record_sets.create_or_update( relative_record_set_name=name, zone_name=zone_name, resource_group_name=resource_group, record_type=record_type, parameters=record_set_model, if_match=kwargs.get('if_match'), if_none_match=kwargs.get('if_none_match') ) result = record_set.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
def record_set_delete(name, zone_name, resource_group, record_type, **kwargs): ''' .. versionadded:: Fluorine Deletes a record set from a DNS zone. This operation cannot be undone. :param name: The name of the record set, relative to the name of the zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are deleted when the DNS zone is deleted). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' CLI Example: .. code-block:: bash salt-call azurearm_dns.record_set_delete myhost myzone testgroup A ''' result = False dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_set = dnsconn.record_sets.delete( relative_record_set_name=name, zone_name=zone_name, resource_group_name=resource_group, record_type=record_type, if_match=kwargs.get('if_match') ) result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) return result
def record_set_get(name, zone_name, resource_group, record_type, **kwargs): ''' .. versionadded:: Fluorine Get a dictionary representing a record set's properties. :param name: The name of the record set, relative to the name of the zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of DNS record in this record set. Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' CLI Example: .. code-block:: bash salt-call azurearm_dns.record_set_get '@' myzone testgroup SOA ''' dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_set = dnsconn.record_sets.get( relative_record_set_name=name, zone_name=zone_name, resource_group_name=resource_group, record_type=record_type ) result = record_set.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
def record_sets_list_by_type(zone_name, resource_group, record_type, top=None, recordsetnamesuffix=None, **kwargs): ''' .. versionadded:: Fluorine Lists the record sets of a specified type in a DNS zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of record sets to enumerate. Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' :param top: The maximum number of record sets to return. If not specified, returns up to 100 record sets. :param recordsetnamesuffix: The suffix label of the record set name that has to be used to filter the record set enumerations. CLI Example: .. code-block:: bash salt-call azurearm_dns.record_sets_list_by_type myzone testgroup SOA ''' result = {} dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_sets = __utils__['azurearm.paged_object_to_list']( dnsconn.record_sets.list_by_type( zone_name=zone_name, resource_group_name=resource_group, record_type=record_type, top=top, recordsetnamesuffix=recordsetnamesuffix ) ) for record_set in record_sets: result[record_set['name']] = record_set except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
def record_sets_list_by_dns_zone(zone_name, resource_group, top=None, recordsetnamesuffix=None, **kwargs): ''' .. versionadded:: Fluorine Lists all record sets in a DNS zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param top: The maximum number of record sets to return. If not specified, returns up to 100 record sets. :param recordsetnamesuffix: The suffix label of the record set name that has to be used to filter the record set enumerations. CLI Example: .. code-block:: bash salt-call azurearm_dns.record_sets_list_by_dns_zone myzone testgroup ''' result = {} dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_sets = __utils__['azurearm.paged_object_to_list']( dnsconn.record_sets.list_by_dns_zone( zone_name=zone_name, resource_group_name=resource_group, top=top, recordsetnamesuffix=recordsetnamesuffix ) ) for record_set in record_sets: result[record_set['name']] = record_set except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
def zone_create_or_update(name, resource_group, **kwargs): ''' .. versionadded:: Fluorine Creates or updates a DNS zone. Does not modify DNS records within the zone. :param name: The name of the DNS zone to create (without a terminating dot). :param resource_group: The name of the resource group. CLI Example: .. code-block:: bash salt-call azurearm_dns.zone_create_or_update myzone testgroup ''' # DNS zones are global objects kwargs['location'] = 'global' dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) # Convert list of ID strings to list of dictionaries with id key. if isinstance(kwargs.get('registration_virtual_networks'), list): kwargs['registration_virtual_networks'] = [{'id': vnet} for vnet in kwargs['registration_virtual_networks']] if isinstance(kwargs.get('resolution_virtual_networks'), list): kwargs['resolution_virtual_networks'] = [{'id': vnet} for vnet in kwargs['resolution_virtual_networks']] try: zone_model = __utils__['azurearm.create_object_model']('dns', 'Zone', **kwargs) except TypeError as exc: result = {'error': 'The object model could not be built. ({0})'.format(str(exc))} return result try: zone = dnsconn.zones.create_or_update( zone_name=name, resource_group_name=resource_group, parameters=zone_model, if_match=kwargs.get('if_match'), if_none_match=kwargs.get('if_none_match') ) result = zone.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
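# Sketch (illustration only): the virtual-network handling above turns a plain
# list of resource ID strings into the list-of-dicts shape expected downstream.
# A self-contained example of that conversion:
def _ids_to_refs(vnet_ids):
    '''Wrap each resource ID string in a {'id': ...} reference dict.'''
    return [{'id': vnet} for vnet in vnet_ids]

assert _ids_to_refs(['/subscriptions/xxx/vnet1']) == [{'id': '/subscriptions/xxx/vnet1'}]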
def zone_delete(name, resource_group, **kwargs): ''' .. versionadded:: Fluorine Delete a DNS zone within a resource group. :param name: The name of the DNS zone to delete. :param resource_group: The name of the resource group. CLI Example: .. code-block:: bash salt-call azurearm_dns.zone_delete myzone testgroup ''' result = False dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: zone = dnsconn.zones.delete( zone_name=name, resource_group_name=resource_group, if_match=kwargs.get('if_match') ) zone.wait() result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) return result
def zone_get(name, resource_group, **kwargs): ''' .. versionadded:: Fluorine Get a dictionary representing a DNS zone's properties, but not the record sets within the zone. :param name: The DNS zone to get. :param resource_group: The name of the resource group. CLI Example: .. code-block:: bash salt-call azurearm_dns.zone_get myzone testgroup ''' dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: zone = dnsconn.zones.get( zone_name=name, resource_group_name=resource_group ) result = zone.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
def zones_list_by_resource_group(resource_group, top=None, **kwargs): ''' .. versionadded:: Fluorine Lists the DNS zones in a resource group. :param resource_group: The name of the resource group. :param top: The maximum number of DNS zones to return. If not specified, returns up to 100 zones. CLI Example: .. code-block:: bash salt-call azurearm_dns.zones_list_by_resource_group testgroup ''' result = {} dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: zones = __utils__['azurearm.paged_object_to_list']( dnsconn.zones.list_by_resource_group( resource_group_name=resource_group, top=top ) ) for zone in zones: result[zone['name']] = zone except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
def zones_list(top=None, **kwargs): ''' .. versionadded:: Fluorine Lists the DNS zones in all resource groups in a subscription. :param top: The maximum number of DNS zones to return. If not specified, returns up to 100 zones. CLI Example: .. code-block:: bash salt-call azurearm_dns.zones_list ''' result = {} dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: zones = __utils__['azurearm.paged_object_to_list'](dnsconn.zones.list(top=top)) for zone in zones: result[zone['name']] = zone except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} return result
def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ELB exists. CLI example: .. code-block:: bash salt myminion boto_elb.exists myelb region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: elb = conn.get_all_load_balancers(load_balancer_names=[name]) if elb: return True else: log.debug('The load balancer does not exist in region %s', region) return False except boto.exception.BotoServerError as error: log.warning(error) return False
def get_all_elbs(region=None, key=None, keyid=None, profile=None): ''' Return all load balancers associated with an account CLI example: .. code-block:: bash salt myminion boto_elb.get_all_elbs region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: return [e for e in conn.get_all_load_balancers()] except boto.exception.BotoServerError as error: log.warning(error) return []
def list_elbs(region=None, key=None, keyid=None, profile=None): ''' Return names of all load balancers associated with an account CLI example: .. code-block:: bash salt myminion boto_elb.list_elbs region=us-east-1 ''' return [e.name for e in get_all_elbs(region=region, key=key, keyid=keyid, profile=profile)]
def get_elb_config(name, region=None, key=None, keyid=None, profile=None): ''' Get an ELB configuration. CLI example: .. code-block:: bash salt myminion boto_elb.exists myelb region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) retries = 30 while retries: try: lb = conn.get_all_load_balancers(load_balancer_names=[name]) lb = lb[0] ret = {} ret['availability_zones'] = lb.availability_zones listeners = [] for _listener in lb.listeners: listener_dict = {} listener_dict['elb_port'] = _listener.load_balancer_port listener_dict['elb_protocol'] = _listener.protocol listener_dict['instance_port'] = _listener.instance_port listener_dict['instance_protocol'] = _listener.instance_protocol listener_dict['policies'] = _listener.policy_names if _listener.ssl_certificate_id: listener_dict['certificate'] = _listener.ssl_certificate_id listeners.append(listener_dict) ret['listeners'] = listeners backends = [] for _backend in lb.backends: bs_dict = {} bs_dict['instance_port'] = _backend.instance_port bs_dict['policies'] = [p.policy_name for p in _backend.policies] backends.append(bs_dict) ret['backends'] = backends ret['subnets'] = lb.subnets ret['security_groups'] = lb.security_groups ret['scheme'] = lb.scheme ret['dns_name'] = lb.dns_name ret['tags'] = _get_all_tags(conn, name) lb_policy_lists = [ lb.policies.app_cookie_stickiness_policies, lb.policies.lb_cookie_stickiness_policies, lb.policies.other_policies ] policies = [] for policy_list in lb_policy_lists: policies += [p.policy_name for p in policy_list] ret['policies'] = policies return ret except boto.exception.BotoServerError as error: if error.error_code == 'Throttling': log.debug('Throttled by AWS API, will retry in 5 seconds.') time.sleep(5) retries -= 1 continue log.error('Error fetching config for ELB %s: %s', name, error.message) log.error(error) return {} return {}
def listener_dict_to_tuple(listener): ''' Convert an ELB listener dict into a listener tuple used by certain parts of the AWS ELB API. CLI example: .. code-block:: bash salt myminion boto_elb.listener_dict_to_tuple '{"elb_port":80,"instance_port":80,"elb_protocol":"HTTP"}' ''' # We define all listeners as complex listeners. if 'instance_protocol' not in listener: instance_protocol = listener['elb_protocol'].upper() else: instance_protocol = listener['instance_protocol'].upper() listener_tuple = [listener['elb_port'], listener['instance_port'], listener['elb_protocol'], instance_protocol] if 'certificate' in listener: listener_tuple.append(listener['certificate']) return tuple(listener_tuple)
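# Sketch (illustration only): a standalone rendering of the conversion above,
# showing the tuple shape produced for a listener dict with and without an
# ``instance_protocol`` and a certificate:
def _listener_tuple(listener):
    instance_protocol = listener.get('instance_protocol',
                                     listener['elb_protocol']).upper()
    out = [listener['elb_port'], listener['instance_port'],
           listener['elb_protocol'], instance_protocol]
    if 'certificate' in listener:
        out.append(listener['certificate'])
    return tuple(out)

assert _listener_tuple({'elb_port': 80, 'instance_port': 80,
                        'elb_protocol': 'HTTP'}) == (80, 80, 'HTTP', 'HTTP')
assert _listener_tuple({'elb_port': 443, 'instance_port': 80,
                        'elb_protocol': 'HTTPS', 'instance_protocol': 'http',
                        'certificate': 'arn:aws:iam::1:server-certificate/c'}) \
    == (443, 80, 'HTTPS', 'HTTP', 'arn:aws:iam::1:server-certificate/c')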
def create(name, availability_zones, listeners, subnets=None, security_groups=None, scheme='internet-facing', region=None, key=None, keyid=None, profile=None): ''' Create an ELB CLI example to create an ELB: .. code-block:: bash salt myminion boto_elb.create myelb '["us-east-1a", "us-east-1e"]' '{"elb_port": 443, "elb_protocol": "HTTPS", ...}' region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if exists(name, region, key, keyid, profile): return True if isinstance(availability_zones, six.string_types): availability_zones = salt.utils.json.loads(availability_zones) if isinstance(listeners, six.string_types): listeners = salt.utils.json.loads(listeners) _complex_listeners = [] for listener in listeners: _complex_listeners.append(listener_dict_to_tuple(listener)) try: lb = conn.create_load_balancer(name=name, zones=availability_zones, subnets=subnets, security_groups=security_groups, scheme=scheme, complex_listeners=_complex_listeners) if lb: log.info('Created ELB %s', name) return True else: log.error('Failed to create ELB %s', name) return False except boto.exception.BotoServerError as error: log.error('Failed to create ELB %s: %s: %s', name, error.error_code, error.message, exc_info_on_loglevel=logging.DEBUG) return False
def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ELB. CLI example to delete an ELB: .. code-block:: bash salt myminion boto_elb.delete myelb region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not exists(name, region, key, keyid, profile): return True try: conn.delete_load_balancer(name) log.info('Deleted ELB %s.', name) return True except boto.exception.BotoServerError as error: log.error('Failed to delete ELB %s', name, exc_info_on_loglevel=logging.DEBUG) return False
def create_listeners(name, listeners, region=None, key=None, keyid=None, profile=None): ''' Create listeners on an ELB. CLI example: .. code-block:: bash salt myminion boto_elb.create_listeners myelb '[["HTTPS", "HTTP", 443, 80, "arn:aws:iam::11 11111:server-certificate/mycert"]]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if isinstance(listeners, six.string_types): listeners = salt.utils.json.loads(listeners) _complex_listeners = [] for listener in listeners: _complex_listeners.append(listener_dict_to_tuple(listener)) try: conn.create_load_balancer_listeners(name, [], _complex_listeners) log.info('Created ELB listeners on %s', name) return True except boto.exception.BotoServerError as error: log.error('Failed to create ELB listeners on %s: %s', name, error, exc_info_on_loglevel=logging.DEBUG) return False
def delete_listeners(name, ports, region=None, key=None, keyid=None, profile=None): ''' Delete listeners on an ELB. CLI example: .. code-block:: bash salt myminion boto_elb.delete_listeners myelb '[80,443]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if isinstance(ports, six.string_types): ports = salt.utils.json.loads(ports) try: conn.delete_load_balancer_listeners(name, ports) log.info('Deleted ELB listeners on %s', name) return True except boto.exception.BotoServerError as error: log.error('Failed to delete ELB listeners on %s: %s', name, error, exc_info_on_loglevel=logging.DEBUG) return False
def apply_security_groups(name, security_groups, region=None,
                          key=None, keyid=None, profile=None):
    '''
    Apply security groups to ELB.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if isinstance(security_groups, six.string_types):
        security_groups = salt.utils.json.loads(security_groups)
    try:
        conn.apply_security_groups_to_lb(name, security_groups)
        log.info('Applied security_groups on ELB %s', name)
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to apply security_groups on ELB %s: %s',
                  name, e.message)
        return False
def get_attributes(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if attributes are set on an ELB. CLI example: .. code-block:: bash salt myminion boto_elb.get_attributes myelb ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) retries = 30 while retries: try: lbattrs = conn.get_all_lb_attributes(name) ret = odict.OrderedDict() ret['access_log'] = odict.OrderedDict() ret['cross_zone_load_balancing'] = odict.OrderedDict() ret['connection_draining'] = odict.OrderedDict() ret['connecting_settings'] = odict.OrderedDict() al = lbattrs.access_log czlb = lbattrs.cross_zone_load_balancing cd = lbattrs.connection_draining cs = lbattrs.connecting_settings ret['access_log']['enabled'] = al.enabled ret['access_log']['s3_bucket_name'] = al.s3_bucket_name ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix ret['access_log']['emit_interval'] = al.emit_interval ret['cross_zone_load_balancing']['enabled'] = czlb.enabled ret['connection_draining']['enabled'] = cd.enabled ret['connection_draining']['timeout'] = cd.timeout ret['connecting_settings']['idle_timeout'] = cs.idle_timeout return ret except boto.exception.BotoServerError as e: if e.error_code == 'Throttling': log.debug("Throttled by AWS API, will retry in 5 seconds...") time.sleep(5) retries -= 1 continue log.error('ELB %s does not exist: %s', name, e.message) return {} return {}
def set_attributes(name, attributes, region=None, key=None, keyid=None, profile=None): ''' Set attributes on an ELB. name (string) Name of the ELB instance to set attributes for attributes A dict of attributes to set. Valid attributes are: access_log (dict) enabled (bool) Enable storage of access logs. s3_bucket_name (string) The name of the S3 bucket to place logs. s3_bucket_prefix (string) Prefix for the log file name. emit_interval (int) Interval for storing logs in S3 in minutes. Valid values are 5 and 60. connection_draining (dict) enabled (bool) Enable connection draining. timeout (int) Maximum allowed time in seconds for sending existing connections to an instance that is deregistering or unhealthy. Default is 300. cross_zone_load_balancing (dict) enabled (bool) Enable cross-zone load balancing. CLI example to set attributes on an ELB: .. code-block:: bash salt myminion boto_elb.set_attributes myelb '{"access_log": {"enabled": "true", "s3_bucket_name": "mybucket", "s3_bucket_prefix": "mylogs/", "emit_interval": "5"}}' region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) al = attributes.get('access_log', {}) czlb = attributes.get('cross_zone_load_balancing', {}) cd = attributes.get('connection_draining', {}) cs = attributes.get('connecting_settings', {}) if not al and not czlb and not cd and not cs: log.error('No supported attributes for ELB.') return False if al: _al = AccessLogAttribute() _al.enabled = al.get('enabled', False) if not _al.enabled: msg = 'Access log attribute configured, but enabled config missing' log.error(msg) return False _al.s3_bucket_name = al.get('s3_bucket_name', None) _al.s3_bucket_prefix = al.get('s3_bucket_prefix', None) _al.emit_interval = al.get('emit_interval', None) added_attr = conn.modify_lb_attribute(name, 'accessLog', _al) if added_attr: log.info('Added access_log attribute to %s elb.', name) else: log.error('Failed to add access_log attribute to %s elb.', name) return False if czlb: _czlb = CrossZoneLoadBalancingAttribute() _czlb.enabled = czlb['enabled'] added_attr = conn.modify_lb_attribute(name, 'crossZoneLoadBalancing', _czlb.enabled) if added_attr: log.info('Added cross_zone_load_balancing attribute to %s elb.', name) else: log.error('Failed to add cross_zone_load_balancing attribute.') return False if cd: _cd = ConnectionDrainingAttribute() _cd.enabled = cd['enabled'] _cd.timeout = cd.get('timeout', 300) added_attr = conn.modify_lb_attribute(name, 'connectionDraining', _cd) if added_attr: log.info('Added connection_draining attribute to %s elb.', name) else: log.error('Failed to add connection_draining attribute.') return False if cs: _cs = ConnectionSettingAttribute() _cs.idle_timeout = cs.get('idle_timeout', 60) added_attr = conn.modify_lb_attribute(name, 'connectingSettings', _cs) if added_attr: log.info('Added connecting_settings attribute to %s elb.', name) else: log.error('Failed to add connecting_settings attribute.') return False return True
def get_health_check(name, region=None, key=None, keyid=None, profile=None):
    '''
    Get the health check configured for this ELB.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elb.get_health_check myelb
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    retries = 30

    while True:
        try:
            lb = conn.get_all_load_balancers(load_balancer_names=[name])
            lb = lb[0]
            ret = odict.OrderedDict()
            hc = lb.health_check
            ret['interval'] = hc.interval
            ret['target'] = hc.target
            ret['healthy_threshold'] = hc.healthy_threshold
            ret['timeout'] = hc.timeout
            ret['unhealthy_threshold'] = hc.unhealthy_threshold
            return ret
        except boto.exception.BotoServerError as e:
            if retries and e.error_code == 'Throttling':
                log.debug('Throttled by AWS API, will retry in 5 seconds.')
                time.sleep(5)
                retries -= 1
                continue
            log.error('ELB %s not found.', name,
                      exc_info_on_loglevel=logging.DEBUG)
            return {}
def set_health_check(name, health_check, region=None, key=None,
                     keyid=None, profile=None):
    '''
    Set the health check on an ELB.

    CLI example to set the health check on an ELB:

    .. code-block:: bash

        salt myminion boto_elb.set_health_check myelb '{"target": "HTTP:80/"}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    retries = 30
    hc = HealthCheck(**health_check)
    while True:
        try:
            conn.configure_health_check(name, hc)
            log.info('Configured health check on ELB %s', name)
            return True
        except boto.exception.BotoServerError as error:
            if retries and error.error_code == 'Throttling':
                log.debug('Throttled by AWS API, will retry in 5 seconds.')
                time.sleep(5)
                retries -= 1
                continue
            log.exception('Failed to configure health check on ELB %s', name)
            return False
def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Register instances with an ELB. Instances is either a string instance id or a list of string instance id's. Returns: - ``True``: instance(s) registered successfully - ``False``: instance(s) failed to be registered CLI example: .. code-block:: bash salt myminion boto_elb.register_instances myelb instance_id salt myminion boto_elb.register_instances myelb "[instance_id,instance_id]" ''' # convert instances to list type, enabling consistent use of instances # variable throughout the register_instances method if isinstance(instances, six.string_types) or isinstance(instances, six.text_type): instances = [instances] conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: registered_instances = conn.register_instances(name, instances) except boto.exception.BotoServerError as error: log.warning(error) return False registered_instance_ids = [instance.id for instance in registered_instances] # register_failues is a set that will contain any instances that were not # able to be registered with the given ELB register_failures = set(instances).difference(set(registered_instance_ids)) if register_failures: log.warning('Instance(s): %s not registered with ELB %s.', list(register_failures), name) register_result = False else: register_result = True return register_result
def deregister_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Deregister instances with an ELB. Instances is either a string instance id or a list of string instance id's. Returns: - ``True``: instance(s) deregistered successfully - ``False``: instance(s) failed to be deregistered - ``None``: instance(s) not valid or not registered, no action taken CLI example: .. code-block:: bash salt myminion boto_elb.deregister_instances myelb instance_id salt myminion boto_elb.deregister_instances myelb "[instance_id, instance_id]" ''' # convert instances to list type, enabling consistent use of instances # variable throughout the deregister_instances method if isinstance(instances, six.string_types) or isinstance(instances, six.text_type): instances = [instances] conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: registered_instances = conn.deregister_instances(name, instances) except boto.exception.BotoServerError as error: # if the instance(s) given as an argument are not members of the ELB # boto returns error.error_code == 'InvalidInstance' # deregister_instances returns "None" because the instances are # effectively deregistered from ELB if error.error_code == 'InvalidInstance': log.warning( 'One or more of instance(s) %s are not part of ELB %s. ' 'deregister_instances not performed.', instances, name ) return None else: log.warning(error) return False registered_instance_ids = [instance.id for instance in registered_instances] # deregister_failures is a set that will contain any instances that were # unable to be deregistered from the given ELB deregister_failures = set(instances).intersection(set(registered_instance_ids)) if deregister_failures: log.warning( 'Instance(s): %s not deregistered from ELB %s.', list(deregister_failures), name ) deregister_result = False else: deregister_result = True return deregister_result
def set_instances(name, instances, test=False, region=None, key=None, keyid=None, profile=None): ''' Set the instances assigned to an ELB to exactly the list given CLI example: .. code-block:: bash salt myminion boto_elb.set_instances myelb region=us-east-1 instances="[instance_id,instance_id]" ''' ret = True current = set([i['instance_id'] for i in get_instance_health(name, region, key, keyid, profile)]) desired = set(instances) add = desired - current remove = current - desired if test: return bool(add or remove) if remove: if deregister_instances(name, list(remove), region, key, keyid, profile) is False: ret = False if add: if register_instances(name, list(add), region, key, keyid, profile) is False: ret = False return ret
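# Sketch (illustration only): the reconciliation above is plain set arithmetic
# between the currently registered instance IDs and the desired list. A
# standalone example with made-up instance IDs:
def _plan_changes(current_ids, desired_ids):
    current, desired = set(current_ids), set(desired_ids)
    return {'add': desired - current, 'remove': current - desired}

plan = _plan_changes(['i-aaa', 'i-bbb'], ['i-bbb', 'i-ccc'])
assert plan == {'add': {'i-ccc'}, 'remove': {'i-aaa'}}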
def get_instance_health(name, region=None, key=None, keyid=None, profile=None, instances=None): ''' Get a list of instances and their health state CLI example: .. code-block:: bash salt myminion boto_elb.get_instance_health myelb salt myminion boto_elb.get_instance_health myelb region=us-east-1 instances="[instance_id,instance_id]" ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: instance_states = conn.describe_instance_health(name, instances) ret = [] for _instance in instance_states: ret.append({'instance_id': _instance.instance_id, 'description': _instance.description, 'state': _instance.state, 'reason_code': _instance.reason_code }) return ret except boto.exception.BotoServerError as error: log.debug(error) return []
def create_policy(name, policy_name, policy_type, policy, region=None, key=None, keyid=None, profile=None): ''' Create an ELB policy. .. versionadded:: 2016.3.0 CLI example: .. code-block:: bash salt myminion boto_elb.create_policy myelb mypolicy LBCookieStickinessPolicyType '{"CookieExpirationPeriod": 3600}' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not exists(name, region, key, keyid, profile): return False try: success = conn.create_lb_policy(name, policy_name, policy_type, policy) if success: log.info('Created policy %s on ELB %s', policy_name, name) return True else: log.error('Failed to create policy %s on ELB %s', policy_name, name) return False except boto.exception.BotoServerError as e: log.error('Failed to create policy %s on ELB %s: %s', policy_name, name, e.message, exc_info_on_loglevel=logging.DEBUG) return False
def delete_policy(name, policy_name, region=None, key=None, keyid=None, profile=None): ''' Delete an ELB policy. .. versionadded:: 2016.3.0 CLI example: .. code-block:: bash salt myminion boto_elb.delete_policy myelb mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not exists(name, region, key, keyid, profile): return True try: conn.delete_lb_policy(name, policy_name) log.info('Deleted policy %s on ELB %s', policy_name, name) return True except boto.exception.BotoServerError as e: log.error('Failed to delete policy %s on ELB %s: %s', policy_name, name, e.message, exc_info_on_loglevel=logging.DEBUG) return False
def set_listener_policy(name, port, policies=None, region=None, key=None,
                        keyid=None, profile=None):
    '''
    Set the policies of an ELB listener.

    .. versionadded:: 2016.3.0

    CLI example:

    .. code-block:: bash

        salt myminion boto_elb.set_listener_policy myelb 443 "[policy1,policy2]"
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not exists(name, region, key, keyid, profile):
        return True
    if policies is None:
        policies = []
    try:
        conn.set_lb_policies_of_listener(name, port, policies)
        log.info('Set policies %s on ELB %s listener %s', policies, name, port)
    except boto.exception.BotoServerError as e:
        log.error('Failed to set policy %s on ELB %s listener %s: %s',
                  policies, name, port, e.message,
                  exc_info_on_loglevel=logging.DEBUG)
        return False
    return True
def set_tags(name, tags, region=None, key=None, keyid=None, profile=None): ''' Add the tags on an ELB .. versionadded:: 2016.3.0 name name of the ELB tags dict of name/value pair tags CLI Example: .. code-block:: bash salt myminion boto_elb.set_tags my-elb-name "{'Tag1': 'Value', 'Tag2': 'Another Value'}" ''' if exists(name, region, key, keyid, profile): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = _add_tags(conn, name, tags) return ret else: return False
def delete_tags(name, tags, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the tags on an ELB

    name
        name of the ELB

    tags
        list of tags to remove

    CLI Example:

    .. code-block:: bash

        salt myminion boto_elb.delete_tags my-elb-name "['TagToRemove1', 'TagToRemove2']"
    '''
    if exists(name, region, key, keyid, profile):
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        ret = _remove_tags(conn, name, tags)
        return ret
    else:
        return False
def _build_tag_param_list(params, tags): ''' helper function to build a tag parameter list to send ''' keys = sorted(tags.keys()) i = 1 for key in keys: value = tags[key] params['Tags.member.{0}.Key'.format(i)] = key if value is not None: params['Tags.member.{0}.Value'.format(i)] = value i += 1
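# Sketch (illustration only): a standalone version of the helper above,
# showing the AWS query-parameter layout it builds for a tags dict:
def _tag_params(tags):
    params = {}
    for i, key in enumerate(sorted(tags), start=1):
        params['Tags.member.{0}.Key'.format(i)] = key
        if tags[key] is not None:
            params['Tags.member.{0}.Value'.format(i)] = tags[key]
    return params

assert _tag_params({'Env': 'prod', 'Owner': None}) == {
    'Tags.member.1.Key': 'Env',
    'Tags.member.1.Value': 'prod',
    'Tags.member.2.Key': 'Owner',
}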
def _get_all_tags(conn, load_balancer_names=None): ''' Retrieve all the metadata tags associated with your ELB(s). :type load_balancer_names: list :param load_balancer_names: An optional list of load balancer names. :rtype: list :return: A list of :class:`boto.ec2.elb.tag.Tag` objects ''' params = {} if load_balancer_names: conn.build_list_params(params, load_balancer_names, 'LoadBalancerNames.member.%d') tags = conn.get_object( 'DescribeTags', params, __utils__['boto_elb_tag.get_tag_descriptions'](), verb='POST' ) if tags[load_balancer_names]: return tags[load_balancer_names] else: return None
def _add_tags(conn, load_balancer_names, tags): ''' Create new metadata tags for the specified resource ids. :type load_balancer_names: list :param load_balancer_names: A list of load balancer names. :type tags: dict :param tags: A dictionary containing the name/value pairs. If you want to create only a tag name, the value for that tag should be the empty string (e.g. ''). ''' params = {} conn.build_list_params(params, load_balancer_names, 'LoadBalancerNames.member.%d') _build_tag_param_list(params, tags) return conn.get_status('AddTags', params, verb='POST')
def _remove_tags(conn, load_balancer_names, tags): ''' Delete metadata tags for the specified resource ids. :type load_balancer_names: list :param load_balancer_names: A list of load balancer names. :type tags: list :param tags: A list containing just tag names for the tags to be deleted. ''' params = {} conn.build_list_params(params, load_balancer_names, 'LoadBalancerNames.member.%d') conn.build_list_params(params, tags, 'Tags.member.%d.Key') return conn.get_status('RemoveTags', params, verb='POST')
def _repack_pkgs(pkgs, normalize=True): ''' Repack packages specified using "pkgs" argument to pkg states into a single dictionary ''' if normalize and 'pkg.normalize_name' in __salt__: _normalize_name = __salt__['pkg.normalize_name'] else: _normalize_name = lambda pkgname: pkgname return dict( [ (_normalize_name(six.text_type(x)), six.text_type(y) if y is not None else y) for x, y in six.iteritems(salt.utils.data.repack_dictlist(pkgs)) ] )
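# Sketch (illustration only): the repacking above relies on
# salt.utils.data.repack_dictlist; a minimal standalone approximation for a
# list mixing bare names and one-element name/version dicts:
def _repack(pkg_list):
    out = {}
    for item in pkg_list:
        if isinstance(item, dict):
            out.update(item)
        else:
            out[item] = None
    return out

assert _repack(['vim', {'nginx': '1.14.0'}]) == {'vim': None, 'nginx': '1.14.0'}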
def pack_sources(sources, normalize=True): ''' Accepts list of dicts (or a string representing a list of dicts) and packs the key/value pairs into a single dict. ``'[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]'`` would become ``{"foo": "salt://foo.rpm", "bar": "salt://bar.rpm"}`` normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' pkg_resource.pack_sources '[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]' ''' if normalize and 'pkg.normalize_name' in __salt__: _normalize_name = __salt__['pkg.normalize_name'] else: _normalize_name = lambda pkgname: pkgname if isinstance(sources, six.string_types): try: sources = salt.utils.yaml.safe_load(sources) except salt.utils.yaml.parser.ParserError as err: log.error(err) return {} ret = {} for source in sources: if (not isinstance(source, dict)) or len(source) != 1: log.error('Invalid input: %s', pprint.pformat(sources)) log.error('Input must be a list of 1-element dicts') return {} else: key = next(iter(source)) ret[_normalize_name(key)] = source[key] return ret
def parse_targets(name=None, pkgs=None, sources=None, saltenv='base', normalize=True, **kwargs): ''' Parses the input to pkg.install and returns back the package(s) to be installed. Returns a list of packages, as well as a string noting whether the packages are to come from a repository or a binary package. CLI Example: .. code-block:: bash salt '*' pkg_resource.parse_targets ''' if '__env__' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('__env__') if __grains__['os'] == 'MacOS' and sources: log.warning('Parameter "sources" ignored on MacOS hosts.') version = kwargs.get('version') if pkgs and sources: log.error('Only one of "pkgs" and "sources" can be used.') return None, None elif 'advisory_ids' in kwargs: if pkgs: log.error('Cannot use "advisory_ids" and "pkgs" at the same time') return None, None elif kwargs['advisory_ids']: return kwargs['advisory_ids'], 'advisory' else: return [name], 'advisory' elif pkgs: if version is not None: log.warning('\'version\' argument will be ignored for multiple ' 'package targets') pkgs = _repack_pkgs(pkgs, normalize=normalize) if not pkgs: return None, None else: return pkgs, 'repository' elif sources and __grains__['os'] != 'MacOS': if version is not None: log.warning('\'version\' argument will be ignored for multiple ' 'package targets') sources = pack_sources(sources, normalize=normalize) if not sources: return None, None srcinfo = [] for pkg_name, pkg_src in six.iteritems(sources): if __salt__['config.valid_fileproto'](pkg_src): # Cache package from remote source (salt master, HTTP, FTP) and # append the cached path. srcinfo.append(__salt__['cp.cache_file'](pkg_src, saltenv)) else: # Package file local to the minion, just append the path to the # package file. if not os.path.isabs(pkg_src): raise SaltInvocationError( 'Path {0} for package {1} is either not absolute or ' 'an invalid protocol'.format(pkg_src, pkg_name) ) srcinfo.append(pkg_src) return srcinfo, 'file' elif name: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) packed = dict([(_normalize_name(x), version) for x in name.split(',')]) else: packed = dict([(x, version) for x in name.split(',')]) return packed, 'repository' else: log.error('No package sources provided') return None, None
def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. CLI Example: .. code-block:: bash salt '*' pkg_resource.version vim salt '*' pkg_resource.version foo bar baz salt '*' pkg_resource.version 'python*' ''' ret = {} versions_as_list = \ salt.utils.data.is_true(kwargs.pop('versions_as_list', False)) pkg_glob = False if names: pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) for name in names: if '*' in name: pkg_glob = True for match in fnmatch.filter(pkgs, name): ret[match] = pkgs.get(match, []) else: ret[name] = pkgs.get(name, []) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) # Return a string if no globbing is used, and there is one item in the # return dict if len(ret) == 1 and not pkg_glob: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret
def add_pkg(pkgs, name, pkgver): ''' Add a package to a dict of installed packages. CLI Example: .. code-block:: bash salt '*' pkg_resource.add_pkg '{}' bind 9 ''' try: pkgs.setdefault(name, []).append(pkgver) except AttributeError as exc: log.exception(exc)
def sort_pkglist(pkgs): ''' Accepts a dict obtained from pkg.list_pkgs() and sorts in place the list of versions for any packages that have multiple versions installed, so that two package lists can be compared to one another. CLI Example: .. code-block:: bash salt '*' pkg_resource.sort_pkglist '["3.45", "2.13"]' ''' # It doesn't matter that ['4.9','4.10'] would be sorted to ['4.10','4.9'], # so long as the sorting is consistent. try: for key in pkgs: # Passing the pkglist to set() also removes duplicate version # numbers (if present). pkgs[key] = sorted(set(pkgs[key])) except AttributeError as exc: log.exception(exc)
def stringify(pkgs): ''' Takes a dict of package name/version information and joins each list of installed versions into a string. CLI Example: .. code-block:: bash salt '*' pkg_resource.stringify 'vim: 7.127' ''' try: for key in pkgs: pkgs[key] = ','.join(pkgs[key]) except AttributeError as exc: log.exception(exc)
def format_pkg_list(packages, versions_as_list, attr): ''' Formats packages according to parameters for list_pkgs. ''' ret = copy.deepcopy(packages) if attr: requested_attr = {'epoch', 'version', 'release', 'arch', 'install_date', 'install_date_time_t'} if attr != 'all': requested_attr &= set(attr + ['version']) for name in ret: versions = [] for all_attr in ret[name]: filtered_attr = {} for key in requested_attr: if all_attr[key]: filtered_attr[key] = all_attr[key] versions.append(filtered_attr) ret[name] = versions return ret for name in ret: ret[name] = [format_version(d['epoch'], d['version'], d['release']) for d in ret[name]] if not versions_as_list: stringify(ret) return ret
def format_version(epoch, version, release): ''' Formats a version string for list_pkgs. ''' full_version = '{0}:{1}'.format(epoch, version) if epoch else version if release: full_version += '-{0}'.format(release) return full_version
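# Sketch (illustration only): expected output of the formatter above for a few
# epoch/version/release combinations:
def _fmt(epoch, version, release):
    full_version = '{0}:{1}'.format(epoch, version) if epoch else version
    if release:
        full_version += '-{0}'.format(release)
    return full_version

assert _fmt(None, '1.2.3', None) == '1.2.3'
assert _fmt(None, '1.2.3', '4.el7') == '1.2.3-4.el7'
assert _fmt('1', '1.2.3', '4.el7') == '1:1.2.3-4.el7'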
def _make_set(var): ''' Force var to be a set ''' if var is None: return set() if not isinstance(var, list): if isinstance(var, six.string_types): var = var.split() else: var = list(var) return set(var)
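# Sketch (illustration only): how the coercion above treats the three input
# shapes it accepts (None, a whitespace-separated string, and any iterable):
def _to_set(var):
    if var is None:
        return set()
    if isinstance(var, str):
        var = var.split()
    return set(var)

assert _to_set(None) == set()
assert _to_set('USE flags here') == {'USE', 'flags', 'here'}
assert _to_set(['a', 'b', 'a']) == {'a', 'b'}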
def present(name, value=None, contains=None, excludes=None):
    '''
    Verify that the variable is in the ``make.conf`` and has the provided
    settings. If value is set, contains and excludes will be ignored.

    name
        The variable name. This will automatically be converted to upper
        case since variables in ``make.conf`` are in upper case

    value
        Enforce that the value of the variable is set to the provided value

    contains
        Enforce that the value of the variable contains the provided value

    excludes
        Enforce that the value of the variable does not contain the provided
        value.
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    # Make name all Uppers since make.conf uses all Upper vars
    upper_name = name.upper()

    old_value = __salt__['makeconf.get_var'](upper_name)

    # If only checking if variable is present allows for setting the
    # variable outside of salt states, but the state can still ensure
    # that is exists
    if value is None and contains is None and excludes is None:
        # variable is present
        if old_value is not None:
            msg = 'Variable {0} is already present in make.conf'
            ret['comment'] = msg.format(name)
        else:
            if __opts__['test']:
                msg = 'Variable {0} is to be set in make.conf'
                ret['comment'] = msg.format(name)
                ret['result'] = None
            else:
                changes = __salt__['makeconf.set_var'](upper_name, '')

                # If failed to be set
                if changes[upper_name]['new'] is None:
                    msg = 'Variable {0} failed to be set in make.conf'
                    ret['comment'] = msg.format(name)
                    ret['result'] = False
                else:
                    msg = 'Variable {0} set in make.conf'
                    ret['comment'] = msg.format(name)

    elif value is not None:
        # variable is present and is set to value
        if old_value is not None and old_value == value:
            msg = 'Variable {0} is already "{1}" in make.conf'
            ret['comment'] = msg.format(name, value)
        else:
            if __opts__['test']:
                msg = 'Variable {0} is to be set to "{1}" in make.conf'
                ret['comment'] = msg.format(name, value)
                ret['result'] = None
            else:
                changes = __salt__['makeconf.set_var'](upper_name, value)

                # If failed to be set
                new_value = __salt__['makeconf.get_var'](upper_name)
                if new_value is None or new_value != value:
                    msg = 'Variable {0} failed to be set in make.conf'
                    ret['comment'] = msg.format(name)
                    ret['result'] = False
                else:
                    msg = 'Variable {0} is set in make.conf'
                    ret['changes'] = changes
                    ret['comment'] = msg.format(name)

    elif contains is not None or excludes is not None:
        # Make these into sets to easily compare things
        contains_set = _make_set(contains)
        excludes_set = _make_set(excludes)
        old_value_set = _make_set(old_value)
        if contains_set.intersection(excludes_set):
            msg = 'Variable {0} cannot contain and exclude the same value'
            ret['comment'] = msg.format(name)
            ret['result'] = False
        else:
            to_append = set()
            to_trim = set()
            if contains is not None:
                to_append = contains_set.difference(old_value_set)
            if excludes is not None:
                to_trim = excludes_set.intersection(old_value_set)
            if not to_append and not to_trim:
                msg = 'Variable {0} is correct in make.conf'
                ret['comment'] = msg.format(name)
            else:
                if __opts__['test']:
                    msg = 'Variable {0} is set to'.format(name)
                    if to_append:
                        msg += ' append "{0}"'.format(list(to_append))
                    if to_trim:
                        msg += ' trim "{0}"'.format(list(to_trim))
                    msg += ' in make.conf'
                    ret['comment'] = msg
                    ret['result'] = None
                else:
                    for value in to_append:
                        __salt__['makeconf.append_var'](upper_name, value)
                    for value in to_trim:
                        __salt__['makeconf.trim_var'](upper_name, value)
                    new_value = __salt__['makeconf.get_var'](upper_name)

                    # TODO verify appends and trims worked
                    ret['changes'] = {upper_name: {'old': old_value,
                                                   'new': new_value}}
                    msg = 'Variable {0} is correct in make.conf'
                    ret['comment'] = msg.format(name)

    # Now finally return
    return ret
def absent(name): ''' Verify that the variable is not in the ``make.conf``. name The variable name. This will automatically be converted to upper case since variables in ``make.conf`` are in upper case ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} # Make name all Uppers since make.conf uses all Upper vars upper_name = name.upper() old_value = __salt__['makeconf.get_var'](upper_name) if old_value is None: msg = 'Variable {0} is already absent from make.conf' ret['comment'] = msg.format(name) else: if __opts__['test']: msg = 'Variable {0} is set to be removed from make.conf' ret['comment'] = msg.format(name) ret['result'] = None else: __salt__['makeconf.remove_var'](upper_name) new_value = __salt__['makeconf.get_var'](upper_name) if new_value is not None: msg = 'Variable {0} failed to be removed from make.conf' ret['comment'] = msg.format(name) ret['result'] = False else: msg = 'Variable {0} was removed from make.conf' ret['comment'] = msg.format(name) ret['result'] = True return ret
def info(name): ''' Return information for the specified user CLI Example: .. code-block:: bash salt '*' shadow.info root ''' if HAS_SPWD: try: data = spwd.getspnam(name) ret = { 'name': data.sp_nam, 'passwd': data.sp_pwd, 'lstchg': data.sp_lstchg, 'min': data.sp_min, 'max': data.sp_max, 'warn': data.sp_warn, 'inact': data.sp_inact, 'expire': data.sp_expire} except KeyError: ret = { 'name': '', 'passwd': '', 'lstchg': '', 'min': '', 'max': '', 'warn': '', 'inact': '', 'expire': ''} return ret # SmartOS joyent_20130322T181205Z does not have spwd, but not all is lost # Return what we can know ret = { 'name': '', 'passwd': '', 'lstchg': '', 'min': '', 'max': '', 'warn': '', 'inact': '', 'expire': ''} try: data = pwd.getpwnam(name) ret.update({ 'name': name }) except KeyError: return ret # To compensate for lack of spwd module, read in password hash from /etc/shadow s_file = '/etc/shadow' if not os.path.isfile(s_file): return ret with salt.utils.files.fopen(s_file, 'rb') as ifile: for line in ifile: comps = line.strip().split(':') if comps[0] == name: ret.update({'passwd': comps[1]}) # For SmartOS `passwd -s <username>` and the output format is: # name status mm/dd/yy min max warn # # Fields: # 1. Name: username # 2. Status: # - LK: locked # - NL: no login # - NP: No password # - PS: Password # 3. Last password change # 4. Minimum age # 5. Maximum age # 6. Warning period output = __salt__['cmd.run_all']('passwd -s {0}'.format(name), python_shell=False) if output['retcode'] != 0: return ret fields = output['stdout'].split() if len(fields) == 2: # For example: # root NL return ret # We have all fields: # buildbot L 05/09/2013 0 99999 7 ret.update({ 'name': data.pw_name, 'lstchg': fields[2], 'min': int(fields[3]), 'max': int(fields[4]), 'warn': int(fields[5]), 'inact': '', 'expire': '' }) return ret
def set_maxdays(name, maxdays):
    '''
    Set the maximum number of days during which a password is valid. See man
    passwd.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_maxdays username 90
    '''
    pre_info = info(name)
    if maxdays == pre_info['max']:
        return True
    cmd = 'passwd -x {0} {1}'.format(maxdays, name)
    __salt__['cmd.run'](cmd, python_shell=False)
    post_info = info(name)
    if post_info['max'] != pre_info['max']:
        return post_info['max'] == maxdays
    return False
def set_mindays(name, mindays): ''' Set the minimum number of days between password changes. See man passwd. CLI Example: .. code-block:: bash salt '*' shadow.set_mindays username 7 ''' pre_info = info(name) if mindays == pre_info['min']: return True cmd = 'passwd -n {0} {1}'.format(mindays, name) __salt__['cmd.run'](cmd, python_shell=False) post_info = info(name) if post_info['min'] != pre_info['min']: return post_info['min'] == mindays return False
def del_password(name): ''' .. versionadded:: 2015.8.8 Delete the password from name user CLI Example: .. code-block:: bash salt '*' shadow.del_password username ''' cmd = 'passwd -d {0}'.format(name) __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet') uinfo = info(name) return not uinfo['passwd']
def set_password(name, password): ''' Set the password for a named user. The password must be a properly defined hash, the password hash can be generated with this command: ``openssl passwd -1 <plaintext password>`` CLI Example: .. code-block:: bash salt '*' shadow.set_password root $1$UYCIxa628.9qXjpQCjM4a.. ''' s_file = '/etc/shadow' ret = {} if not os.path.isfile(s_file): return ret lines = [] with salt.utils.files.fopen(s_file, 'rb') as ifile: for line in ifile: comps = line.strip().split(':') if comps[0] != name: lines.append(line) continue comps[1] = password line = ':'.join(comps) lines.append('{0}\n'.format(line)) with salt.utils.files.fopen(s_file, 'w+') as ofile: lines = [salt.utils.stringutils.to_str(_l) for _l in lines] ofile.writelines(lines) uinfo = info(name) return uinfo['passwd'] == password
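# Sketch (illustration only): the core of the rewrite above, applied to a
# single /etc/shadow-style line rather than the real file:
def _replace_hash(shadow_line, user, new_hash):
    comps = shadow_line.strip().split(':')
    if comps[0] == user:
        comps[1] = new_hash
    return ':'.join(comps)

line = 'root:$1$old$hash:17000:0:99999:7:::'
assert _replace_hash(line, 'root', '$1$new$hash') == \
    'root:$1$new$hash:17000:0:99999:7:::'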
def set_warndays(name, warndays): ''' Set the number of days of warning before a password change is required. See man passwd. CLI Example: .. code-block:: bash salt '*' shadow.set_warndays username 7 ''' pre_info = info(name) if warndays == pre_info['warn']: return True cmd = 'passwd -w {0} {1}'.format(warndays, name) __salt__['cmd.run'](cmd, python_shell=False) post_info = info(name) if post_info['warn'] != pre_info['warn']: return post_info['warn'] == warndays return False
def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument ''' Checkout the ext_pillar sources and compile the resulting pillar SLS ''' opts = copy.deepcopy(__opts__) opts['pillar_roots'] = {} opts['__git_pillar'] = True git_pillar = salt.utils.gitfs.GitPillar( opts, repos, per_remote_overrides=PER_REMOTE_OVERRIDES, per_remote_only=PER_REMOTE_ONLY, global_only=GLOBAL_ONLY) if __opts__.get('__role') == 'minion': # If masterless, fetch the remotes. We'll need to remove this once # we make the minion daemon able to run standalone. git_pillar.fetch_remotes() git_pillar.checkout() ret = {} merge_strategy = __opts__.get( 'pillar_source_merging_strategy', 'smart' ) merge_lists = __opts__.get( 'pillar_merge_lists', False ) for pillar_dir, env in six.iteritems(git_pillar.pillar_dirs): # Map env if env == '__env__' before checking the env value if env == '__env__': env = opts.get('pillarenv') \ or opts.get('saltenv') \ or opts.get('git_pillar_base') log.debug('__env__ maps to %s', env) # If pillarenv is set, only grab pillars with that match pillarenv if opts['pillarenv'] and env != opts['pillarenv']: log.debug( 'env \'%s\' for pillar dir \'%s\' does not match ' 'pillarenv \'%s\', skipping', env, pillar_dir, opts['pillarenv'] ) continue if pillar_dir in git_pillar.pillar_linked_dirs: log.debug( 'git_pillar is skipping processing on %s as it is a ' 'mounted repo', pillar_dir ) continue else: log.debug( 'git_pillar is processing pillar SLS from %s for pillar ' 'env \'%s\'', pillar_dir, env ) pillar_roots = [pillar_dir] if __opts__['git_pillar_includes']: # Add the rest of the pillar_dirs in this environment to the # list, excluding the current pillar_dir being processed. This # is because it was already specified above as the first in the # list, so that its top file is sourced from the correct # location and not from another git_pillar remote. pillar_roots.extend( [d for (d, e) in six.iteritems(git_pillar.pillar_dirs) if env == e and d != pillar_dir] ) opts['pillar_roots'] = {env: pillar_roots} local_pillar = Pillar(opts, __grains__, minion_id, env) ret = salt.utils.dictupdate.merge( ret, local_pillar.compile_pillar(ext=False), strategy=merge_strategy, merge_lists=merge_lists ) return ret
def _execute_command(cmd, at_time=None): ''' Helper function to execute the command :param str cmd: the command to run :param str at_time: If passed, the cmd will be scheduled. Returns: bool ''' if at_time: cmd = 'echo \'{0}\' | at {1}'.format(cmd, _cmd_quote(at_time)) return not bool(__salt__['cmd.retcode'](cmd, python_shell=True))
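# Sketch (illustration only): how the scheduled form of the command above is
# composed. Uses shlex.quote in place of salt's _cmd_quote import; the echo
# payload itself is wrapped in single quotes by the format string:
import shlex

def _compose_at_cmd(cmd, at_time=None):
    if at_time:
        cmd = "echo '{0}' | at {1}".format(cmd, shlex.quote(at_time))
    return cmd

assert _compose_at_cmd('shutdown -h now') == 'shutdown -h now'
assert _compose_at_cmd('shutdown -h now', 'now + 5 minutes') == \
    "echo 'shutdown -h now' | at 'now + 5 minutes'"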
def set_remote_login(enable): ''' Set the remote login (SSH) to either on or off. :param bool enable: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_remote_login True ''' state = __utils__['mac_utils.validate_enabled'](enable) cmd = 'systemsetup -f -setremotelogin {0}'.format(state) __utils__['mac_utils.execute_return_success'](cmd) return __utils__['mac_utils.confirm_updated'](state, get_remote_login, normalize_ret=True)
def set_remote_events(enable): ''' Set whether the server responds to events sent by other computers (such as AppleScripts) :param bool enable: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_remote_events On ''' state = __utils__['mac_utils.validate_enabled'](enable) cmd = 'systemsetup -setremoteappleevents {0}'.format(state) __utils__['mac_utils.execute_return_success'](cmd) return __utils__['mac_utils.confirm_updated']( state, get_remote_events, normalize_ret=True, )
def set_computer_name(name): ''' Set the computer name :param str name: The new computer name :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_computer_name "Mike's Mac" ''' cmd = 'systemsetup -setcomputername "{0}"'.format(name) __utils__['mac_utils.execute_return_success'](cmd) return __utils__['mac_utils.confirm_updated']( name, get_computer_name, )
def set_subnet_name(name): ''' Set the local subnet name :param str name: The new local subnet name .. note:: Spaces are changed to dashes. Other special characters are removed. :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash The following will be set as 'Mikes-Mac' salt '*' system.set_subnet_name "Mike's Mac" ''' cmd = 'systemsetup -setlocalsubnetname "{0}"'.format(name) __utils__['mac_utils.execute_return_success'](cmd) return __utils__['mac_utils.confirm_updated']( name, get_subnet_name, )
def set_startup_disk(path): ''' Set the current startup disk to the indicated path. Use ``system.list_startup_disks`` to find valid startup disks on the system. :param str path: The valid startup disk path :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_startup_disk /System/Library/CoreServices ''' if path not in list_startup_disks(): msg = 'Invalid value passed for path.\n' \ 'Must be a valid startup disk as found in ' \ 'system.list_startup_disks.\n' \ 'Passed: {0}'.format(path) raise SaltInvocationError(msg) cmd = 'systemsetup -setstartupdisk {0}'.format(path) __utils__['mac_utils.execute_return_result'](cmd) return __utils__['mac_utils.confirm_updated']( path, get_startup_disk, )
def set_restart_delay(seconds):
    '''
    Set the number of seconds after which the computer will start up after a
    power failure.

    .. warning::
        This command fails with the following error:

        ``Error, IOServiceOpen returned 0x10000003``

        The setting is not updated. This is an Apple bug. It seems like it
        may only work on certain versions of Mac OS X Server. This article
        explains the issue in more detail, though it is quite old.

        http://lists.apple.com/archives/macos-x-server/2006/Jul/msg00967.html

    :param int seconds: The number of seconds. Must be a multiple of 30

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_restart_delay 180
    '''
    if seconds % 30 != 0:
        msg = 'Invalid value passed for seconds.\n' \
              'Must be a multiple of 30.\n' \
              'Passed: {0}'.format(seconds)
        raise SaltInvocationError(msg)

    cmd = 'systemsetup -setwaitforstartupafterpowerfailure {0}'.format(seconds)
    __utils__['mac_utils.execute_return_success'](cmd)

    return __utils__['mac_utils.confirm_updated'](
        seconds,
        get_restart_delay,
    )
def set_disable_keyboard_on_lock(enable):
    '''
    Set whether or not the keyboard should be disabled when the X Serve
    enclosure lock is engaged.

    :param bool enable: True to enable, False to disable.
        "On" and "Off" are also acceptable values. Additionally you can pass 1
        and 0 to represent True and False respectively

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_disable_keyboard_on_lock False
    '''
    state = __utils__['mac_utils.validate_enabled'](enable)
    cmd = 'systemsetup -setdisablekeyboardwhenenclosurelockisengaged ' \
          '{0}'.format(state)
    __utils__['mac_utils.execute_return_success'](cmd)

    return __utils__['mac_utils.confirm_updated'](
        state,
        get_disable_keyboard_on_lock,
        normalize_ret=True,
    )
def set_boot_arch(arch='default'): ''' Set the kernel to boot in 32 or 64 bit mode on next boot. .. note:: When this function fails with the error ``changes to kernel architecture failed to save!``, then the boot arch is not updated. This is either an Apple bug, not available on the test system, or a result of system files being locked down in macOS (SIP Protection). :param str arch: A string representing the desired architecture. If no value is passed, default is assumed. Valid values include: - i386 - x86_64 - default :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_boot_arch i386 ''' if arch not in ['i386', 'x86_64', 'default']: msg = 'Invalid value passed for arch.\n' \ 'Must be i386, x86_64, or default.\n' \ 'Passed: {0}'.format(arch) raise SaltInvocationError(msg) cmd = 'systemsetup -setkernelbootarchitecture {0}'.format(arch) __utils__['mac_utils.execute_return_success'](cmd) return __utils__['mac_utils.confirm_updated']( arch, get_boot_arch, )
def create_target_group(name, protocol, port, vpc_id,
                        region=None, key=None, keyid=None, profile=None,
                        health_check_protocol='HTTP', health_check_port='traffic-port',
                        health_check_path='/', health_check_interval_seconds=30,
                        health_check_timeout_seconds=5, healthy_threshold_count=5,
                        unhealthy_threshold_count=2):
    '''
    Create target group if not present.

    name
        (string) - The name of the target group.
    protocol
        (string) - The protocol to use for routing traffic to the targets
    port
        (int) - The port on which the targets receive traffic. This port is used unless
        you specify a port override when registering the target.
    vpc_id
        (string) - The identifier of the virtual private cloud (VPC).
    health_check_protocol
        (string) - The protocol the load balancer uses when performing health checks on
        targets. The default is the HTTP protocol.
    health_check_port
        (string) - The port the load balancer uses when performing health checks on
        targets. The default is 'traffic-port', which indicates the port on which each
        target receives traffic from the load balancer.
    health_check_path
        (string) - The ping path that is the destination on the targets for health
        checks. The default is /.
    health_check_interval_seconds
        (integer) - The approximate amount of time, in seconds, between health checks
        of an individual target. The default is 30 seconds.
    health_check_timeout_seconds
        (integer) - The amount of time, in seconds, during which no response from a
        target means a failed health check. The default is 5 seconds.
    healthy_threshold_count
        (integer) - The number of consecutive health check successes required before
        considering an unhealthy target healthy. The default is 5.
    unhealthy_threshold_count
        (integer) - The number of consecutive health check failures required before
        considering a target unhealthy. The default is 2.

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.create_target_group learn1give1 protocol=HTTP port=54006 vpc_id=vpc-deadbeef
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if target_group_exists(name, region, key, keyid, profile):
        return True

    try:
        alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
                                       VpcId=vpc_id, HealthCheckProtocol=health_check_protocol,
                                       HealthCheckPort=health_check_port,
                                       HealthCheckPath=health_check_path,
                                       HealthCheckIntervalSeconds=health_check_interval_seconds,
                                       HealthCheckTimeoutSeconds=health_check_timeout_seconds,
                                       HealthyThresholdCount=healthy_threshold_count,
                                       UnhealthyThresholdCount=unhealthy_threshold_count)
        if alb:
            log.info('Created target group %s: %s', name,
                     alb['TargetGroups'][0]['TargetGroupArn'])
            return True
        else:
            log.error('Failed to create target group %s', name)
            return False
    except ClientError as error:
        log.error(
            'Failed to create target group %s: %s: %s',
            name, error.response['Error']['Code'],
            error.response['Error']['Message'],
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
def delete_target_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete target group. name (string) - Target Group Name or Amazon Resource Name (ARN). returns (bool) - True on success, False on failure. CLI example: .. code-block:: bash salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not target_group_exists(name, region, key, keyid, profile): return True try: if name.startswith('arn:aws:elasticloadbalancing'): conn.delete_target_group(TargetGroupArn=name) log.info('Deleted target group %s', name) else: tg_info = conn.describe_target_groups(Names=[name]) if len(tg_info['TargetGroups']) != 1: return False arn = tg_info['TargetGroups'][0]['TargetGroupArn'] conn.delete_target_group(TargetGroupArn=arn) log.info('Deleted target group %s ARN %s', name, arn) return True except ClientError as error: log.error('Failed to delete target group %s', name, exc_info_on_loglevel=logging.DEBUG) return False
def target_group_exists(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a target group exists.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.target_group_exists arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if name.startswith('arn:aws:elasticloadbalancing'):
            alb = conn.describe_target_groups(TargetGroupArns=[name])
        else:
            alb = conn.describe_target_groups(Names=[name])
        if alb:
            return True
        else:
            log.warning('The target group does not exist in region %s', region)
            return False
    except ClientError as error:
        log.warning('target_group_exists check for %s returned: %s', name, error)
        return False
def describe_target_health(name, targets=None, region=None, key=None, keyid=None, profile=None):
    '''
    Get the current health check status for targets in a target group.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.describe_target_health arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 targets=["i-isdf23ifjf"]
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if targets:
            targetsdict = []
            for target in targets:
                targetsdict.append({"Id": target})
            instances = conn.describe_target_health(TargetGroupArn=name, Targets=targetsdict)
        else:
            instances = conn.describe_target_health(TargetGroupArn=name)
        ret = {}
        for instance in instances['TargetHealthDescriptions']:
            ret.update({instance['Target']['Id']: instance['TargetHealth']['State']})

        return ret
    except ClientError as error:
        log.warning(error)
        return {}
def register_targets(name, targets, region=None, key=None, keyid=None,
                     profile=None):
    '''
    Register targets to a target group of an ALB. ``targets`` is either an
    instance id string or a list of instance ids.

    Returns:

    - ``True``: instance(s) registered successfully
    - ``False``: instance(s) failed to be registered

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.register_targets myelb instance_id
        salt myminion boto_elbv2.register_targets myelb "[instance_id,instance_id]"
    '''
    targetsdict = []
    if isinstance(targets, six.string_types) or isinstance(targets, six.text_type):
        targetsdict.append({"Id": targets})
    else:
        for target in targets:
            targetsdict.append({"Id": target})
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        registered_targets = conn.register_targets(TargetGroupArn=name, Targets=targetsdict)
        if registered_targets:
            return True
        return False
    except ClientError as error:
        log.warning(error)
        return False
def describe_load_balancers(names=None, load_balancer_arns=None, region=None, key=None, keyid=None, profile=None): ''' Describes the specified load balancer or all of your load balancers. Returns: list CLI example: .. code-block:: bash salt myminion boto_elbv2.describe_load_balancers salt myminion boto_elbv2.describe_load_balancers alb_name salt myminion boto_elbv2.describe_load_balancers "[alb_name,alb_name]" ''' if names and load_balancer_arns: raise SaltInvocationError('At most one of names or load_balancer_arns may ' 'be provided') if names: albs = names elif load_balancer_arns: albs = load_balancer_arns else: albs = None albs_list = [] if albs: if isinstance(albs, str) or isinstance(albs, six.text_type): albs_list.append(albs) else: for alb in albs: albs_list.append(alb) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: if names: ret = conn.describe_load_balancers(Names=albs_list)['LoadBalancers'] elif load_balancer_arns: ret = conn.describe_load_balancers(LoadBalancerArns=albs_list)['LoadBalancers'] else: ret = [] next_marker = '' while True: r = conn.describe_load_balancers(Marker=next_marker) for alb in r['LoadBalancers']: ret.append(alb) if 'NextMarker' in r: next_marker = r['NextMarker'] else: break return ret if ret else [] except ClientError as error: log.warning(error) return False
def describe_target_groups(names=None, target_group_arns=None, load_balancer_arn=None, region=None, key=None, keyid=None, profile=None): ''' Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups. Returns: list CLI example: .. code-block:: bash salt myminion boto_elbv2.describe_target_groups salt myminion boto_elbv2.describe_target_groups target_group_name salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]" ''' if names and target_group_arns: raise SaltInvocationError('At most one of names or target_group_arns may ' 'be provided') if names: target_groups = names elif target_group_arns: target_groups = target_group_arns else: target_groups = None tg_list = [] if target_groups: if isinstance(target_groups, str) or isinstance(target_groups, six.text_type): tg_list.append(target_groups) else: for group in target_groups: tg_list.append(group) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: if names: ret = conn.describe_target_groups(Names=tg_list)['TargetGroups'] elif target_group_arns: ret = conn.describe_target_groups(TargetGroupArns=tg_list)['TargetGroups'] elif load_balancer_arn: ret = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn)['TargetGroups'] else: ret = [] next_marker = '' while True: r = conn.describe_target_groups(Marker=next_marker) for alb in r['TargetGroups']: ret.append(alb) if 'NextMarker' in r: next_marker = r['NextMarker'] else: break return ret if ret else [] except ClientError as error: log.warning(error) return False
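The ``Marker``/``NextMarker`` loops in the two describe functions above can also be written so that ``Marker`` is only sent once a marker has actually been returned; a hedged standalone sketch against boto3's ``elbv2`` client (the region is a placeholder, credentials come from the normal boto3 chain):

import boto3

# Sketch of marker-based pagination for describe_load_balancers.
client = boto3.client('elbv2', region_name='us-west-2')

load_balancers = []
kwargs = {}
while True:
    page = client.describe_load_balancers(**kwargs)
    load_balancers.extend(page['LoadBalancers'])
    if 'NextMarker' in page:
        kwargs['Marker'] = page['NextMarker']  # only pass Marker once one exists
    else:
        break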
def get_parameter(name, withdecryption=False, resp_json=False, region=None, key=None, keyid=None,
                  profile=None):
    '''
    Retrieves a parameter from SSM Parameter Store

    .. versionadded:: Neon

    .. code-block:: text

        salt-call boto_ssm.get_parameter test-param withdecryption=True
    '''
    conn = __utils__['boto3.get_connection']('ssm', region=region, key=key, keyid=keyid,
                                             profile=profile)
    try:
        resp = conn.get_parameter(Name=name, WithDecryption=withdecryption)
    except conn.exceptions.ParameterNotFound:
        log.warning("get_parameter: Unable to locate name: %s", name)
        return False

    if resp_json:
        return json.loads(resp['Parameter']['Value'])
    else:
        return resp['Parameter']['Value']
def put_parameter(Name, Value, Description=None, Type='String', KeyId=None, Overwrite=False, AllowedPattern=None, region=None, key=None, keyid=None, profile=None): ''' Sets a parameter in the SSM parameter store .. versionadded:: Neon .. code-block:: text salt-call boto_ssm.put_parameter test-param test_value Type=SecureString KeyId=alias/aws/ssm Description='test encrypted key' ''' conn = __utils__['boto3.get_connection']('ssm', region=region, key=key, keyid=keyid, profile=profile) if Type not in ('String', 'StringList', 'SecureString'): raise AssertionError('Type needs to be String|StringList|SecureString') if Type == 'SecureString' and not KeyId: raise AssertionError('Require KeyId with SecureString') boto_args = {} if Description: boto_args['Description'] = Description if KeyId: boto_args['KeyId'] = KeyId if AllowedPattern: boto_args['AllowedPattern'] = AllowedPattern try: resp = conn.put_parameter(Name=Name, Value=Value, Type=Type, Overwrite=Overwrite, **boto_args) except conn.exceptions.ParameterAlreadyExists: log.warning("The parameter already exists." " To overwrite this value, set the Overwrite option in the request to True") return False return resp['Version']
def delete_parameter(Name, region=None, key=None, keyid=None, profile=None): ''' Removes a parameter from the SSM parameter store .. versionadded:: Neon .. code-block:: text salt-call boto_ssm.delete_parameter test-param ''' conn = __utils__['boto3.get_connection']('ssm', region=region, key=key, keyid=keyid, profile=profile) try: resp = conn.delete_parameter(Name=Name) except conn.exceptions.ParameterNotFound: log.warning("delete_parameter: Unable to locate name: %s", Name) return False if resp['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: return False
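For context, the three SSM helpers above wrap a simple put/get/delete round trip. A minimal sketch against boto3's ``ssm`` client directly; the parameter name, value, and region are made-up examples:

import boto3

ssm = boto3.client('ssm', region_name='us-east-1')  # placeholder region

# Create (or overwrite) a plain String parameter.
ssm.put_parameter(Name='test-param', Value='test_value', Type='String', Overwrite=True)

# Read it back; WithDecryption only matters for SecureString parameters.
value = ssm.get_parameter(Name='test-param', WithDecryption=True)['Parameter']['Value']

# Remove it again.
ssm.delete_parameter(Name='test-param')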
def base64_b64encode(instr): ''' Encode a string as base64 using the "modern" Python interface. Among other possible differences, the "modern" encoder does not include newline ('\\n') characters in the encoded output. ''' return salt.utils.stringutils.to_unicode( base64.b64encode(salt.utils.stringutils.to_bytes(instr)), encoding='utf8' if salt.utils.platform.is_windows() else None )
def base64_b64decode(instr): ''' Decode a base64-encoded string using the "modern" Python interface. ''' decoded = base64.b64decode(salt.utils.stringutils.to_bytes(instr)) try: return salt.utils.stringutils.to_unicode( decoded, encoding='utf8' if salt.utils.platform.is_windows() else None ) except UnicodeDecodeError: return decoded
def base64_encodestring(instr): ''' Encode a string as base64 using the "legacy" Python interface. Among other possible differences, the "legacy" encoder includes a newline ('\\n') character after every 76 characters and always at the end of the encoded string. ''' return salt.utils.stringutils.to_unicode( base64.encodestring(salt.utils.stringutils.to_bytes(instr)), encoding='utf8' if salt.utils.platform.is_windows() else None )
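Note that ``base64.encodestring`` was deprecated and removed in Python 3.9, so it only exists on older interpreters. A hedged compatibility sketch, mirroring the PY3/PY2 fallback that ``base64_decodestring`` below already uses for decoding:

import base64

def _encodestring_compat(data):
    # base64.encodestring is gone in Python 3.9+; fall back the same way the
    # decode helper does (encodebytes on PY3, encodestring on PY2).
    try:
        return base64.encodebytes(data)   # PY3
    except AttributeError:
        return base64.encodestring(data)  # PY2

print(_encodestring_compat(b'salt'))  # b'c2FsdA==\n'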
def base64_decodestring(instr): ''' Decode a base64-encoded string using the "legacy" Python interface. ''' b = salt.utils.stringutils.to_bytes(instr) try: # PY3 decoded = base64.decodebytes(b) except AttributeError: # PY2 decoded = base64.decodestring(b) try: return salt.utils.stringutils.to_unicode( decoded, encoding='utf8' if salt.utils.platform.is_windows() else None ) except UnicodeDecodeError: return decoded
def md5_digest(instr): ''' Generate an md5 hash of a given string. ''' return salt.utils.stringutils.to_unicode( hashlib.md5(salt.utils.stringutils.to_bytes(instr)).hexdigest() )
def sha1_digest(instr): ''' Generate an sha1 hash of a given string. ''' if six.PY3: b = salt.utils.stringutils.to_bytes(instr) return hashlib.sha1(b).hexdigest() return hashlib.sha1(instr).hexdigest()
def sha256_digest(instr): ''' Generate a sha256 hash of a given string. ''' return salt.utils.stringutils.to_unicode( hashlib.sha256(salt.utils.stringutils.to_bytes(instr)).hexdigest() )
def sha512_digest(instr): ''' Generate a sha512 hash of a given string ''' return salt.utils.stringutils.to_unicode( hashlib.sha512(salt.utils.stringutils.to_bytes(instr)).hexdigest() )
def hmac_signature(string, shared_secret, challenge_hmac):
    '''
    Verify a challenge hmac signature against a string / shared-secret.

    Returns a boolean indicating whether the verification succeeded or failed.
    '''
    msg = salt.utils.stringutils.to_bytes(string)
    key = salt.utils.stringutils.to_bytes(shared_secret)
    challenge = salt.utils.stringutils.to_bytes(challenge_hmac)
    hmac_hash = hmac.new(key, msg, hashlib.sha256)
    valid_hmac = base64.b64encode(hmac_hash.digest())
    return valid_hmac == challenge
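A short usage sketch for the verifier above, with a made-up secret and message; note that ``hmac.compare_digest`` is the constant-time alternative to a plain ``==`` comparison:

import base64
import hashlib
import hmac

secret = b'shared-secret'        # made-up values for illustration only
message = b'payload-to-verify'

# The sender transmits the base64-encoded SHA-256 HMAC digest as the challenge.
challenge = base64.b64encode(hmac.new(secret, message, hashlib.sha256).digest())

# The receiver recomputes it and compares; compare_digest avoids timing leaks.
expected = base64.b64encode(hmac.new(secret, message, hashlib.sha256).digest())
assert hmac.compare_digest(expected, challenge)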
def random_hash(size=9999999999, hash_type=None):
    '''
    Return a hash of randomized data from random.SystemRandom()
    '''
    if not hash_type:
        hash_type = 'md5'
    hasher = getattr(hashlib, hash_type)
    return hasher(salt.utils.stringutils.to_bytes(six.text_type(random.SystemRandom().randint(0, size)))).hexdigest()
def get_hash(path, form='sha256', chunk_size=65536):
    '''
    Get the hash sum of a file

    This is better than ``get_sum`` for the following reasons:
        - It does not read the entire file into memory.
        - It does not return a string on error. The returned value of
            ``get_sum`` cannot really be trusted since it is vulnerable to
            collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
    '''
    hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None
    if hash_type is None:
        raise ValueError('Invalid hash type: {0}'.format(form))

    with salt.utils.files.fopen(path, 'rb') as ifile:
        hash_obj = hash_type()
        # read the file in chunks, not the entire file at once
        for chunk in iter(lambda: ifile.read(chunk_size), b''):
            hash_obj.update(chunk)
        return hash_obj.hexdigest()
def add(self, path): ''' Update digest with the file content by path. :param path: :return: ''' with salt.utils.files.fopen(path, 'rb') as ifile: for chunk in iter(lambda: ifile.read(self.__buff), b''): self.__digest.update(chunk)
def digest(self): ''' Get digest. :return: ''' return salt.utils.stringutils.to_str(self.__digest.hexdigest() + os.linesep)
def _load_properties(property_name, config_option, set_default=False, default=None): ''' Load properties for the cassandra module from config or pillar. :param property_name: The property to load. :type property_name: str or list of str :param config_option: The name of the config option. :type config_option: str :param set_default: Should a default be set if not found in config. :type set_default: bool :param default: The default value to be set. :type default: str or int :return: The property fetched from the configuration or default. :rtype: str or list of str ''' if not property_name: log.debug("No property specified in function, trying to load from salt configuration") try: options = __salt__['config.option']('cassandra') except BaseException as e: log.error("Failed to get cassandra config options. Reason: %s", e) raise loaded_property = options.get(config_option) if not loaded_property: if set_default: log.debug('Setting default Cassandra %s to %s', config_option, default) loaded_property = default else: log.error('No cassandra %s specified in the configuration or passed to the module.', config_option) raise CommandExecutionError("ERROR: Cassandra {0} cannot be empty.".format(config_option)) return loaded_property return property_name
def _get_ssl_opts():
    '''
    Parse out ssl_options for Cassandra cluster connection.
    Make sure that the ssl_version (if any specified) is valid.
    '''
    sslopts = __salt__['config.option']('cassandra').get('ssl_options', None)
    ssl_opts = {}

    if sslopts:
        ssl_opts['ca_certs'] = sslopts['ca_certs']
        if SSL_VERSION in sslopts:
            if not sslopts[SSL_VERSION].startswith('PROTOCOL_'):
                valid_opts = ', '.join(
                    [x for x in dir(ssl) if x.startswith('PROTOCOL_')]
                )
                raise CommandExecutionError('Invalid protocol_version '
                                            'specified! '
                                            'Please make sure that the '
                                            'ssl protocol version is one '
                                            'from the SSL module. '
                                            'Valid options are '
                                            '{0}'.format(valid_opts))
            else:
                ssl_opts[SSL_VERSION] = \
                    getattr(ssl, sslopts[SSL_VERSION])
        return ssl_opts
    else:
        return None
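A quick way to see which ``ssl_version`` values the check above will accept on a given interpreter (the exact set depends on the Python/OpenSSL build):

import ssl

# Any attribute of the ssl module starting with PROTOCOL_ passes the check;
# the available names vary between Python/OpenSSL versions.
print(sorted(name for name in dir(ssl) if name.startswith('PROTOCOL_')))
# e.g. ['PROTOCOL_SSLv23', 'PROTOCOL_TLS', 'PROTOCOL_TLS_CLIENT', ...]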
def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None, protocol_version=None): ''' Connect to a Cassandra cluster. :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs. :type contact_points: str or list of str :param cql_user: The Cassandra user if authentication is turned on. :type cql_user: str :param cql_pass: The Cassandra user password if authentication is turned on. :type cql_pass: str :param port: The Cassandra cluster port, defaults to None. :type port: int :param protocol_version: Cassandra protocol version to use. :type port: int :return: The session and cluster objects. :rtype: cluster object, session object ''' # Lazy load the Cassandra cluster and session for this module by creating a # cluster and session when cql_query is called the first time. Get the # Cassandra cluster and session from this module's __context__ after it is # loaded the first time cql_query is called. # # TODO: Call cluster.shutdown() when the module is unloaded on # master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown() # do nothing to allow loaded modules to gracefully handle resources stored # in __context__ (i.e. connection pools). This means that the connection # pool is orphaned and Salt relies on Cassandra to reclaim connections. # Perhaps if Master/Minion daemons could be enhanced to call an "__unload__" # function, or something similar for each loaded module, connection pools # and the like can be gracefully reclaimed/shutdown. if (__context__ and 'cassandra_cql_returner_cluster' in __context__ and 'cassandra_cql_returner_session' in __context__): return __context__['cassandra_cql_returner_cluster'], __context__['cassandra_cql_returner_session'] else: contact_points = _load_properties(property_name=contact_points, config_option='cluster') contact_points = contact_points if isinstance(contact_points, list) else contact_points.split(',') port = _load_properties(property_name=port, config_option='port', set_default=True, default=9042) cql_user = _load_properties(property_name=cql_user, config_option='username', set_default=True, default="cassandra") cql_pass = _load_properties(property_name=cql_pass, config_option='password', set_default=True, default="cassandra") protocol_version = _load_properties(property_name=protocol_version, config_option='protocol_version', set_default=True, default=4) try: auth_provider = PlainTextAuthProvider(username=cql_user, password=cql_pass) ssl_opts = _get_ssl_opts() if ssl_opts: cluster = Cluster(contact_points, port=port, auth_provider=auth_provider, ssl_options=ssl_opts, protocol_version=protocol_version, compression=True) else: cluster = Cluster(contact_points, port=port, auth_provider=auth_provider, protocol_version=protocol_version, compression=True) for recontimes in range(1, 4): try: session = cluster.connect() break except OperationTimedOut: log.warning('Cassandra cluster.connect timed out, try %s', recontimes) if recontimes >= 3: raise # TODO: Call cluster.shutdown() when the module is unloaded on shutdown. 
__context__['cassandra_cql_returner_cluster'] = cluster __context__['cassandra_cql_returner_session'] = session __context__['cassandra_cql_prepared'] = {} log.debug('Successfully connected to Cassandra cluster at %s', contact_points) return cluster, session except TypeError: pass except (ConnectionException, ConnectionShutdown, NoHostAvailable): log.error('Could not connect to Cassandra cluster at %s', contact_points) raise CommandExecutionError('ERROR: Could not connect to Cassandra cluster.')
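The lazy-connect logic in ``_connect`` above boils down to a get-or-create cache keyed on Salt's ``__context__``. A stripped-down sketch with a plain dict standing in for ``__context__`` and a hypothetical factory for the cluster object:

# A plain dict stands in for Salt's per-module __context__; build_cluster is a
# hypothetical factory, e.g. a closure around Cluster(contact_points, ...).
_context = {}

def get_cluster_and_session(build_cluster):
    if 'cluster' in _context and 'session' in _context:
        return _context['cluster'], _context['session']   # reuse cached objects
    cluster = build_cluster()
    session = cluster.connect()
    _context['cluster'] = cluster
    _context['session'] = session
    return cluster, session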
def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=None): ''' Run a query on a Cassandra cluster and return a dictionary. :param query: The query to execute. :type query: str :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs. :type contact_points: str | list[str] :param cql_user: The Cassandra user if authentication is turned on. :type cql_user: str :param cql_pass: The Cassandra user password if authentication is turned on. :type cql_pass: str :param port: The Cassandra cluster port, defaults to None. :type port: int :param params: The parameters for the query, optional. :type params: str :return: A dictionary from the return values of the query :rtype: list[dict] CLI Example: .. code-block:: bash salt 'cassandra-server' cassandra_cql.cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'" ''' try: cluster, session = _connect(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass) except CommandExecutionError: log.critical('Could not get Cassandra cluster session.') raise except BaseException as e: log.critical('Unexpected error while getting Cassandra cluster session: %s', e) raise session.row_factory = dict_factory ret = [] # Cassandra changed their internal schema from v2 to v3 # If the query contains a dictionary sorted by versions # Find the query for the current cluster version. # https://issues.apache.org/jira/browse/CASSANDRA-6717 if isinstance(query, dict): cluster_version = version(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass) match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', cluster_version) major, minor, point = match.groups() # try to find the specific version in the query dictionary # then try the major version # otherwise default to the highest version number try: query = query[cluster_version] except KeyError: query = query.get(major, max(query)) log.debug('New query is: %s', query) try: results = session.execute(query) except BaseException as e: log.error('Failed to execute query: %s\n reason: %s', query, e) msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(query, e) raise CommandExecutionError(msg) if results: for result in results: values = {} for key, value in six.iteritems(result): # Salt won't return dictionaries with odd types like uuid.UUID if not isinstance(value, six.text_type): # Must support Cassandra collection types. # Namely, Cassandras set, list, and map collections. if not isinstance(value, (set, list, dict)): value = six.text_type(value) values[key] = value ret.append(values) return ret
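The version-keyed query lookup in ``cql_query`` falls back from the exact cluster version, to the major version, to the highest key in the dict. A self-contained sketch with placeholder query strings (the version string is made up):

import re

# Placeholder queries keyed by schema version; the real module keys on '2' and '3'.
query = {
    '2': 'placeholder query for the v2 system schema;',
    '3': 'placeholder query for the v3 system schema;',
}

cluster_version = '3.11.4'
major, minor, point = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', cluster_version).groups()

try:
    chosen = query[cluster_version]        # exact version match first
except KeyError:
    chosen = query.get(major, max(query))  # then major version, then highest key

assert chosen == query['3']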
def cql_query_with_prepare(query, statement_name, statement_arguments, callback_errors=None, contact_points=None, port=None, cql_user=None, cql_pass=None, **kwargs): ''' Run a query on a Cassandra cluster and return a dictionary. This function should not be used asynchronously for SELECTs -- it will not return anything and we don't currently have a mechanism for handling a future that will return results. :param query: The query to execute. :type query: str :param statement_name: Name to assign the prepared statement in the __context__ dictionary :type statement_name: str :param statement_arguments: Bind parameters for the SQL statement :type statement_arguments: list[str] :param async: Run this query in asynchronous mode :type async: bool :param callback_errors: Function to call after query runs if there is an error :type callback_errors: Function callable :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs. :type contact_points: str | list[str] :param cql_user: The Cassandra user if authentication is turned on. :type cql_user: str :param cql_pass: The Cassandra user password if authentication is turned on. :type cql_pass: str :param port: The Cassandra cluster port, defaults to None. :type port: int :param params: The parameters for the query, optional. :type params: str :return: A dictionary from the return values of the query :rtype: list[dict] CLI Example: .. code-block:: bash # Insert data asynchronously salt this-node cassandra_cql.cql_query_with_prepare "name_insert" "INSERT INTO USERS (first_name, last_name) VALUES (?, ?)" \ statement_arguments=['John','Doe'], asynchronous=True # Select data, should not be asynchronous because there is not currently a facility to return data from a future salt this-node cassandra_cql.cql_query_with_prepare "name_select" "SELECT * FROM USERS WHERE first_name=?" \ statement_arguments=['John'] ''' # Backward-compatibility with Python 3.7: "async" is a reserved word asynchronous = kwargs.get('async', False) try: cluster, session = _connect(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass) except CommandExecutionError: log.critical('Could not get Cassandra cluster session.') raise except BaseException as e: log.critical('Unexpected error while getting Cassandra cluster session: %s', e) raise if statement_name not in __context__['cassandra_cql_prepared']: try: bound_statement = session.prepare(query) __context__['cassandra_cql_prepared'][statement_name] = bound_statement except BaseException as e: log.critical('Unexpected error while preparing SQL statement: %s', e) raise else: bound_statement = __context__['cassandra_cql_prepared'][statement_name] session.row_factory = dict_factory ret = [] try: if asynchronous: future_results = session.execute_async(bound_statement.bind(statement_arguments)) # future_results.add_callbacks(_async_log_errors) else: results = session.execute(bound_statement.bind(statement_arguments)) except BaseException as e: log.error('Failed to execute query: %s\n reason: %s', query, e) msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(query, e) raise CommandExecutionError(msg) if not asynchronous and results: for result in results: values = {} for key, value in six.iteritems(result): # Salt won't return dictionaries with odd types like uuid.UUID if not isinstance(value, six.text_type): # Must support Cassandra collection types. # Namely, Cassandras set, list, and map collections. 
if not isinstance(value, (set, list, dict)): value = six.text_type(value) values[key] = value ret.append(values) # If this was a synchronous call, then we either have an empty list # because there was no return, or we have a return # If this was an asynchronous call we only return the empty list return ret
def list_column_families(keyspace=None, contact_points=None, port=None, cql_user=None, cql_pass=None): ''' List column families in a Cassandra cluster for all keyspaces or just the provided one. :param keyspace: The keyspace to provide the column families for, optional. :type keyspace: str :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs. :type contact_points: str | list[str] :param cql_user: The Cassandra user if authentication is turned on. :type cql_user: str :param cql_pass: The Cassandra user password if authentication is turned on. :type cql_pass: str :param port: The Cassandra cluster port, defaults to None. :type port: int :return: The column families in this Cassandra cluster. :rtype: list[dict] CLI Example: .. code-block:: bash salt 'minion1' cassandra_cql.list_column_families salt 'minion1' cassandra_cql.list_column_families contact_points=minion1 salt 'minion1' cassandra_cql.list_column_families keyspace=system ''' where_clause = "where keyspace_name = '{0}'".format(keyspace) if keyspace else "" query = { '2': '''select columnfamily_name from system.schema_columnfamilies {0};'''.format(where_clause), '3': '''select column_name from system_schema.columns {0};'''.format(where_clause), } ret = {} try: ret = cql_query(query, contact_points, port, cql_user, cql_pass) except CommandExecutionError: log.critical('Could not list column families.') raise except BaseException as e: log.critical('Unexpected error while listing column families: %s', e) raise return ret
def keyspace_exists(keyspace, contact_points=None, port=None, cql_user=None, cql_pass=None):
    '''
    Check if a keyspace exists in a Cassandra cluster.

    :param keyspace: The keyspace name to check for.
    :type keyspace: str
    :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
    :type contact_points: str | list[str]
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :return: True if the keyspace exists, False otherwise.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt 'minion1' cassandra_cql.keyspace_exists keyspace=system
    '''
    query = {
        '2': '''select keyspace_name from system.schema_keyspaces where keyspace_name = '{0}';'''.format(keyspace),
        '3': '''select keyspace_name from system_schema.keyspaces where keyspace_name = '{0}';'''.format(keyspace),
    }

    try:
        ret = cql_query(query, contact_points, port, cql_user, cql_pass)
    except CommandExecutionError:
        log.critical('Could not determine if keyspace exists.')
        raise
    except BaseException as e:
        log.critical('Unexpected error while determining if keyspace exists: %s', e)
        raise

    return True if ret else False
def create_keyspace(keyspace, replication_strategy='SimpleStrategy', replication_factor=1,
                    replication_datacenters=None, contact_points=None, port=None,
                    cql_user=None, cql_pass=None):
    '''
    Create a new keyspace in Cassandra.

    :param keyspace: The keyspace name
    :type keyspace: str
    :param replication_strategy: either `SimpleStrategy` or `NetworkTopologyStrategy`
    :type replication_strategy: str
    :param replication_factor: number of replicas of data on multiple nodes. not used if using NetworkTopologyStrategy
    :type replication_factor: int
    :param replication_datacenters: string or dict of datacenter names to replication factors, required if using
                                    NetworkTopologyStrategy (will be a dict if coming from state file).
    :type replication_datacenters: str | dict[str, int]
    :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
    :type contact_points: str | list[str]
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :return: None on success, False if the replication_datacenters JSON could not be parsed.
    :rtype: None | bool

    CLI Example:

    .. code-block:: bash

        salt 'minion1' cassandra_cql.create_keyspace keyspace=newkeyspace

        salt 'minion1' cassandra_cql.create_keyspace keyspace=newkeyspace replication_strategy=NetworkTopologyStrategy \
            replication_datacenters='{"datacenter_1": 3, "datacenter_2": 2}'
    '''
    existing_keyspace = keyspace_exists(keyspace, contact_points, port)
    if not existing_keyspace:
        # Add the strategy, replication_factor, etc.
        replication_map = {
            'class': replication_strategy
        }

        if replication_datacenters:
            if isinstance(replication_datacenters, six.string_types):
                try:
                    replication_datacenter_map = salt.utils.json.loads(replication_datacenters)
                    replication_map.update(**replication_datacenter_map)
                except BaseException:  # pylint: disable=W0703
                    log.error("Could not load json replication_datacenters.")
                    return False
            else:
                replication_map.update(**replication_datacenters)
        else:
            replication_map['replication_factor'] = replication_factor

        query = '''create keyspace {0} with replication = {1} and durable_writes = true;'''.format(keyspace, replication_map)

        try:
            cql_query(query, contact_points, port, cql_user, cql_pass)
        except CommandExecutionError:
            log.critical('Could not create keyspace.')
            raise
        except BaseException as e:
            log.critical('Unexpected error while creating keyspace: %s', e)
            raise
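Because the replication map is interpolated with ``str.format``, the dict's Python repr ends up in the CQL text; that happens to be valid CQL map syntax (single-quoted string keys, integer values). A small illustration with made-up datacenter names (key order may differ on older interpreters):

# Made-up keyspace and datacenters, showing the CQL the format call produces.
replication_map = {'class': 'NetworkTopologyStrategy', 'datacenter_1': 3, 'datacenter_2': 2}
keyspace = 'newkeyspace'

cql = '''create keyspace {0} with replication = {1} and durable_writes = true;'''.format(
    keyspace, replication_map)

print(cql)
# create keyspace newkeyspace with replication = {'class': 'NetworkTopologyStrategy',
# 'datacenter_1': 3, 'datacenter_2': 2} and durable_writes = true;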
def drop_keyspace(keyspace, contact_points=None, port=None, cql_user=None, cql_pass=None):
    '''
    Drop a keyspace if it exists in a Cassandra cluster.

    :param keyspace: The keyspace to drop.
    :type keyspace: str
    :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
    :type contact_points: str | list[str]
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :return: True on success (including when the keyspace does not exist); exceptions are raised on failure.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt 'minion1' cassandra_cql.drop_keyspace keyspace=test

        salt 'minion1' cassandra_cql.drop_keyspace keyspace=test contact_points=minion1
    '''
    existing_keyspace = keyspace_exists(keyspace, contact_points, port)
    if existing_keyspace:
        query = '''drop keyspace {0};'''.format(keyspace)
        try:
            cql_query(query, contact_points, port, cql_user, cql_pass)
        except CommandExecutionError:
            log.critical('Could not drop keyspace.')
            raise
        except BaseException as e:
            log.critical('Unexpected error while dropping keyspace: %s', e)
            raise

    return True