Unnamed: 0
int64
0
10k
source
stringlengths
27
7.27k
target
stringlengths
54
7.29k
400
public static Map < String , String > getExternalResourceConfigurationKeys ( Configuration config , String suffix ) { final Set < String > resourceSet = getExternalResourceSet ( config ) ; final Map < String , String > configKeysToResourceNameMap = new HashMap < > ( ) ; LOG . info ( "Enabled external resources: {}" , resourceSet ) ; if ( resourceSet . isEmpty ( ) ) { return Collections . emptyMap ( ) ; } final Map < String , String > externalResourceConfigs = new HashMap < > ( ) ; for ( String resourceName : resourceSet ) { final ConfigOption < String > configKeyOption = key ( ExternalResourceOptions . getSystemConfigKeyConfigOptionForResource ( resourceName , suffix ) ) . stringType ( ) . noDefaultValue ( ) ; final String configKey = config . get ( configKeyOption ) ; if ( StringUtils . isNullOrWhitespaceOnly ( configKey ) ) { LOG . warn ( "Could not find valid {} for {}. Will ignore that resource." , configKeyOption . key ( ) , resourceName ) ; } else { externalResourceConfigs . put ( resourceName , configKey ) ; } } return externalResourceConfigs ; }
public static Map < String , String > getExternalResourceConfigurationKeys ( Configuration config , String suffix ) { final Set < String > resourceSet = getExternalResourceSet ( config ) ; final Map < String , String > configKeysToResourceNameMap = new HashMap < > ( ) ; LOG . info ( "Enabled external resources: {}" , resourceSet ) ; if ( resourceSet . isEmpty ( ) ) { return Collections . emptyMap ( ) ; } final Map < String , String > externalResourceConfigs = new HashMap < > ( ) ; for ( String resourceName : resourceSet ) { final ConfigOption < String > configKeyOption = key ( ExternalResourceOptions . getSystemConfigKeyConfigOptionForResource ( resourceName , suffix ) ) . stringType ( ) . noDefaultValue ( ) ; final String configKey = config . get ( configKeyOption ) ; if ( StringUtils . isNullOrWhitespaceOnly ( configKey ) ) { LOG . warn ( "Could not find valid {} for {}. Will ignore that resource." , configKeyOption . key ( ) , resourceName ) ; } else { configKeysToResourceNameMap . compute ( configKey , ( ignored , previousResource ) -> { if ( previousResource != null ) { LOG . warn ( "Duplicate config key {} occurred for external resources, the one named {} will overwrite the value." , configKey , resourceName ) ; externalResourceConfigs . remove ( previousResource ) ; } return resourceName ; } ) ; externalResourceConfigs . put ( resourceName , configKey ) ; } } return externalResourceConfigs ; }
401
public static Map < String , String > getExternalResourceConfigurationKeys ( Configuration config , String suffix ) { final Set < String > resourceSet = getExternalResourceSet ( config ) ; final Map < String , String > configKeysToResourceNameMap = new HashMap < > ( ) ; LOG . info ( "Enabled external resources: {}" , resourceSet ) ; if ( resourceSet . isEmpty ( ) ) { return Collections . emptyMap ( ) ; } final Map < String , String > externalResourceConfigs = new HashMap < > ( ) ; for ( String resourceName : resourceSet ) { final ConfigOption < String > configKeyOption = key ( ExternalResourceOptions . getSystemConfigKeyConfigOptionForResource ( resourceName , suffix ) ) . stringType ( ) . noDefaultValue ( ) ; final String configKey = config . get ( configKeyOption ) ; if ( StringUtils . isNullOrWhitespaceOnly ( configKey ) ) { LOG . warn ( "Could not find valid {} for {}. Will ignore that resource." , configKeyOption . key ( ) , resourceName ) ; } else { configKeysToResourceNameMap . compute ( configKey , ( ignored , previousResource ) -> { if ( previousResource != null ) { externalResourceConfigs . remove ( previousResource ) ; } return resourceName ; } ) ; externalResourceConfigs . put ( resourceName , configKey ) ; } } return externalResourceConfigs ; }
public static Map < String , String > getExternalResourceConfigurationKeys ( Configuration config , String suffix ) { final Set < String > resourceSet = getExternalResourceSet ( config ) ; final Map < String , String > configKeysToResourceNameMap = new HashMap < > ( ) ; LOG . info ( "Enabled external resources: {}" , resourceSet ) ; if ( resourceSet . isEmpty ( ) ) { return Collections . emptyMap ( ) ; } final Map < String , String > externalResourceConfigs = new HashMap < > ( ) ; for ( String resourceName : resourceSet ) { final ConfigOption < String > configKeyOption = key ( ExternalResourceOptions . getSystemConfigKeyConfigOptionForResource ( resourceName , suffix ) ) . stringType ( ) . noDefaultValue ( ) ; final String configKey = config . get ( configKeyOption ) ; if ( StringUtils . isNullOrWhitespaceOnly ( configKey ) ) { LOG . warn ( "Could not find valid {} for {}. Will ignore that resource." , configKeyOption . key ( ) , resourceName ) ; } else { configKeysToResourceNameMap . compute ( configKey , ( ignored , previousResource ) -> { if ( previousResource != null ) { LOG . warn ( "Duplicate config key {} occurred for external resources, the one named {} will overwrite the value." , configKey , resourceName ) ; externalResourceConfigs . remove ( previousResource ) ; } return resourceName ; } ) ; externalResourceConfigs . put ( resourceName , configKey ) ; } } return externalResourceConfigs ; }
402
public void testGetFileByPathNoPermissions ( ) throws Exception { ByteArrayContainerResponseWriter writer = new ByteArrayContainerResponseWriter ( ) ; String requestPath = SERVICE_URI + "itembypath" + protectedFilePath ; ContainerResponse response = launcher . service ( HttpMethod . GET , requestPath , BASE_URI , null , null , writer , null ) ; assertEquals ( 403 , response . getStatus ( ) ) ; }
public void testGetFileByPathNoPermissions ( ) throws Exception { ByteArrayContainerResponseWriter writer = new ByteArrayContainerResponseWriter ( ) ; String requestPath = SERVICE_URI + "itembypath" + protectedFilePath ; ContainerResponse response = launcher . service ( HttpMethod . GET , requestPath , BASE_URI , null , null , writer , null ) ; assertEquals ( 403 , response . getStatus ( ) ) ; log . info ( new String ( writer . getBody ( ) ) ) ; }
403
public Collection < GrantedAuthority > getContextualAuthorities ( boolean isSelf , Entity entity , SecurityUtil . UserContext context , boolean isRead ) { Collection < GrantedAuthority > auths = new HashSet < GrantedAuthority > ( ) ; SLIPrincipal principal = SecurityUtil . getSLIPrincipal ( ) ; if ( SecurityUtil . isStaffUser ( ) ) { if ( entity == null ) { } else { if ( ( entity . getMetaData ( ) != null && SecurityUtil . principalId ( ) . equals ( entity . getMetaData ( ) . get ( "createdBy" ) ) && "true" . equals ( entity . getMetaData ( ) . get ( "isOrphaned" ) ) ) || ( EntityNames . isPublic ( entity . getType ( ) ) && ! edOrgOwnershipArbiter . isEntityOwnedByEdOrg ( entity . getType ( ) ) ) ) { auths . addAll ( principal . getAllContextRights ( isSelf ) ) ; } else if ( EntityNames . isPublic ( entity . getType ( ) ) && principal . getAllContextRights ( isSelf ) . contains ( Right . READ_PUBLIC ) && isRead ) { auths . add ( Right . READ_PUBLIC ) ; } else { auths . addAll ( entityEdOrgRightBuilder . buildEntityEdOrgContextRights ( principal . getEdOrgContextRights ( ) , entity , context , isRead ) ) ; } if ( isSelf ) { auths . addAll ( entityEdOrgRightBuilder . buildEntityEdOrgRights ( principal . getEdOrgSelfRights ( ) , entity , isRead ) ) ; } } } else { Authentication auth = SecurityContextHolder . getContext ( ) . getAuthentication ( ) ; auths . addAll ( auth . getAuthorities ( ) ) ; if ( isSelf ) { auths . addAll ( principal . getSelfRights ( ) ) ; } } return auths ; }
public Collection < GrantedAuthority > getContextualAuthorities ( boolean isSelf , Entity entity , SecurityUtil . UserContext context , boolean isRead ) { Collection < GrantedAuthority > auths = new HashSet < GrantedAuthority > ( ) ; SLIPrincipal principal = SecurityUtil . getSLIPrincipal ( ) ; if ( SecurityUtil . isStaffUser ( ) ) { if ( entity == null ) { LOG . trace ( "No authority for null" ) ; } else { if ( ( entity . getMetaData ( ) != null && SecurityUtil . principalId ( ) . equals ( entity . getMetaData ( ) . get ( "createdBy" ) ) && "true" . equals ( entity . getMetaData ( ) . get ( "isOrphaned" ) ) ) || ( EntityNames . isPublic ( entity . getType ( ) ) && ! edOrgOwnershipArbiter . isEntityOwnedByEdOrg ( entity . getType ( ) ) ) ) { auths . addAll ( principal . getAllContextRights ( isSelf ) ) ; } else if ( EntityNames . isPublic ( entity . getType ( ) ) && principal . getAllContextRights ( isSelf ) . contains ( Right . READ_PUBLIC ) && isRead ) { auths . add ( Right . READ_PUBLIC ) ; } else { auths . addAll ( entityEdOrgRightBuilder . buildEntityEdOrgContextRights ( principal . getEdOrgContextRights ( ) , entity , context , isRead ) ) ; } if ( isSelf ) { auths . addAll ( entityEdOrgRightBuilder . buildEntityEdOrgRights ( principal . getEdOrgSelfRights ( ) , entity , isRead ) ) ; } } } else { Authentication auth = SecurityContextHolder . getContext ( ) . getAuthentication ( ) ; auths . addAll ( auth . getAuthorities ( ) ) ; if ( isSelf ) { auths . addAll ( principal . getSelfRights ( ) ) ; } } return auths ; }
404
public Class getEntryType ( String component , String name ) throws ConfigurationException { if ( component == null ) { throw new NullPointerException ( "component cannot be null" ) ; } else if ( ! validQualifiedIdentifier ( component ) ) { throw new IllegalArgumentException ( "component must be a valid qualified identifier" ) ; } else if ( name == null ) { throw new NullPointerException ( "name cannot be null" ) ; } else if ( ! validIdentifier ( name ) ) { throw new IllegalArgumentException ( "name must be a valid identifier" ) ; } Entry entry = ( Entry ) entries . get ( component + '.' + name ) ; if ( entry == null || entry . isPrivate ) { if ( logger . isDebugEnabled ( ) ) { } oopsNoSuchEntry ( "entry not found for component " + component + ", name " + name ) ; } ConfigurationException configEx ; try { Class result = entry . resolve ( null ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}, component {}, name {}: returns {}" , this , component , name , result ) ; } return result ; } catch ( ConfigurationException e ) { configEx = e ; } catch ( RuntimeException e ) { String description = "problem getting type of entry for component " + component + ", name " + name ; configEx = null ; try { oops ( description , entry . lineno , entry . override , e ) ; } catch ( ConfigurationException ce ) { configEx = ce ; } } if ( logger . isDebugEnabled ( ) ) { String message = LogUtils . format ( this . getClass ( ) , "getEntryType" , "{}, component {}, name {}: throws" , this , component , name ) ; logger . debug ( message , configEx ) ; } throw configEx ; }
public Class getEntryType ( String component , String name ) throws ConfigurationException { if ( component == null ) { throw new NullPointerException ( "component cannot be null" ) ; } else if ( ! validQualifiedIdentifier ( component ) ) { throw new IllegalArgumentException ( "component must be a valid qualified identifier" ) ; } else if ( name == null ) { throw new NullPointerException ( "name cannot be null" ) ; } else if ( ! validIdentifier ( name ) ) { throw new IllegalArgumentException ( "name must be a valid identifier" ) ; } Entry entry = ( Entry ) entries . get ( component + '.' + name ) ; if ( entry == null || entry . isPrivate ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "entry for component {}, name {} not found in {}" , component , name , this ) ; } oopsNoSuchEntry ( "entry not found for component " + component + ", name " + name ) ; } ConfigurationException configEx ; try { Class result = entry . resolve ( null ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}, component {}, name {}: returns {}" , this , component , name , result ) ; } return result ; } catch ( ConfigurationException e ) { configEx = e ; } catch ( RuntimeException e ) { String description = "problem getting type of entry for component " + component + ", name " + name ; configEx = null ; try { oops ( description , entry . lineno , entry . override , e ) ; } catch ( ConfigurationException ce ) { configEx = ce ; } } if ( logger . isDebugEnabled ( ) ) { String message = LogUtils . format ( this . getClass ( ) , "getEntryType" , "{}, component {}, name {}: throws" , this , component , name ) ; logger . debug ( message , configEx ) ; } throw configEx ; }
405
public Class getEntryType ( String component , String name ) throws ConfigurationException { if ( component == null ) { throw new NullPointerException ( "component cannot be null" ) ; } else if ( ! validQualifiedIdentifier ( component ) ) { throw new IllegalArgumentException ( "component must be a valid qualified identifier" ) ; } else if ( name == null ) { throw new NullPointerException ( "name cannot be null" ) ; } else if ( ! validIdentifier ( name ) ) { throw new IllegalArgumentException ( "name must be a valid identifier" ) ; } Entry entry = ( Entry ) entries . get ( component + '.' + name ) ; if ( entry == null || entry . isPrivate ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "entry for component {}, name {} not found in {}" , component , name , this ) ; } oopsNoSuchEntry ( "entry not found for component " + component + ", name " + name ) ; } ConfigurationException configEx ; try { Class result = entry . resolve ( null ) ; if ( logger . isDebugEnabled ( ) ) { } return result ; } catch ( ConfigurationException e ) { configEx = e ; } catch ( RuntimeException e ) { String description = "problem getting type of entry for component " + component + ", name " + name ; configEx = null ; try { oops ( description , entry . lineno , entry . override , e ) ; } catch ( ConfigurationException ce ) { configEx = ce ; } } if ( logger . isDebugEnabled ( ) ) { String message = LogUtils . format ( this . getClass ( ) , "getEntryType" , "{}, component {}, name {}: throws" , this , component , name ) ; logger . debug ( message , configEx ) ; } throw configEx ; }
public Class getEntryType ( String component , String name ) throws ConfigurationException { if ( component == null ) { throw new NullPointerException ( "component cannot be null" ) ; } else if ( ! validQualifiedIdentifier ( component ) ) { throw new IllegalArgumentException ( "component must be a valid qualified identifier" ) ; } else if ( name == null ) { throw new NullPointerException ( "name cannot be null" ) ; } else if ( ! validIdentifier ( name ) ) { throw new IllegalArgumentException ( "name must be a valid identifier" ) ; } Entry entry = ( Entry ) entries . get ( component + '.' + name ) ; if ( entry == null || entry . isPrivate ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "entry for component {}, name {} not found in {}" , component , name , this ) ; } oopsNoSuchEntry ( "entry not found for component " + component + ", name " + name ) ; } ConfigurationException configEx ; try { Class result = entry . resolve ( null ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}, component {}, name {}: returns {}" , this , component , name , result ) ; } return result ; } catch ( ConfigurationException e ) { configEx = e ; } catch ( RuntimeException e ) { String description = "problem getting type of entry for component " + component + ", name " + name ; configEx = null ; try { oops ( description , entry . lineno , entry . override , e ) ; } catch ( ConfigurationException ce ) { configEx = ce ; } } if ( logger . isDebugEnabled ( ) ) { String message = LogUtils . format ( this . getClass ( ) , "getEntryType" , "{}, component {}, name {}: throws" , this , component , name ) ; logger . debug ( message , configEx ) ; } throw configEx ; }
406
public Class getEntryType ( String component , String name ) throws ConfigurationException { if ( component == null ) { throw new NullPointerException ( "component cannot be null" ) ; } else if ( ! validQualifiedIdentifier ( component ) ) { throw new IllegalArgumentException ( "component must be a valid qualified identifier" ) ; } else if ( name == null ) { throw new NullPointerException ( "name cannot be null" ) ; } else if ( ! validIdentifier ( name ) ) { throw new IllegalArgumentException ( "name must be a valid identifier" ) ; } Entry entry = ( Entry ) entries . get ( component + '.' + name ) ; if ( entry == null || entry . isPrivate ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "entry for component {}, name {} not found in {}" , component , name , this ) ; } oopsNoSuchEntry ( "entry not found for component " + component + ", name " + name ) ; } ConfigurationException configEx ; try { Class result = entry . resolve ( null ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}, component {}, name {}: returns {}" , this , component , name , result ) ; } return result ; } catch ( ConfigurationException e ) { configEx = e ; } catch ( RuntimeException e ) { String description = "problem getting type of entry for component " + component + ", name " + name ; configEx = null ; try { oops ( description , entry . lineno , entry . override , e ) ; } catch ( ConfigurationException ce ) { configEx = ce ; } } if ( logger . isDebugEnabled ( ) ) { String message = LogUtils . format ( this . getClass ( ) , "getEntryType" , "{}, component {}, name {}: throws" , this , component , name ) ; } throw configEx ; }
public Class getEntryType ( String component , String name ) throws ConfigurationException { if ( component == null ) { throw new NullPointerException ( "component cannot be null" ) ; } else if ( ! validQualifiedIdentifier ( component ) ) { throw new IllegalArgumentException ( "component must be a valid qualified identifier" ) ; } else if ( name == null ) { throw new NullPointerException ( "name cannot be null" ) ; } else if ( ! validIdentifier ( name ) ) { throw new IllegalArgumentException ( "name must be a valid identifier" ) ; } Entry entry = ( Entry ) entries . get ( component + '.' + name ) ; if ( entry == null || entry . isPrivate ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "entry for component {}, name {} not found in {}" , component , name , this ) ; } oopsNoSuchEntry ( "entry not found for component " + component + ", name " + name ) ; } ConfigurationException configEx ; try { Class result = entry . resolve ( null ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}, component {}, name {}: returns {}" , this , component , name , result ) ; } return result ; } catch ( ConfigurationException e ) { configEx = e ; } catch ( RuntimeException e ) { String description = "problem getting type of entry for component " + component + ", name " + name ; configEx = null ; try { oops ( description , entry . lineno , entry . override , e ) ; } catch ( ConfigurationException ce ) { configEx = ce ; } } if ( logger . isDebugEnabled ( ) ) { String message = LogUtils . format ( this . getClass ( ) , "getEntryType" , "{}, component {}, name {}: throws" , this , component , name ) ; logger . debug ( message , configEx ) ; } throw configEx ; }
407
public void registerTransient ( long executionId , ExportTransientUserData transientUserData , String cosBucketPathPrefix , String fhirResourceType , boolean isExportPublic ) throws Exception { if ( transientUserData == null ) { throw new Exception ( "registerTransient: chunkData is null, this should never happen!" ) ; } this . executionId = executionId ; this . chunkData = transientUserData ; this . cosBucketPathPrefix = cosBucketPathPrefix ; this . fhirResourceType = fhirResourceType ; this . isExportPublic = isExportPublic ; }
public void registerTransient ( long executionId , ExportTransientUserData transientUserData , String cosBucketPathPrefix , String fhirResourceType , boolean isExportPublic ) throws Exception { if ( transientUserData == null ) { logger . warning ( "registerTransient: chunkData is null, this should never happen!" ) ; throw new Exception ( "registerTransient: chunkData is null, this should never happen!" ) ; } this . executionId = executionId ; this . chunkData = transientUserData ; this . cosBucketPathPrefix = cosBucketPathPrefix ; this . fhirResourceType = fhirResourceType ; this . isExportPublic = isExportPublic ; }
408
public void run ( ) { CQSControllerServlet . valueAccumulator . initializeAllCounters ( ) ; boolean brokenJedis = false ; long ts1 = System . currentTimeMillis ( ) ; ShardedJedis jedis = getResource ( ) ; try { jedis . del ( queueUrl + "-" + shard + "-Q" ) ; jedis . del ( queueUrl + "-" + shard + "-H" ) ; jedis . del ( queueUrl + "-" + shard + "-R" ) ; String previousReceiptHandle = null ; List < CQSMessage > messages = persistenceStorage . peekQueue ( queueUrl , shard , null , null , 1000 ) ; int totalCached = 0 ; while ( messages . size ( ) != 0 ) { for ( CQSMessage message : messages ) { addMessageToCache ( queueUrl , shard , message , jedis ) ; totalCached ++ ; } previousReceiptHandle = messages . get ( messages . size ( ) - 1 ) . getMessageId ( ) ; messages = persistenceStorage . peekQueue ( queueUrl , shard , previousReceiptHandle , null , 1000 ) ; } setCacheState ( queueUrl , shard , QCacheState . OK , null , false ) ; setCacheFillerProcessing ( queueUrl , shard , 0 ) ; long ts3 = System . currentTimeMillis ( ) ; logger . info ( "event=cache_filler_finished queue_url=" + queueUrl + " shard=" + shard + " num_cached=" + totalCached + " total_ms=" + ( ts3 - ts1 ) + " redis_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . RedisTime ) + " cass_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . CassandraTime ) ) ; } catch ( Exception e ) { if ( e instanceof JedisException ) { brokenJedis = true ; } logger . error ( "event=cache_filler_failed" , e ) ; trySettingCacheState ( queueUrl , shard , QCacheState . Unavailable ) ; } finally { returnResource ( jedis , brokenJedis ) ; CQSControllerServlet . valueAccumulator . deleteAllCounters ( ) ; } }
public void run ( ) { CQSControllerServlet . valueAccumulator . initializeAllCounters ( ) ; boolean brokenJedis = false ; long ts1 = System . currentTimeMillis ( ) ; ShardedJedis jedis = getResource ( ) ; try { logger . info ( "event=cache_filler_started queue_url=" + queueUrl + " shard=" + shard ) ; jedis . del ( queueUrl + "-" + shard + "-Q" ) ; jedis . del ( queueUrl + "-" + shard + "-H" ) ; jedis . del ( queueUrl + "-" + shard + "-R" ) ; String previousReceiptHandle = null ; List < CQSMessage > messages = persistenceStorage . peekQueue ( queueUrl , shard , null , null , 1000 ) ; int totalCached = 0 ; while ( messages . size ( ) != 0 ) { for ( CQSMessage message : messages ) { addMessageToCache ( queueUrl , shard , message , jedis ) ; totalCached ++ ; } previousReceiptHandle = messages . get ( messages . size ( ) - 1 ) . getMessageId ( ) ; messages = persistenceStorage . peekQueue ( queueUrl , shard , previousReceiptHandle , null , 1000 ) ; } setCacheState ( queueUrl , shard , QCacheState . OK , null , false ) ; setCacheFillerProcessing ( queueUrl , shard , 0 ) ; long ts3 = System . currentTimeMillis ( ) ; logger . info ( "event=cache_filler_finished queue_url=" + queueUrl + " shard=" + shard + " num_cached=" + totalCached + " total_ms=" + ( ts3 - ts1 ) + " redis_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . RedisTime ) + " cass_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . CassandraTime ) ) ; } catch ( Exception e ) { if ( e instanceof JedisException ) { brokenJedis = true ; } logger . error ( "event=cache_filler_failed" , e ) ; trySettingCacheState ( queueUrl , shard , QCacheState . Unavailable ) ; } finally { returnResource ( jedis , brokenJedis ) ; CQSControllerServlet . valueAccumulator . deleteAllCounters ( ) ; } }
409
public void run ( ) { CQSControllerServlet . valueAccumulator . initializeAllCounters ( ) ; boolean brokenJedis = false ; long ts1 = System . currentTimeMillis ( ) ; ShardedJedis jedis = getResource ( ) ; try { logger . info ( "event=cache_filler_started queue_url=" + queueUrl + " shard=" + shard ) ; jedis . del ( queueUrl + "-" + shard + "-Q" ) ; jedis . del ( queueUrl + "-" + shard + "-H" ) ; jedis . del ( queueUrl + "-" + shard + "-R" ) ; String previousReceiptHandle = null ; List < CQSMessage > messages = persistenceStorage . peekQueue ( queueUrl , shard , null , null , 1000 ) ; int totalCached = 0 ; while ( messages . size ( ) != 0 ) { for ( CQSMessage message : messages ) { addMessageToCache ( queueUrl , shard , message , jedis ) ; totalCached ++ ; } previousReceiptHandle = messages . get ( messages . size ( ) - 1 ) . getMessageId ( ) ; messages = persistenceStorage . peekQueue ( queueUrl , shard , previousReceiptHandle , null , 1000 ) ; } setCacheState ( queueUrl , shard , QCacheState . OK , null , false ) ; setCacheFillerProcessing ( queueUrl , shard , 0 ) ; long ts3 = System . currentTimeMillis ( ) ; } catch ( Exception e ) { if ( e instanceof JedisException ) { brokenJedis = true ; } logger . error ( "event=cache_filler_failed" , e ) ; trySettingCacheState ( queueUrl , shard , QCacheState . Unavailable ) ; } finally { returnResource ( jedis , brokenJedis ) ; CQSControllerServlet . valueAccumulator . deleteAllCounters ( ) ; } }
public void run ( ) { CQSControllerServlet . valueAccumulator . initializeAllCounters ( ) ; boolean brokenJedis = false ; long ts1 = System . currentTimeMillis ( ) ; ShardedJedis jedis = getResource ( ) ; try { logger . info ( "event=cache_filler_started queue_url=" + queueUrl + " shard=" + shard ) ; jedis . del ( queueUrl + "-" + shard + "-Q" ) ; jedis . del ( queueUrl + "-" + shard + "-H" ) ; jedis . del ( queueUrl + "-" + shard + "-R" ) ; String previousReceiptHandle = null ; List < CQSMessage > messages = persistenceStorage . peekQueue ( queueUrl , shard , null , null , 1000 ) ; int totalCached = 0 ; while ( messages . size ( ) != 0 ) { for ( CQSMessage message : messages ) { addMessageToCache ( queueUrl , shard , message , jedis ) ; totalCached ++ ; } previousReceiptHandle = messages . get ( messages . size ( ) - 1 ) . getMessageId ( ) ; messages = persistenceStorage . peekQueue ( queueUrl , shard , previousReceiptHandle , null , 1000 ) ; } setCacheState ( queueUrl , shard , QCacheState . OK , null , false ) ; setCacheFillerProcessing ( queueUrl , shard , 0 ) ; long ts3 = System . currentTimeMillis ( ) ; logger . info ( "event=cache_filler_finished queue_url=" + queueUrl + " shard=" + shard + " num_cached=" + totalCached + " total_ms=" + ( ts3 - ts1 ) + " redis_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . RedisTime ) + " cass_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . CassandraTime ) ) ; } catch ( Exception e ) { if ( e instanceof JedisException ) { brokenJedis = true ; } logger . error ( "event=cache_filler_failed" , e ) ; trySettingCacheState ( queueUrl , shard , QCacheState . Unavailable ) ; } finally { returnResource ( jedis , brokenJedis ) ; CQSControllerServlet . valueAccumulator . deleteAllCounters ( ) ; } }
410
public void run ( ) { CQSControllerServlet . valueAccumulator . initializeAllCounters ( ) ; boolean brokenJedis = false ; long ts1 = System . currentTimeMillis ( ) ; ShardedJedis jedis = getResource ( ) ; try { logger . info ( "event=cache_filler_started queue_url=" + queueUrl + " shard=" + shard ) ; jedis . del ( queueUrl + "-" + shard + "-Q" ) ; jedis . del ( queueUrl + "-" + shard + "-H" ) ; jedis . del ( queueUrl + "-" + shard + "-R" ) ; String previousReceiptHandle = null ; List < CQSMessage > messages = persistenceStorage . peekQueue ( queueUrl , shard , null , null , 1000 ) ; int totalCached = 0 ; while ( messages . size ( ) != 0 ) { for ( CQSMessage message : messages ) { addMessageToCache ( queueUrl , shard , message , jedis ) ; totalCached ++ ; } previousReceiptHandle = messages . get ( messages . size ( ) - 1 ) . getMessageId ( ) ; messages = persistenceStorage . peekQueue ( queueUrl , shard , previousReceiptHandle , null , 1000 ) ; } setCacheState ( queueUrl , shard , QCacheState . OK , null , false ) ; setCacheFillerProcessing ( queueUrl , shard , 0 ) ; long ts3 = System . currentTimeMillis ( ) ; logger . info ( "event=cache_filler_finished queue_url=" + queueUrl + " shard=" + shard + " num_cached=" + totalCached + " total_ms=" + ( ts3 - ts1 ) + " redis_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . RedisTime ) + " cass_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . CassandraTime ) ) ; } catch ( Exception e ) { if ( e instanceof JedisException ) { brokenJedis = true ; } trySettingCacheState ( queueUrl , shard , QCacheState . Unavailable ) ; } finally { returnResource ( jedis , brokenJedis ) ; CQSControllerServlet . valueAccumulator . deleteAllCounters ( ) ; } }
public void run ( ) { CQSControllerServlet . valueAccumulator . initializeAllCounters ( ) ; boolean brokenJedis = false ; long ts1 = System . currentTimeMillis ( ) ; ShardedJedis jedis = getResource ( ) ; try { logger . info ( "event=cache_filler_started queue_url=" + queueUrl + " shard=" + shard ) ; jedis . del ( queueUrl + "-" + shard + "-Q" ) ; jedis . del ( queueUrl + "-" + shard + "-H" ) ; jedis . del ( queueUrl + "-" + shard + "-R" ) ; String previousReceiptHandle = null ; List < CQSMessage > messages = persistenceStorage . peekQueue ( queueUrl , shard , null , null , 1000 ) ; int totalCached = 0 ; while ( messages . size ( ) != 0 ) { for ( CQSMessage message : messages ) { addMessageToCache ( queueUrl , shard , message , jedis ) ; totalCached ++ ; } previousReceiptHandle = messages . get ( messages . size ( ) - 1 ) . getMessageId ( ) ; messages = persistenceStorage . peekQueue ( queueUrl , shard , previousReceiptHandle , null , 1000 ) ; } setCacheState ( queueUrl , shard , QCacheState . OK , null , false ) ; setCacheFillerProcessing ( queueUrl , shard , 0 ) ; long ts3 = System . currentTimeMillis ( ) ; logger . info ( "event=cache_filler_finished queue_url=" + queueUrl + " shard=" + shard + " num_cached=" + totalCached + " total_ms=" + ( ts3 - ts1 ) + " redis_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . RedisTime ) + " cass_ms=" + CQSControllerServlet . valueAccumulator . getCounter ( AccumulatorName . CassandraTime ) ) ; } catch ( Exception e ) { if ( e instanceof JedisException ) { brokenJedis = true ; } logger . error ( "event=cache_filler_failed" , e ) ; trySettingCacheState ( queueUrl , shard , QCacheState . Unavailable ) ; } finally { returnResource ( jedis , brokenJedis ) ; CQSControllerServlet . valueAccumulator . deleteAllCounters ( ) ; } }
411
protected void setupOutput ( final Job job , final SampleDataForSplitPoints operation , final Store store ) throws IOException { job . setOutputFormatClass ( SequenceFileOutputFormat . class ) ; SequenceFileOutputFormat . setOutputPath ( job , new Path ( operation . getOutputPath ( ) ) ) ; if ( null != operation . getCompressionCodec ( ) ) { if ( GzipCodec . class . isAssignableFrom ( operation . getCompressionCodec ( ) ) && ! NativeCodeLoader . isNativeCodeLoaded ( ) && ! ZlibFactory . isNativeZlibLoaded ( job . getConfiguration ( ) ) ) { } else { SequenceFileOutputFormat . setCompressOutput ( job , true ) ; SequenceFileOutputFormat . setOutputCompressorClass ( job , operation . getCompressionCodec ( ) ) ; SequenceFileOutputFormat . setOutputCompressionType ( job , SequenceFile . CompressionType . BLOCK ) ; } } }
protected void setupOutput ( final Job job , final SampleDataForSplitPoints operation , final Store store ) throws IOException { job . setOutputFormatClass ( SequenceFileOutputFormat . class ) ; SequenceFileOutputFormat . setOutputPath ( job , new Path ( operation . getOutputPath ( ) ) ) ; if ( null != operation . getCompressionCodec ( ) ) { if ( GzipCodec . class . isAssignableFrom ( operation . getCompressionCodec ( ) ) && ! NativeCodeLoader . isNativeCodeLoaded ( ) && ! ZlibFactory . isNativeZlibLoaded ( job . getConfiguration ( ) ) ) { LOGGER . warn ( "SequenceFile doesn't work with GzipCodec without native-hadoop code!" ) ; } else { SequenceFileOutputFormat . setCompressOutput ( job , true ) ; SequenceFileOutputFormat . setOutputCompressorClass ( job , operation . getCompressionCodec ( ) ) ; SequenceFileOutputFormat . setOutputCompressionType ( job , SequenceFile . CompressionType . BLOCK ) ; } } }
412
public static void shutdownChannel ( ManagedChannel channel , ILogger logger , long timeout ) throws InterruptedException { if ( ! channel . shutdown ( ) . awaitTermination ( timeout , SECONDS ) ) { if ( ! channel . shutdownNow ( ) . awaitTermination ( 1 , SECONDS ) ) { logger . info ( "gRPC client has not shut down on time, even after forceful shutdown" ) ; } } }
public static void shutdownChannel ( ManagedChannel channel , ILogger logger , long timeout ) throws InterruptedException { if ( ! channel . shutdown ( ) . awaitTermination ( timeout , SECONDS ) ) { logger . info ( "gRPC client has not shut down within " + timeout + " seconds, you can override the timeout " + "by setting the `jet.grpc.shutdown.timeout.seconds` system property" ) ; if ( ! channel . shutdownNow ( ) . awaitTermination ( 1 , SECONDS ) ) { logger . info ( "gRPC client has not shut down on time, even after forceful shutdown" ) ; } } }
413
public static void shutdownChannel ( ManagedChannel channel , ILogger logger , long timeout ) throws InterruptedException { if ( ! channel . shutdown ( ) . awaitTermination ( timeout , SECONDS ) ) { logger . info ( "gRPC client has not shut down within " + timeout + " seconds, you can override the timeout " + "by setting the `jet.grpc.shutdown.timeout.seconds` system property" ) ; if ( ! channel . shutdownNow ( ) . awaitTermination ( 1 , SECONDS ) ) { } } }
public static void shutdownChannel ( ManagedChannel channel , ILogger logger , long timeout ) throws InterruptedException { if ( ! channel . shutdown ( ) . awaitTermination ( timeout , SECONDS ) ) { logger . info ( "gRPC client has not shut down within " + timeout + " seconds, you can override the timeout " + "by setting the `jet.grpc.shutdown.timeout.seconds` system property" ) ; if ( ! channel . shutdownNow ( ) . awaitTermination ( 1 , SECONDS ) ) { logger . info ( "gRPC client has not shut down on time, even after forceful shutdown" ) ; } } }
414
public void membershipFailure ( String reason , Throwable t ) { dm . exceptionInThreads = true ; Throwable cause = t ; if ( cause != null && ! ( cause instanceof ForcedDisconnectException ) ) { cause = new ForcedDisconnectException ( cause . getMessage ( ) ) ; } dm . setRootCause ( cause ) ; try { List < MembershipTestHook > testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . beforeMembershipFailure ( reason , cause ) ; } } dm . getSystem ( ) . disconnect ( reason , true ) ; testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . afterMembershipFailure ( reason , cause ) ; } } } catch ( RuntimeException re ) { logger . warn ( "Exception caught while shutting down" , re ) ; } }
public void membershipFailure ( String reason , Throwable t ) { dm . exceptionInThreads = true ; Throwable cause = t ; if ( cause != null && ! ( cause instanceof ForcedDisconnectException ) ) { logger . info ( "cluster membership failed due to " , cause ) ; cause = new ForcedDisconnectException ( cause . getMessage ( ) ) ; } dm . setRootCause ( cause ) ; try { List < MembershipTestHook > testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . beforeMembershipFailure ( reason , cause ) ; } } dm . getSystem ( ) . disconnect ( reason , true ) ; testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . afterMembershipFailure ( reason , cause ) ; } } } catch ( RuntimeException re ) { logger . warn ( "Exception caught while shutting down" , re ) ; } }
415
public void membershipFailure ( String reason , Throwable t ) { dm . exceptionInThreads = true ; Throwable cause = t ; if ( cause != null && ! ( cause instanceof ForcedDisconnectException ) ) { logger . info ( "cluster membership failed due to " , cause ) ; cause = new ForcedDisconnectException ( cause . getMessage ( ) ) ; } dm . setRootCause ( cause ) ; try { List < MembershipTestHook > testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . beforeMembershipFailure ( reason , cause ) ; } } dm . getSystem ( ) . disconnect ( reason , true ) ; testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . afterMembershipFailure ( reason , cause ) ; } } } catch ( RuntimeException re ) { } }
public void membershipFailure ( String reason , Throwable t ) { dm . exceptionInThreads = true ; Throwable cause = t ; if ( cause != null && ! ( cause instanceof ForcedDisconnectException ) ) { logger . info ( "cluster membership failed due to " , cause ) ; cause = new ForcedDisconnectException ( cause . getMessage ( ) ) ; } dm . setRootCause ( cause ) ; try { List < MembershipTestHook > testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . beforeMembershipFailure ( reason , cause ) ; } } dm . getSystem ( ) . disconnect ( reason , true ) ; testHooks = dm . getMembershipTestHooks ( ) ; if ( testHooks != null ) { for ( final MembershipTestHook testHook : testHooks ) { testHook . afterMembershipFailure ( reason , cause ) ; } } } catch ( RuntimeException re ) { logger . warn ( "Exception caught while shutting down" , re ) ; } }
416
public synchronized void suspend ( ) throws JobException { try { if ( ! this . suspended ) { this . closeSessions ( ) ; this . suspended = true ; } } catch ( CPMException e ) { throw new JobException ( e ) ; } }
public synchronized void suspend ( ) throws JobException { try { if ( ! this . suspended ) { this . closeSessions ( ) ; log . debug ( "Job message processing suspended" ) ; this . suspended = true ; } } catch ( CPMException e ) { throw new JobException ( e ) ; } }
417
public StateUpdate execute ( IntegrationDeployment integrationDeployment ) { Map < String , String > stepsDone = new HashMap < > ( integrationDeployment . getStepsDone ( ) ) ; stepsDone . remove ( "deploy" ) ; IntegrationDeploymentState currentState = IntegrationDeploymentState . Pending ; Map < String , String > labels = new HashMap < > ( ) ; labels . put ( OpenShiftService . INTEGRATION_ID_LABEL , Labels . validate ( integrationDeployment . getIntegrationId ( ) . get ( ) ) ) ; labels . put ( OpenShiftService . DEPLOYMENT_VERSION_LABEL , String . valueOf ( integrationDeployment . getVersion ( ) ) ) ; List < DeploymentConfig > deployments = getOpenShiftService ( ) . getDeploymentsByLabel ( labels ) ; Boolean isDeployed = ! deployments . stream ( ) . filter ( d -> d . getSpec ( ) . getReplicas ( ) != 0 ) . collect ( Collectors . toList ( ) ) . isEmpty ( ) ; if ( isDeployed ) { try { getOpenShiftService ( ) . scale ( integrationDeployment . getSpec ( ) . getName ( ) , labels , 0 , 1 , TimeUnit . MINUTES ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; return new StateUpdate ( currentState , stepsDone ) ; } } Boolean isUndeployed = ! deployments . stream ( ) . filter ( d -> d . getStatus ( ) . getAvailableReplicas ( ) == 0 ) . collect ( Collectors . toList ( ) ) . isEmpty ( ) ; if ( isUndeployed ) { currentState = IntegrationDeploymentState . Unpublished ; } return new StateUpdate ( currentState , stepsDone ) ; }
public StateUpdate execute ( IntegrationDeployment integrationDeployment ) { Map < String , String > stepsDone = new HashMap < > ( integrationDeployment . getStepsDone ( ) ) ; stepsDone . remove ( "deploy" ) ; IntegrationDeploymentState currentState = IntegrationDeploymentState . Pending ; Map < String , String > labels = new HashMap < > ( ) ; labels . put ( OpenShiftService . INTEGRATION_ID_LABEL , Labels . validate ( integrationDeployment . getIntegrationId ( ) . get ( ) ) ) ; labels . put ( OpenShiftService . DEPLOYMENT_VERSION_LABEL , String . valueOf ( integrationDeployment . getVersion ( ) ) ) ; List < DeploymentConfig > deployments = getOpenShiftService ( ) . getDeploymentsByLabel ( labels ) ; Boolean isDeployed = ! deployments . stream ( ) . filter ( d -> d . getSpec ( ) . getReplicas ( ) != 0 ) . collect ( Collectors . toList ( ) ) . isEmpty ( ) ; if ( isDeployed ) { try { LOG . info ( "Undeploying integration deployment:{} version:{}" , integrationDeployment . getSpec ( ) . getName ( ) , integrationDeployment . getVersion ( ) ) ; getOpenShiftService ( ) . scale ( integrationDeployment . getSpec ( ) . getName ( ) , labels , 0 , 1 , TimeUnit . MINUTES ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; return new StateUpdate ( currentState , stepsDone ) ; } } Boolean isUndeployed = ! deployments . stream ( ) . filter ( d -> d . getStatus ( ) . getAvailableReplicas ( ) == 0 ) . collect ( Collectors . toList ( ) ) . isEmpty ( ) ; if ( isUndeployed ) { currentState = IntegrationDeploymentState . Unpublished ; } return new StateUpdate ( currentState , stepsDone ) ; }
418
private void readAccessTokens ( JsonReader reader ) throws IOException { reader . beginArray ( ) ; while ( reader . hasNext ( ) ) { OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity ( ) ; reader . beginObject ( ) ; Long currentId = null ; String clientId = null ; Long authHolderId = null ; Long refreshTokenId = null ; while ( reader . hasNext ( ) ) { switch ( reader . peek ( ) ) { case END_OBJECT : continue ; case NAME : String name = reader . nextName ( ) ; if ( reader . peek ( ) == JsonToken . NULL ) { reader . skipValue ( ) ; } else if ( name . equals ( ID ) ) { currentId = reader . nextLong ( ) ; } else if ( name . equals ( EXPIRATION ) ) { Date date = utcToDate ( reader . nextString ( ) ) ; token . setExpiration ( date ) ; } else if ( name . equals ( VALUE ) ) { String value = reader . nextString ( ) ; try { token . setJwt ( JWTParser . parse ( value ) ) ; } catch ( ParseException ex ) { } } else if ( name . equals ( CLIENT_ID ) ) { clientId = reader . nextString ( ) ; } else if ( name . equals ( AUTHENTICATION_HOLDER_ID ) ) { authHolderId = reader . nextLong ( ) ; } else if ( name . equals ( REFRESH_TOKEN_ID ) ) { refreshTokenId = reader . nextLong ( ) ; } else if ( name . equals ( SCOPE ) ) { Set < String > scope = readSet ( reader ) ; token . setScope ( scope ) ; } else if ( name . equals ( TYPE ) ) { token . setTokenType ( reader . nextString ( ) ) ; } else { logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; } break ; default : logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; continue ; } } reader . endObject ( ) ; Long newId = tokenRepository . saveAccessToken ( token ) . getId ( ) ; maps . getAccessTokenToClientRefs ( ) . put ( currentId , clientId ) ; maps . getAccessTokenToAuthHolderRefs ( ) . put ( currentId , authHolderId ) ; if ( refreshTokenId != null ) { maps . getAccessTokenToRefreshTokenRefs ( ) . put ( currentId , refreshTokenId ) ; } maps . getAccessTokenOldToNewIdMap ( ) . put ( currentId , newId ) ; logger . debug ( "Read access token {}" , currentId ) ; } reader . endArray ( ) ; logger . info ( "Done reading access tokens" ) ; }
private void readAccessTokens ( JsonReader reader ) throws IOException { reader . beginArray ( ) ; while ( reader . hasNext ( ) ) { OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity ( ) ; reader . beginObject ( ) ; Long currentId = null ; String clientId = null ; Long authHolderId = null ; Long refreshTokenId = null ; while ( reader . hasNext ( ) ) { switch ( reader . peek ( ) ) { case END_OBJECT : continue ; case NAME : String name = reader . nextName ( ) ; if ( reader . peek ( ) == JsonToken . NULL ) { reader . skipValue ( ) ; } else if ( name . equals ( ID ) ) { currentId = reader . nextLong ( ) ; } else if ( name . equals ( EXPIRATION ) ) { Date date = utcToDate ( reader . nextString ( ) ) ; token . setExpiration ( date ) ; } else if ( name . equals ( VALUE ) ) { String value = reader . nextString ( ) ; try { token . setJwt ( JWTParser . parse ( value ) ) ; } catch ( ParseException ex ) { logger . error ( "Unable to set refresh token value to {}" , value , ex ) ; } } else if ( name . equals ( CLIENT_ID ) ) { clientId = reader . nextString ( ) ; } else if ( name . equals ( AUTHENTICATION_HOLDER_ID ) ) { authHolderId = reader . nextLong ( ) ; } else if ( name . equals ( REFRESH_TOKEN_ID ) ) { refreshTokenId = reader . nextLong ( ) ; } else if ( name . equals ( SCOPE ) ) { Set < String > scope = readSet ( reader ) ; token . setScope ( scope ) ; } else if ( name . equals ( TYPE ) ) { token . setTokenType ( reader . nextString ( ) ) ; } else { logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; } break ; default : logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; continue ; } } reader . endObject ( ) ; Long newId = tokenRepository . saveAccessToken ( token ) . getId ( ) ; maps . getAccessTokenToClientRefs ( ) . put ( currentId , clientId ) ; maps . getAccessTokenToAuthHolderRefs ( ) . put ( currentId , authHolderId ) ; if ( refreshTokenId != null ) { maps . getAccessTokenToRefreshTokenRefs ( ) . put ( currentId , refreshTokenId ) ; } maps . getAccessTokenOldToNewIdMap ( ) . put ( currentId , newId ) ; logger . debug ( "Read access token {}" , currentId ) ; } reader . endArray ( ) ; logger . info ( "Done reading access tokens" ) ; }
419
private void readAccessTokens ( JsonReader reader ) throws IOException { reader . beginArray ( ) ; while ( reader . hasNext ( ) ) { OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity ( ) ; reader . beginObject ( ) ; Long currentId = null ; String clientId = null ; Long authHolderId = null ; Long refreshTokenId = null ; while ( reader . hasNext ( ) ) { switch ( reader . peek ( ) ) { case END_OBJECT : continue ; case NAME : String name = reader . nextName ( ) ; if ( reader . peek ( ) == JsonToken . NULL ) { reader . skipValue ( ) ; } else if ( name . equals ( ID ) ) { currentId = reader . nextLong ( ) ; } else if ( name . equals ( EXPIRATION ) ) { Date date = utcToDate ( reader . nextString ( ) ) ; token . setExpiration ( date ) ; } else if ( name . equals ( VALUE ) ) { String value = reader . nextString ( ) ; try { token . setJwt ( JWTParser . parse ( value ) ) ; } catch ( ParseException ex ) { logger . error ( "Unable to set refresh token value to {}" , value , ex ) ; } } else if ( name . equals ( CLIENT_ID ) ) { clientId = reader . nextString ( ) ; } else if ( name . equals ( AUTHENTICATION_HOLDER_ID ) ) { authHolderId = reader . nextLong ( ) ; } else if ( name . equals ( REFRESH_TOKEN_ID ) ) { refreshTokenId = reader . nextLong ( ) ; } else if ( name . equals ( SCOPE ) ) { Set < String > scope = readSet ( reader ) ; token . setScope ( scope ) ; } else if ( name . equals ( TYPE ) ) { token . setTokenType ( reader . nextString ( ) ) ; } else { logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; } break ; default : logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; continue ; } } reader . endObject ( ) ; Long newId = tokenRepository . saveAccessToken ( token ) . getId ( ) ; maps . getAccessTokenToClientRefs ( ) . put ( currentId , clientId ) ; maps . getAccessTokenToAuthHolderRefs ( ) . put ( currentId , authHolderId ) ; if ( refreshTokenId != null ) { maps . getAccessTokenToRefreshTokenRefs ( ) . put ( currentId , refreshTokenId ) ; } maps . getAccessTokenOldToNewIdMap ( ) . put ( currentId , newId ) ; } reader . endArray ( ) ; logger . info ( "Done reading access tokens" ) ; }
private void readAccessTokens ( JsonReader reader ) throws IOException { reader . beginArray ( ) ; while ( reader . hasNext ( ) ) { OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity ( ) ; reader . beginObject ( ) ; Long currentId = null ; String clientId = null ; Long authHolderId = null ; Long refreshTokenId = null ; while ( reader . hasNext ( ) ) { switch ( reader . peek ( ) ) { case END_OBJECT : continue ; case NAME : String name = reader . nextName ( ) ; if ( reader . peek ( ) == JsonToken . NULL ) { reader . skipValue ( ) ; } else if ( name . equals ( ID ) ) { currentId = reader . nextLong ( ) ; } else if ( name . equals ( EXPIRATION ) ) { Date date = utcToDate ( reader . nextString ( ) ) ; token . setExpiration ( date ) ; } else if ( name . equals ( VALUE ) ) { String value = reader . nextString ( ) ; try { token . setJwt ( JWTParser . parse ( value ) ) ; } catch ( ParseException ex ) { logger . error ( "Unable to set refresh token value to {}" , value , ex ) ; } } else if ( name . equals ( CLIENT_ID ) ) { clientId = reader . nextString ( ) ; } else if ( name . equals ( AUTHENTICATION_HOLDER_ID ) ) { authHolderId = reader . nextLong ( ) ; } else if ( name . equals ( REFRESH_TOKEN_ID ) ) { refreshTokenId = reader . nextLong ( ) ; } else if ( name . equals ( SCOPE ) ) { Set < String > scope = readSet ( reader ) ; token . setScope ( scope ) ; } else if ( name . equals ( TYPE ) ) { token . setTokenType ( reader . nextString ( ) ) ; } else { logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; } break ; default : logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; continue ; } } reader . endObject ( ) ; Long newId = tokenRepository . saveAccessToken ( token ) . getId ( ) ; maps . getAccessTokenToClientRefs ( ) . put ( currentId , clientId ) ; maps . getAccessTokenToAuthHolderRefs ( ) . put ( currentId , authHolderId ) ; if ( refreshTokenId != null ) { maps . getAccessTokenToRefreshTokenRefs ( ) . put ( currentId , refreshTokenId ) ; } maps . getAccessTokenOldToNewIdMap ( ) . put ( currentId , newId ) ; logger . debug ( "Read access token {}" , currentId ) ; } reader . endArray ( ) ; logger . info ( "Done reading access tokens" ) ; }
420
private void readAccessTokens ( JsonReader reader ) throws IOException { reader . beginArray ( ) ; while ( reader . hasNext ( ) ) { OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity ( ) ; reader . beginObject ( ) ; Long currentId = null ; String clientId = null ; Long authHolderId = null ; Long refreshTokenId = null ; while ( reader . hasNext ( ) ) { switch ( reader . peek ( ) ) { case END_OBJECT : continue ; case NAME : String name = reader . nextName ( ) ; if ( reader . peek ( ) == JsonToken . NULL ) { reader . skipValue ( ) ; } else if ( name . equals ( ID ) ) { currentId = reader . nextLong ( ) ; } else if ( name . equals ( EXPIRATION ) ) { Date date = utcToDate ( reader . nextString ( ) ) ; token . setExpiration ( date ) ; } else if ( name . equals ( VALUE ) ) { String value = reader . nextString ( ) ; try { token . setJwt ( JWTParser . parse ( value ) ) ; } catch ( ParseException ex ) { logger . error ( "Unable to set refresh token value to {}" , value , ex ) ; } } else if ( name . equals ( CLIENT_ID ) ) { clientId = reader . nextString ( ) ; } else if ( name . equals ( AUTHENTICATION_HOLDER_ID ) ) { authHolderId = reader . nextLong ( ) ; } else if ( name . equals ( REFRESH_TOKEN_ID ) ) { refreshTokenId = reader . nextLong ( ) ; } else if ( name . equals ( SCOPE ) ) { Set < String > scope = readSet ( reader ) ; token . setScope ( scope ) ; } else if ( name . equals ( TYPE ) ) { token . setTokenType ( reader . nextString ( ) ) ; } else { logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; } break ; default : logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; continue ; } } reader . endObject ( ) ; Long newId = tokenRepository . saveAccessToken ( token ) . getId ( ) ; maps . getAccessTokenToClientRefs ( ) . put ( currentId , clientId ) ; maps . getAccessTokenToAuthHolderRefs ( ) . put ( currentId , authHolderId ) ; if ( refreshTokenId != null ) { maps . getAccessTokenToRefreshTokenRefs ( ) . put ( currentId , refreshTokenId ) ; } maps . getAccessTokenOldToNewIdMap ( ) . put ( currentId , newId ) ; logger . debug ( "Read access token {}" , currentId ) ; } reader . endArray ( ) ; }
private void readAccessTokens ( JsonReader reader ) throws IOException { reader . beginArray ( ) ; while ( reader . hasNext ( ) ) { OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity ( ) ; reader . beginObject ( ) ; Long currentId = null ; String clientId = null ; Long authHolderId = null ; Long refreshTokenId = null ; while ( reader . hasNext ( ) ) { switch ( reader . peek ( ) ) { case END_OBJECT : continue ; case NAME : String name = reader . nextName ( ) ; if ( reader . peek ( ) == JsonToken . NULL ) { reader . skipValue ( ) ; } else if ( name . equals ( ID ) ) { currentId = reader . nextLong ( ) ; } else if ( name . equals ( EXPIRATION ) ) { Date date = utcToDate ( reader . nextString ( ) ) ; token . setExpiration ( date ) ; } else if ( name . equals ( VALUE ) ) { String value = reader . nextString ( ) ; try { token . setJwt ( JWTParser . parse ( value ) ) ; } catch ( ParseException ex ) { logger . error ( "Unable to set refresh token value to {}" , value , ex ) ; } } else if ( name . equals ( CLIENT_ID ) ) { clientId = reader . nextString ( ) ; } else if ( name . equals ( AUTHENTICATION_HOLDER_ID ) ) { authHolderId = reader . nextLong ( ) ; } else if ( name . equals ( REFRESH_TOKEN_ID ) ) { refreshTokenId = reader . nextLong ( ) ; } else if ( name . equals ( SCOPE ) ) { Set < String > scope = readSet ( reader ) ; token . setScope ( scope ) ; } else if ( name . equals ( TYPE ) ) { token . setTokenType ( reader . nextString ( ) ) ; } else { logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; } break ; default : logger . debug ( "Found unexpected entry" ) ; reader . skipValue ( ) ; continue ; } } reader . endObject ( ) ; Long newId = tokenRepository . saveAccessToken ( token ) . getId ( ) ; maps . getAccessTokenToClientRefs ( ) . put ( currentId , clientId ) ; maps . getAccessTokenToAuthHolderRefs ( ) . put ( currentId , authHolderId ) ; if ( refreshTokenId != null ) { maps . getAccessTokenToRefreshTokenRefs ( ) . put ( currentId , refreshTokenId ) ; } maps . getAccessTokenOldToNewIdMap ( ) . put ( currentId , newId ) ; logger . debug ( "Read access token {}" , currentId ) ; } reader . endArray ( ) ; logger . info ( "Done reading access tokens" ) ; }
421
public String getMessage ( Locale locale , String key ) { if ( log . isDebugEnabled ( ) ) { } String localeKey = localeKey ( locale ) ; String originalKey = messageKey ( localeKey , key ) ; String message = null ; message = findMessage ( locale , key , originalKey ) ; if ( message != null ) { return message ; } if ( mode == MODE_JSTL ) { } else if ( mode == MODE_RESOURCE_BUNDLE ) { if ( ! defaultLocale . equals ( locale ) ) { message = findMessage ( defaultLocale , key , originalKey ) ; } } else { if ( ! defaultLocale . equals ( locale ) ) { localeKey = localeKey ( defaultLocale ) ; message = findMessage ( localeKey , key , originalKey ) ; } } if ( message != null ) { return message ; } message = findMessage ( "" , key , originalKey ) ; if ( message != null ) { return message ; } if ( returnNull ) { return ( null ) ; } else { return ( "???" + messageKey ( locale , key ) + "???" ) ; } }
public String getMessage ( Locale locale , String key ) { if ( log . isDebugEnabled ( ) ) { log . debug ( "getMessage(" + locale + "," + key + ")" ) ; } String localeKey = localeKey ( locale ) ; String originalKey = messageKey ( localeKey , key ) ; String message = null ; message = findMessage ( locale , key , originalKey ) ; if ( message != null ) { return message ; } if ( mode == MODE_JSTL ) { } else if ( mode == MODE_RESOURCE_BUNDLE ) { if ( ! defaultLocale . equals ( locale ) ) { message = findMessage ( defaultLocale , key , originalKey ) ; } } else { if ( ! defaultLocale . equals ( locale ) ) { localeKey = localeKey ( defaultLocale ) ; message = findMessage ( localeKey , key , originalKey ) ; } } if ( message != null ) { return message ; } message = findMessage ( "" , key , originalKey ) ; if ( message != null ) { return message ; } if ( returnNull ) { return ( null ) ; } else { return ( "???" + messageKey ( locale , key ) + "???" ) ; } }
422
private Response _processRedirects ( Response response ) throws ConnectionClientException { if ( ! _followRedirects ) { return response ; } int count = 0 ; Response currentResponse = response ; while ( _responseHandler . isRedirect ( currentResponse ) && ( count < 3 ) ) { String location = currentResponse . getHeaderString ( HttpHeaders . LOCATION ) ; if ( StringUtil . isEmpty ( location ) ) { return currentResponse ; } if ( _logger . isDebugEnabled ( ) ) { } currentResponse . close ( ) ; try { Invocation . Builder builder = _createBuilder ( new URI ( location ) ) ; currentResponse = builder . get ( ) ; } catch ( URISyntaxException uriSyntaxException ) { throw new ConnectionClientException ( "Unable to redirect to location " + location , response . getStatus ( ) , uriSyntaxException ) ; } } return currentResponse ; }
private Response _processRedirects ( Response response ) throws ConnectionClientException { if ( ! _followRedirects ) { return response ; } int count = 0 ; Response currentResponse = response ; while ( _responseHandler . isRedirect ( currentResponse ) && ( count < 3 ) ) { String location = currentResponse . getHeaderString ( HttpHeaders . LOCATION ) ; if ( StringUtil . isEmpty ( location ) ) { return currentResponse ; } if ( _logger . isDebugEnabled ( ) ) { _logger . debug ( "Redirect {}# to {}" , count , location ) ; } currentResponse . close ( ) ; try { Invocation . Builder builder = _createBuilder ( new URI ( location ) ) ; currentResponse = builder . get ( ) ; } catch ( URISyntaxException uriSyntaxException ) { throw new ConnectionClientException ( "Unable to redirect to location " + location , response . getStatus ( ) , uriSyntaxException ) ; } } return currentResponse ; }
423
public void run ( ) { while ( ! shutdown . get ( ) ) { long startNanos = nanoTime ( ) ; long currentTimeMillis = currentTimeMillis ( ) ; updateTrackers ( currentTimeMillis ) ; if ( ! dirtyContainers . isEmpty ( ) ) { coordinatorUpdate ( ) ; persist ( currentTimeMillis ) ; } long elapsedNanos = nanoTime ( ) - startNanos ; if ( scanIntervalNanos > elapsedNanos ) { if ( dirtyContainers . isEmpty ( ) ) { sleepNanos ( WAIT_FOR_TEST_CONTAINERS_DELAY_NANOS - elapsedNanos ) ; } else { sleepNanos ( scanIntervalNanos - elapsedNanos ) ; } } else { } } }
public void run ( ) { while ( ! shutdown . get ( ) ) { long startNanos = nanoTime ( ) ; long currentTimeMillis = currentTimeMillis ( ) ; updateTrackers ( currentTimeMillis ) ; if ( ! dirtyContainers . isEmpty ( ) ) { coordinatorUpdate ( ) ; persist ( currentTimeMillis ) ; } long elapsedNanos = nanoTime ( ) - startNanos ; if ( scanIntervalNanos > elapsedNanos ) { if ( dirtyContainers . isEmpty ( ) ) { sleepNanos ( WAIT_FOR_TEST_CONTAINERS_DELAY_NANOS - elapsedNanos ) ; } else { sleepNanos ( scanIntervalNanos - elapsedNanos ) ; } } else { LOGGER . warn ( getName ( ) + ".run() took " + NANOSECONDS . toMillis ( elapsedNanos ) + " ms" ) ; } } }
424
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
425
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
426
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
427
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
428
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; }
protected void doFilter ( HttpServletRequest request , HttpServletResponse response , FilterChain chain ) throws IOException , ServletException { LOG . log ( Level . FINER , "GeoServerKeycloakFilter.doFilter ENTRY" ) ; LOG . log ( Level . FINEST , ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ) ; logHttpRequest ( Level . FINEST , request ) ; AuthResults authResults = loadAuthn ( request ) ; if ( ! authResults . hasAuthentication ( ) ) { authResults = getNewAuthn ( request , response ) ; } saveAuthn ( request , authResults ) ; request . setAttribute ( GeoServerSecurityFilter . AUTHENTICATION_ENTRY_POINT_HEADER , authResults ) ; LOG . log ( Level . FINER , "continuing filter chain" ) ; LOG . log ( Level . FINEST , chain . getClass ( ) . getCanonicalName ( ) ) ; chain . doFilter ( request , response ) ; logHttpResponse ( Level . FINEST , response ) ; LOG . log ( Level . FINEST , "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ) ; }
429
public int saveTestSuiteToSahiJobs ( ) { MapSqlParameterSource tcParameters = getGuidParameter ( ) ; LOGGER . debug ( "write the following values to 'sakuli_jobs': " + tcParameters . getValues ( ) + " ==> now execute ...." ) ; SimpleJdbcInsert insertTS = new SimpleJdbcInsert ( getDataSource ( ) ) . withTableName ( "sakuli_jobs" ) . usingGeneratedKeyColumns ( "id" ) ; testSuite . setDbJobPrimaryKey ( insertTS . executeAndReturnKey ( tcParameters ) . intValue ( ) ) ; LOGGER . info ( "the test suite \"" + testSuite . getId ( ) + "\"" + "with the guid \"" + testSuite . getGuid ( ) + "\" has been written to 'sakuli_jobs' with primaryKey=" + testSuite . getDbJobPrimaryKey ( ) ) ; return testSuite . getDbJobPrimaryKey ( ) ; }
public int saveTestSuiteToSahiJobs ( ) { LOGGER . debug ( "save the guid to the table 'sakuli_jobs'" ) ; MapSqlParameterSource tcParameters = getGuidParameter ( ) ; LOGGER . debug ( "write the following values to 'sakuli_jobs': " + tcParameters . getValues ( ) + " ==> now execute ...." ) ; SimpleJdbcInsert insertTS = new SimpleJdbcInsert ( getDataSource ( ) ) . withTableName ( "sakuli_jobs" ) . usingGeneratedKeyColumns ( "id" ) ; testSuite . setDbJobPrimaryKey ( insertTS . executeAndReturnKey ( tcParameters ) . intValue ( ) ) ; LOGGER . info ( "the test suite \"" + testSuite . getId ( ) + "\"" + "with the guid \"" + testSuite . getGuid ( ) + "\" has been written to 'sakuli_jobs' with primaryKey=" + testSuite . getDbJobPrimaryKey ( ) ) ; return testSuite . getDbJobPrimaryKey ( ) ; }
430
public int saveTestSuiteToSahiJobs ( ) { LOGGER . debug ( "save the guid to the table 'sakuli_jobs'" ) ; MapSqlParameterSource tcParameters = getGuidParameter ( ) ; SimpleJdbcInsert insertTS = new SimpleJdbcInsert ( getDataSource ( ) ) . withTableName ( "sakuli_jobs" ) . usingGeneratedKeyColumns ( "id" ) ; testSuite . setDbJobPrimaryKey ( insertTS . executeAndReturnKey ( tcParameters ) . intValue ( ) ) ; LOGGER . info ( "the test suite \"" + testSuite . getId ( ) + "\"" + "with the guid \"" + testSuite . getGuid ( ) + "\" has been written to 'sakuli_jobs' with primaryKey=" + testSuite . getDbJobPrimaryKey ( ) ) ; return testSuite . getDbJobPrimaryKey ( ) ; }
public int saveTestSuiteToSahiJobs ( ) { LOGGER . debug ( "save the guid to the table 'sakuli_jobs'" ) ; MapSqlParameterSource tcParameters = getGuidParameter ( ) ; LOGGER . debug ( "write the following values to 'sakuli_jobs': " + tcParameters . getValues ( ) + " ==> now execute ...." ) ; SimpleJdbcInsert insertTS = new SimpleJdbcInsert ( getDataSource ( ) ) . withTableName ( "sakuli_jobs" ) . usingGeneratedKeyColumns ( "id" ) ; testSuite . setDbJobPrimaryKey ( insertTS . executeAndReturnKey ( tcParameters ) . intValue ( ) ) ; LOGGER . info ( "the test suite \"" + testSuite . getId ( ) + "\"" + "with the guid \"" + testSuite . getGuid ( ) + "\" has been written to 'sakuli_jobs' with primaryKey=" + testSuite . getDbJobPrimaryKey ( ) ) ; return testSuite . getDbJobPrimaryKey ( ) ; }
431
public int saveTestSuiteToSahiJobs ( ) { LOGGER . debug ( "save the guid to the table 'sakuli_jobs'" ) ; MapSqlParameterSource tcParameters = getGuidParameter ( ) ; LOGGER . debug ( "write the following values to 'sakuli_jobs': " + tcParameters . getValues ( ) + " ==> now execute ...." ) ; SimpleJdbcInsert insertTS = new SimpleJdbcInsert ( getDataSource ( ) ) . withTableName ( "sakuli_jobs" ) . usingGeneratedKeyColumns ( "id" ) ; testSuite . setDbJobPrimaryKey ( insertTS . executeAndReturnKey ( tcParameters ) . intValue ( ) ) ; return testSuite . getDbJobPrimaryKey ( ) ; }
public int saveTestSuiteToSahiJobs ( ) { LOGGER . debug ( "save the guid to the table 'sakuli_jobs'" ) ; MapSqlParameterSource tcParameters = getGuidParameter ( ) ; LOGGER . debug ( "write the following values to 'sakuli_jobs': " + tcParameters . getValues ( ) + " ==> now execute ...." ) ; SimpleJdbcInsert insertTS = new SimpleJdbcInsert ( getDataSource ( ) ) . withTableName ( "sakuli_jobs" ) . usingGeneratedKeyColumns ( "id" ) ; testSuite . setDbJobPrimaryKey ( insertTS . executeAndReturnKey ( tcParameters ) . intValue ( ) ) ; LOGGER . info ( "the test suite \"" + testSuite . getId ( ) + "\"" + "with the guid \"" + testSuite . getGuid ( ) + "\" has been written to 'sakuli_jobs' with primaryKey=" + testSuite . getDbJobPrimaryKey ( ) ) ; return testSuite . getDbJobPrimaryKey ( ) ; }
432
public void accept ( SyntheticRunResult syntheticRunResult ) { String errorMessage = null ; long durationNanos = syntheticRunResult . durationNanos ( ) ; Throwable t = syntheticRunResult . throwable ( ) ; if ( syntheticMonitorConfig . getKind ( ) == SyntheticMonitorKind . PING && durationNanos >= PING_TIMEOUT_NANOS ) { durationNanos = PING_TIMEOUT_NANOS ; errorMessage = "Timeout" ; } else if ( t != null ) { errorMessage = getBestMessageForSyntheticFailure ( t ) ; } try { syntheticResponseDao . store ( agentRollup . id ( ) , syntheticMonitorConfig . getId ( ) , MoreConfigDefaults . getDisplayOrDefault ( syntheticMonitorConfig ) , syntheticRunResult . captureTime ( ) , durationNanos , errorMessage ) ; } catch ( InterruptedException e ) { return ; } catch ( Exception e ) { logger . error ( e . getMessage ( ) , e ) ; } }
public void accept ( SyntheticRunResult syntheticRunResult ) { String errorMessage = null ; long durationNanos = syntheticRunResult . durationNanos ( ) ; Throwable t = syntheticRunResult . throwable ( ) ; if ( syntheticMonitorConfig . getKind ( ) == SyntheticMonitorKind . PING && durationNanos >= PING_TIMEOUT_NANOS ) { durationNanos = PING_TIMEOUT_NANOS ; errorMessage = "Timeout" ; } else if ( t != null ) { errorMessage = getBestMessageForSyntheticFailure ( t ) ; } try { syntheticResponseDao . store ( agentRollup . id ( ) , syntheticMonitorConfig . getId ( ) , MoreConfigDefaults . getDisplayOrDefault ( syntheticMonitorConfig ) , syntheticRunResult . captureTime ( ) , durationNanos , errorMessage ) ; } catch ( InterruptedException e ) { logger . debug ( e . getMessage ( ) , e ) ; return ; } catch ( Exception e ) { logger . error ( e . getMessage ( ) , e ) ; } }
433
public void accept ( SyntheticRunResult syntheticRunResult ) { String errorMessage = null ; long durationNanos = syntheticRunResult . durationNanos ( ) ; Throwable t = syntheticRunResult . throwable ( ) ; if ( syntheticMonitorConfig . getKind ( ) == SyntheticMonitorKind . PING && durationNanos >= PING_TIMEOUT_NANOS ) { durationNanos = PING_TIMEOUT_NANOS ; errorMessage = "Timeout" ; } else if ( t != null ) { errorMessage = getBestMessageForSyntheticFailure ( t ) ; } try { syntheticResponseDao . store ( agentRollup . id ( ) , syntheticMonitorConfig . getId ( ) , MoreConfigDefaults . getDisplayOrDefault ( syntheticMonitorConfig ) , syntheticRunResult . captureTime ( ) , durationNanos , errorMessage ) ; } catch ( InterruptedException e ) { logger . debug ( e . getMessage ( ) , e ) ; return ; } catch ( Exception e ) { } }
public void accept ( SyntheticRunResult syntheticRunResult ) { String errorMessage = null ; long durationNanos = syntheticRunResult . durationNanos ( ) ; Throwable t = syntheticRunResult . throwable ( ) ; if ( syntheticMonitorConfig . getKind ( ) == SyntheticMonitorKind . PING && durationNanos >= PING_TIMEOUT_NANOS ) { durationNanos = PING_TIMEOUT_NANOS ; errorMessage = "Timeout" ; } else if ( t != null ) { errorMessage = getBestMessageForSyntheticFailure ( t ) ; } try { syntheticResponseDao . store ( agentRollup . id ( ) , syntheticMonitorConfig . getId ( ) , MoreConfigDefaults . getDisplayOrDefault ( syntheticMonitorConfig ) , syntheticRunResult . captureTime ( ) , durationNanos , errorMessage ) ; } catch ( InterruptedException e ) { logger . debug ( e . getMessage ( ) , e ) ; return ; } catch ( Exception e ) { logger . error ( e . getMessage ( ) , e ) ; } }
434
protected static JoynrRuntime getRuntime ( Properties joynrConfig , Module ... modules ) { providerDomain = joynrConfig . getProperty ( "provider.domain" ) ; logger . info ( "getRuntime: providerDomain = " + providerDomain ) ; Properties appConfig = new Properties ( ) ; appConfig . setProperty ( INTER_LANGUAGE_PROVIDER_DOMAIN , providerDomain ) ; Module modulesWithRuntime = getRuntimeModule ( joynrConfig ) ; IltDummyApplication application = ( IltDummyApplication ) new JoynrInjectorFactory ( joynrConfig , modulesWithRuntime ) . createApplication ( IltDummyApplication . class , appConfig ) ; objectMapper = application . getObjectMapper ( ) ; logger . info ( "getRuntime: Leaving" ) ; return application . getRuntime ( ) ; }
protected static JoynrRuntime getRuntime ( Properties joynrConfig , Module ... modules ) { logger . info ( "getRuntime: Entering" ) ; providerDomain = joynrConfig . getProperty ( "provider.domain" ) ; logger . info ( "getRuntime: providerDomain = " + providerDomain ) ; Properties appConfig = new Properties ( ) ; appConfig . setProperty ( INTER_LANGUAGE_PROVIDER_DOMAIN , providerDomain ) ; Module modulesWithRuntime = getRuntimeModule ( joynrConfig ) ; IltDummyApplication application = ( IltDummyApplication ) new JoynrInjectorFactory ( joynrConfig , modulesWithRuntime ) . createApplication ( IltDummyApplication . class , appConfig ) ; objectMapper = application . getObjectMapper ( ) ; logger . info ( "getRuntime: Leaving" ) ; return application . getRuntime ( ) ; }
435
protected static JoynrRuntime getRuntime ( Properties joynrConfig , Module ... modules ) { logger . info ( "getRuntime: Entering" ) ; providerDomain = joynrConfig . getProperty ( "provider.domain" ) ; Properties appConfig = new Properties ( ) ; appConfig . setProperty ( INTER_LANGUAGE_PROVIDER_DOMAIN , providerDomain ) ; Module modulesWithRuntime = getRuntimeModule ( joynrConfig ) ; IltDummyApplication application = ( IltDummyApplication ) new JoynrInjectorFactory ( joynrConfig , modulesWithRuntime ) . createApplication ( IltDummyApplication . class , appConfig ) ; objectMapper = application . getObjectMapper ( ) ; logger . info ( "getRuntime: Leaving" ) ; return application . getRuntime ( ) ; }
protected static JoynrRuntime getRuntime ( Properties joynrConfig , Module ... modules ) { logger . info ( "getRuntime: Entering" ) ; providerDomain = joynrConfig . getProperty ( "provider.domain" ) ; logger . info ( "getRuntime: providerDomain = " + providerDomain ) ; Properties appConfig = new Properties ( ) ; appConfig . setProperty ( INTER_LANGUAGE_PROVIDER_DOMAIN , providerDomain ) ; Module modulesWithRuntime = getRuntimeModule ( joynrConfig ) ; IltDummyApplication application = ( IltDummyApplication ) new JoynrInjectorFactory ( joynrConfig , modulesWithRuntime ) . createApplication ( IltDummyApplication . class , appConfig ) ; objectMapper = application . getObjectMapper ( ) ; logger . info ( "getRuntime: Leaving" ) ; return application . getRuntime ( ) ; }
436
protected static JoynrRuntime getRuntime ( Properties joynrConfig , Module ... modules ) { logger . info ( "getRuntime: Entering" ) ; providerDomain = joynrConfig . getProperty ( "provider.domain" ) ; logger . info ( "getRuntime: providerDomain = " + providerDomain ) ; Properties appConfig = new Properties ( ) ; appConfig . setProperty ( INTER_LANGUAGE_PROVIDER_DOMAIN , providerDomain ) ; Module modulesWithRuntime = getRuntimeModule ( joynrConfig ) ; IltDummyApplication application = ( IltDummyApplication ) new JoynrInjectorFactory ( joynrConfig , modulesWithRuntime ) . createApplication ( IltDummyApplication . class , appConfig ) ; objectMapper = application . getObjectMapper ( ) ; return application . getRuntime ( ) ; }
protected static JoynrRuntime getRuntime ( Properties joynrConfig , Module ... modules ) { logger . info ( "getRuntime: Entering" ) ; providerDomain = joynrConfig . getProperty ( "provider.domain" ) ; logger . info ( "getRuntime: providerDomain = " + providerDomain ) ; Properties appConfig = new Properties ( ) ; appConfig . setProperty ( INTER_LANGUAGE_PROVIDER_DOMAIN , providerDomain ) ; Module modulesWithRuntime = getRuntimeModule ( joynrConfig ) ; IltDummyApplication application = ( IltDummyApplication ) new JoynrInjectorFactory ( joynrConfig , modulesWithRuntime ) . createApplication ( IltDummyApplication . class , appConfig ) ; objectMapper = application . getObjectMapper ( ) ; logger . info ( "getRuntime: Leaving" ) ; return application . getRuntime ( ) ; }
437
public void destroy ( ) { try { registry . destroy ( ) ; } catch ( Throwable e ) { LOGGER . error ( "Destroy failed, nodeType={}, identity={}" , config . getNodeType ( ) , config . getIdentity ( ) , e ) ; } }
public void destroy ( ) { try { registry . destroy ( ) ; LOGGER . info ( "Destroy success, nodeType={}, identity={}" , config . getNodeType ( ) , config . getIdentity ( ) ) ; } catch ( Throwable e ) { LOGGER . error ( "Destroy failed, nodeType={}, identity={}" , config . getNodeType ( ) , config . getIdentity ( ) , e ) ; } }
438
public void destroy ( ) { try { registry . destroy ( ) ; LOGGER . info ( "Destroy success, nodeType={}, identity={}" , config . getNodeType ( ) , config . getIdentity ( ) ) ; } catch ( Throwable e ) { } }
public void destroy ( ) { try { registry . destroy ( ) ; LOGGER . info ( "Destroy success, nodeType={}, identity={}" , config . getNodeType ( ) , config . getIdentity ( ) ) ; } catch ( Throwable e ) { LOGGER . error ( "Destroy failed, nodeType={}, identity={}" , config . getNodeType ( ) , config . getIdentity ( ) , e ) ; } }
439
public View getViewById ( UifDictionaryIndex index , String viewId ) { if ( StringUtils . isBlank ( viewId ) ) { throw new IllegalArgumentException ( "invalid (blank) view id" ) ; } if ( LOG . isDebugEnabled ( ) ) { } return index . getViewById ( viewId ) ; }
public View getViewById ( UifDictionaryIndex index , String viewId ) { if ( StringUtils . isBlank ( viewId ) ) { throw new IllegalArgumentException ( "invalid (blank) view id" ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "calling getViewById by id '" + viewId + "'" ) ; } return index . getViewById ( viewId ) ; }
440
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
441
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
442
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
443
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
444
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
445
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } }
@ SuppressWarnings ( "unchecked" ) public void globalOptimization ( long optimizationTS , Collection < ResourceScheduler < ? extends WorkerResourceDescription > > workers ) { LOGGER . debug ( LOG_PREFIX + " --- Start Global Optimization ---" ) ; int workersCount = workers . size ( ) ; if ( workersCount == 0 ) { return ; } OptimizationWorker [ ] optimizedWorkers = new OptimizationWorker [ workersCount ] ; LinkedList < OptimizationWorker > receivers = new LinkedList < > ( ) ; int i = 0 ; for ( ResourceScheduler < ? extends WorkerResourceDescription > worker : workers ) { optimizedWorkers [ i ] = new OptimizationWorker ( ( MOResourceScheduler < WorkerResourceDescription > ) worker ) ; i ++ ; } boolean hasDonated = true ; while ( hasDonated ) { optimizationTS = System . currentTimeMillis ( ) ; hasDonated = false ; LOGGER . debug ( LOG_PREFIX + " --- Iteration of global Optimization ---" ) ; for ( OptimizationWorker ow : optimizedWorkers ) { LOGGER . debug ( LOG_PREFIX + "Optimizing localy resource " + ow . getName ( ) ) ; ow . localOptimization ( optimizationTS ) ; LOGGER . debug ( LOG_PREFIX + "Resource " + ow . getName ( ) + " will end at " + ow . getDonationIndicator ( ) ) ; } LinkedList < OptimizationWorker > donors = determineDonorAndReceivers ( optimizedWorkers , receivers ) ; while ( ! hasDonated && ! donors . isEmpty ( ) ) { OptimizationWorker donor = donors . remove ( ) ; AllocatableAction candidate ; while ( ! hasDonated && ( candidate = donor . pollDonorAction ( ) ) != null ) { Iterator < OptimizationWorker > recIt = receivers . iterator ( ) ; while ( recIt . hasNext ( ) ) { OptimizationWorker receiver = recIt . next ( ) ; if ( move ( candidate , donor , receiver ) ) { hasDonated = true ; break ; } } } } LOGGER . debug ( LOG_PREFIX + "--- Optimization Iteration finished ---" ) ; } LOGGER . debug ( LOG_PREFIX + "--- Global Optimization finished ---" ) ; }
446
protected long computeNext ( long last ) { long millis = last / 1000 ; long counter = last % 1000 ; long now = clock . currentTime ( ) ; if ( millis >= now ) { if ( counter == 999 ) else counter += 1 ; } else { millis = now ; counter = 0 ; } return millis * 1000 + counter ; }
protected long computeNext ( long last ) { long millis = last / 1000 ; long counter = last % 1000 ; long now = clock . currentTime ( ) ; if ( millis >= now ) { if ( counter == 999 ) logger . warn ( "Sub-millisecond counter overflowed, some query timestamps will not be distinct" ) ; else counter += 1 ; } else { millis = now ; counter = 0 ; } return millis * 1000 + counter ; }
447
public static List < ? > loadWmlFile ( String hash ) { String name = OmFileHelper . getName ( hash , EXTENSION_WML ) ; File file = new File ( OmFileHelper . getUploadWmlDir ( ) , name ) ; XStream xstream = new XStream ( new XppDriver ( ) ) ; xstream . setMode ( XStream . NO_REFERENCES ) ; xstream . addPermission ( NoTypePermission . NONE ) ; xstream . addPermission ( NullPermission . NULL ) ; xstream . addPermission ( PrimitiveTypePermission . PRIMITIVES ) ; xstream . allowTypeHierarchy ( List . class ) ; xstream . allowTypeHierarchy ( String . class ) ; xstream . ignoreUnknownElements ( ) ; try ( InputStream is = new FileInputStream ( file ) ; BufferedReader reader = new BufferedReader ( new InputStreamReader ( is , UTF_8 ) ) ) { return ( List < ? > ) xstream . fromXML ( reader ) ; } catch ( Exception err ) { log . error ( "loadWmlFile" , err ) ; } return new ArrayList < > ( ) ; }
public static List < ? > loadWmlFile ( String hash ) { String name = OmFileHelper . getName ( hash , EXTENSION_WML ) ; File file = new File ( OmFileHelper . getUploadWmlDir ( ) , name ) ; log . debug ( "filepathComplete: {}" , file ) ; XStream xstream = new XStream ( new XppDriver ( ) ) ; xstream . setMode ( XStream . NO_REFERENCES ) ; xstream . addPermission ( NoTypePermission . NONE ) ; xstream . addPermission ( NullPermission . NULL ) ; xstream . addPermission ( PrimitiveTypePermission . PRIMITIVES ) ; xstream . allowTypeHierarchy ( List . class ) ; xstream . allowTypeHierarchy ( String . class ) ; xstream . ignoreUnknownElements ( ) ; try ( InputStream is = new FileInputStream ( file ) ; BufferedReader reader = new BufferedReader ( new InputStreamReader ( is , UTF_8 ) ) ) { return ( List < ? > ) xstream . fromXML ( reader ) ; } catch ( Exception err ) { log . error ( "loadWmlFile" , err ) ; } return new ArrayList < > ( ) ; }
448
public static List < ? > loadWmlFile ( String hash ) { String name = OmFileHelper . getName ( hash , EXTENSION_WML ) ; File file = new File ( OmFileHelper . getUploadWmlDir ( ) , name ) ; log . debug ( "filepathComplete: {}" , file ) ; XStream xstream = new XStream ( new XppDriver ( ) ) ; xstream . setMode ( XStream . NO_REFERENCES ) ; xstream . addPermission ( NoTypePermission . NONE ) ; xstream . addPermission ( NullPermission . NULL ) ; xstream . addPermission ( PrimitiveTypePermission . PRIMITIVES ) ; xstream . allowTypeHierarchy ( List . class ) ; xstream . allowTypeHierarchy ( String . class ) ; xstream . ignoreUnknownElements ( ) ; try ( InputStream is = new FileInputStream ( file ) ; BufferedReader reader = new BufferedReader ( new InputStreamReader ( is , UTF_8 ) ) ) { return ( List < ? > ) xstream . fromXML ( reader ) ; } catch ( Exception err ) { } return new ArrayList < > ( ) ; }
public static List < ? > loadWmlFile ( String hash ) { String name = OmFileHelper . getName ( hash , EXTENSION_WML ) ; File file = new File ( OmFileHelper . getUploadWmlDir ( ) , name ) ; log . debug ( "filepathComplete: {}" , file ) ; XStream xstream = new XStream ( new XppDriver ( ) ) ; xstream . setMode ( XStream . NO_REFERENCES ) ; xstream . addPermission ( NoTypePermission . NONE ) ; xstream . addPermission ( NullPermission . NULL ) ; xstream . addPermission ( PrimitiveTypePermission . PRIMITIVES ) ; xstream . allowTypeHierarchy ( List . class ) ; xstream . allowTypeHierarchy ( String . class ) ; xstream . ignoreUnknownElements ( ) ; try ( InputStream is = new FileInputStream ( file ) ; BufferedReader reader = new BufferedReader ( new InputStreamReader ( is , UTF_8 ) ) ) { return ( List < ? > ) xstream . fromXML ( reader ) ; } catch ( Exception err ) { log . error ( "loadWmlFile" , err ) ; } return new ArrayList < > ( ) ; }
449
public void rollback ( ) { if ( this . father != null ) { this . father . rollback ( ) ; return ; } try { if ( this . connection != null && ! this . connection . getAutoCommit ( ) && ! this . isClosed ) this . connection . rollback ( ) ; } catch ( Throwable e ) { } }
public void rollback ( ) { if ( this . father != null ) { this . father . rollback ( ) ; return ; } try { if ( this . connection != null && ! this . connection . getAutoCommit ( ) && ! this . isClosed ) this . connection . rollback ( ) ; } catch ( Throwable e ) { log . error ( "Errore durante la rollback." , e ) ; } }
450
@ Test ( timeout = 30000 ) public void testPageEviction ( ) throws Exception { int numLedgers = 10 ; byte [ ] masterKey = "blah" . getBytes ( ) ; conf . setOpenFileLimit ( 999999 ) . setPageLimit ( 3 ) ; newLedgerCache ( ) ; try { for ( int i = 1 ; i <= numLedgers ; i ++ ) { ledgerCache . setMasterKey ( ( long ) i , masterKey ) ; ledgerCache . putEntryOffset ( i , 0 , i * 8 ) ; ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } ledgerCache . flushLedger ( true ) ; ledgerCache . flushLedger ( true ) ; for ( int i = 1 ; i <= numLedgers / 2 ; i ++ ) { ledgerCache . deleteLedger ( i ) ; } newLedgerCache ( ) ; for ( int i = 1 ; i <= numLedgers ; i ++ ) { try { ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } catch ( NoLedgerException nsle ) { if ( i <= numLedgers / 2 ) { } else { fail ( "Should not reach here." ) ; } } } } catch ( Exception e ) { LOG . error ( "Got Exception." , e ) ; fail ( "Failed to add entry." ) ; } }
@ Test ( timeout = 30000 ) public void testPageEviction ( ) throws Exception { int numLedgers = 10 ; byte [ ] masterKey = "blah" . getBytes ( ) ; conf . setOpenFileLimit ( 999999 ) . setPageLimit ( 3 ) ; newLedgerCache ( ) ; try { for ( int i = 1 ; i <= numLedgers ; i ++ ) { ledgerCache . setMasterKey ( ( long ) i , masterKey ) ; ledgerCache . putEntryOffset ( i , 0 , i * 8 ) ; ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } ledgerCache . flushLedger ( true ) ; ledgerCache . flushLedger ( true ) ; for ( int i = 1 ; i <= numLedgers / 2 ; i ++ ) { ledgerCache . deleteLedger ( i ) ; } newLedgerCache ( ) ; for ( int i = 1 ; i <= numLedgers ; i ++ ) { try { ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } catch ( NoLedgerException nsle ) { if ( i <= numLedgers / 2 ) { } else { LOG . error ( "Error put entry offset : " , nsle ) ; fail ( "Should not reach here." ) ; } } } } catch ( Exception e ) { LOG . error ( "Got Exception." , e ) ; fail ( "Failed to add entry." ) ; } }
451
@ Test ( timeout = 30000 ) public void testPageEviction ( ) throws Exception { int numLedgers = 10 ; byte [ ] masterKey = "blah" . getBytes ( ) ; conf . setOpenFileLimit ( 999999 ) . setPageLimit ( 3 ) ; newLedgerCache ( ) ; try { for ( int i = 1 ; i <= numLedgers ; i ++ ) { ledgerCache . setMasterKey ( ( long ) i , masterKey ) ; ledgerCache . putEntryOffset ( i , 0 , i * 8 ) ; ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } ledgerCache . flushLedger ( true ) ; ledgerCache . flushLedger ( true ) ; for ( int i = 1 ; i <= numLedgers / 2 ; i ++ ) { ledgerCache . deleteLedger ( i ) ; } newLedgerCache ( ) ; for ( int i = 1 ; i <= numLedgers ; i ++ ) { try { ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } catch ( NoLedgerException nsle ) { if ( i <= numLedgers / 2 ) { } else { LOG . error ( "Error put entry offset : " , nsle ) ; fail ( "Should not reach here." ) ; } } } } catch ( Exception e ) { fail ( "Failed to add entry." ) ; } }
@ Test ( timeout = 30000 ) public void testPageEviction ( ) throws Exception { int numLedgers = 10 ; byte [ ] masterKey = "blah" . getBytes ( ) ; conf . setOpenFileLimit ( 999999 ) . setPageLimit ( 3 ) ; newLedgerCache ( ) ; try { for ( int i = 1 ; i <= numLedgers ; i ++ ) { ledgerCache . setMasterKey ( ( long ) i , masterKey ) ; ledgerCache . putEntryOffset ( i , 0 , i * 8 ) ; ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } ledgerCache . flushLedger ( true ) ; ledgerCache . flushLedger ( true ) ; for ( int i = 1 ; i <= numLedgers / 2 ; i ++ ) { ledgerCache . deleteLedger ( i ) ; } newLedgerCache ( ) ; for ( int i = 1 ; i <= numLedgers ; i ++ ) { try { ledgerCache . putEntryOffset ( i , 1 , i * 8 ) ; } catch ( NoLedgerException nsle ) { if ( i <= numLedgers / 2 ) { } else { LOG . error ( "Error put entry offset : " , nsle ) ; fail ( "Should not reach here." ) ; } } } } catch ( Exception e ) { LOG . error ( "Got Exception." , e ) ; fail ( "Failed to add entry." ) ; } }
452
private static void deleteVLANConfiguration ( ) { ClientResponse response = null ; String url = "http://localhost:8888/opennaas/MACBridgeIOS/catalyst/VLANAwareBridge/deleteVLANConfiguration?vlanID=12345678" ; try { Client client = Client . create ( ) ; WebResource webResource = client . resource ( url ) ; response = webResource . type ( MediaType . APPLICATION_XML ) . delete ( ClientResponse . class ) ; } catch ( Exception e ) { LOGGER . error ( e . getMessage ( ) ) ; } }
private static void deleteVLANConfiguration ( ) { ClientResponse response = null ; String url = "http://localhost:8888/opennaas/MACBridgeIOS/catalyst/VLANAwareBridge/deleteVLANConfiguration?vlanID=12345678" ; try { Client client = Client . create ( ) ; WebResource webResource = client . resource ( url ) ; response = webResource . type ( MediaType . APPLICATION_XML ) . delete ( ClientResponse . class ) ; LOGGER . info ( "Response code: " + response . getStatus ( ) ) ; } catch ( Exception e ) { LOGGER . error ( e . getMessage ( ) ) ; } }
453
private static void deleteVLANConfiguration ( ) { ClientResponse response = null ; String url = "http://localhost:8888/opennaas/MACBridgeIOS/catalyst/VLANAwareBridge/deleteVLANConfiguration?vlanID=12345678" ; try { Client client = Client . create ( ) ; WebResource webResource = client . resource ( url ) ; response = webResource . type ( MediaType . APPLICATION_XML ) . delete ( ClientResponse . class ) ; LOGGER . info ( "Response code: " + response . getStatus ( ) ) ; } catch ( Exception e ) { } }
private static void deleteVLANConfiguration ( ) { ClientResponse response = null ; String url = "http://localhost:8888/opennaas/MACBridgeIOS/catalyst/VLANAwareBridge/deleteVLANConfiguration?vlanID=12345678" ; try { Client client = Client . create ( ) ; WebResource webResource = client . resource ( url ) ; response = webResource . type ( MediaType . APPLICATION_XML ) . delete ( ClientResponse . class ) ; LOGGER . info ( "Response code: " + response . getStatus ( ) ) ; } catch ( Exception e ) { LOGGER . error ( e . getMessage ( ) ) ; } }
454
public void info ( Marker marker , String string , Object o ) { }
public void info ( Marker marker , String string , Object o ) { logger . info ( marker , string , o ) ; }
455
void onSealExclusive ( Batch batch , long elapsedTimeMillis ) { try { doOnSealExclusive ( batch , elapsedTimeMillis ) ; } catch ( Throwable t ) { try { if ( ! concurrentBatch . compareAndSet ( batch , batch . batchNumber ) ) { } log . error ( t , "Serious error during onSealExclusive(), set currentBatch to the failed Batch.batchNumber" ) ; } catch ( Throwable t2 ) { t . addSuppressed ( t2 ) ; } throw t ; } }
void onSealExclusive ( Batch batch , long elapsedTimeMillis ) { try { doOnSealExclusive ( batch , elapsedTimeMillis ) ; } catch ( Throwable t ) { try { if ( ! concurrentBatch . compareAndSet ( batch , batch . batchNumber ) ) { log . error ( "Unexpected failure to set currentBatch to the failed Batch.batchNumber" ) ; } log . error ( t , "Serious error during onSealExclusive(), set currentBatch to the failed Batch.batchNumber" ) ; } catch ( Throwable t2 ) { t . addSuppressed ( t2 ) ; } throw t ; } }
456
void onSealExclusive ( Batch batch , long elapsedTimeMillis ) { try { doOnSealExclusive ( batch , elapsedTimeMillis ) ; } catch ( Throwable t ) { try { if ( ! concurrentBatch . compareAndSet ( batch , batch . batchNumber ) ) { log . error ( "Unexpected failure to set currentBatch to the failed Batch.batchNumber" ) ; } } catch ( Throwable t2 ) { t . addSuppressed ( t2 ) ; } throw t ; } }
void onSealExclusive ( Batch batch , long elapsedTimeMillis ) { try { doOnSealExclusive ( batch , elapsedTimeMillis ) ; } catch ( Throwable t ) { try { if ( ! concurrentBatch . compareAndSet ( batch , batch . batchNumber ) ) { log . error ( "Unexpected failure to set currentBatch to the failed Batch.batchNumber" ) ; } log . error ( t , "Serious error during onSealExclusive(), set currentBatch to the failed Batch.batchNumber" ) ; } catch ( Throwable t2 ) { t . addSuppressed ( t2 ) ; } throw t ; } }
457
public ThingUID getThingUID ( RemoteDevice device ) { try { String manufacturer = device . getDetails ( ) . getManufacturerDetails ( ) . getManufacturer ( ) ; String modelName = device . getDetails ( ) . getModelDetails ( ) . getModelName ( ) ; if ( ! manufacturer . toUpperCase ( ) . contains ( "LAMETRIC" ) || ! modelName . toUpperCase ( ) . contains ( "LAMETRIC TIME" ) ) { return null ; } String serialNumber = device . getDetails ( ) . getSerialNumber ( ) ; return new ThingUID ( THING_TYPE_DEVICE , serialNumber ) ; } catch ( Exception e ) { logger . debug ( "Discovery hit an unexpected error" , e ) ; return null ; } }
public ThingUID getThingUID ( RemoteDevice device ) { try { String manufacturer = device . getDetails ( ) . getManufacturerDetails ( ) . getManufacturer ( ) ; String modelName = device . getDetails ( ) . getModelDetails ( ) . getModelName ( ) ; if ( ! manufacturer . toUpperCase ( ) . contains ( "LAMETRIC" ) || ! modelName . toUpperCase ( ) . contains ( "LAMETRIC TIME" ) ) { return null ; } String serialNumber = device . getDetails ( ) . getSerialNumber ( ) ; logger . debug ( "Discovered '{}' model '{}' thing with serial number '{}'" , device . getDetails ( ) . getFriendlyName ( ) , modelName , serialNumber ) ; return new ThingUID ( THING_TYPE_DEVICE , serialNumber ) ; } catch ( Exception e ) { logger . debug ( "Discovery hit an unexpected error" , e ) ; return null ; } }
458
public ThingUID getThingUID ( RemoteDevice device ) { try { String manufacturer = device . getDetails ( ) . getManufacturerDetails ( ) . getManufacturer ( ) ; String modelName = device . getDetails ( ) . getModelDetails ( ) . getModelName ( ) ; if ( ! manufacturer . toUpperCase ( ) . contains ( "LAMETRIC" ) || ! modelName . toUpperCase ( ) . contains ( "LAMETRIC TIME" ) ) { return null ; } String serialNumber = device . getDetails ( ) . getSerialNumber ( ) ; logger . debug ( "Discovered '{}' model '{}' thing with serial number '{}'" , device . getDetails ( ) . getFriendlyName ( ) , modelName , serialNumber ) ; return new ThingUID ( THING_TYPE_DEVICE , serialNumber ) ; } catch ( Exception e ) { return null ; } }
public ThingUID getThingUID ( RemoteDevice device ) { try { String manufacturer = device . getDetails ( ) . getManufacturerDetails ( ) . getManufacturer ( ) ; String modelName = device . getDetails ( ) . getModelDetails ( ) . getModelName ( ) ; if ( ! manufacturer . toUpperCase ( ) . contains ( "LAMETRIC" ) || ! modelName . toUpperCase ( ) . contains ( "LAMETRIC TIME" ) ) { return null ; } String serialNumber = device . getDetails ( ) . getSerialNumber ( ) ; logger . debug ( "Discovered '{}' model '{}' thing with serial number '{}'" , device . getDetails ( ) . getFriendlyName ( ) , modelName , serialNumber ) ; return new ThingUID ( THING_TYPE_DEVICE , serialNumber ) ; } catch ( Exception e ) { logger . debug ( "Discovery hit an unexpected error" , e ) ; return null ; } }
459
private void configSasAuth ( IotHubConnectionString iotHubConnectionString ) { commonConstructorSetup ( iotHubConnectionString ) ; assertConnectionStringIsNotX509 ( iotHubConnectionString ) ; this . authenticationProvider = new IotHubSasTokenSoftwareAuthenticationProvider ( iotHubConnectionString . getHostName ( ) , iotHubConnectionString . getGatewayHostName ( ) , iotHubConnectionString . getDeviceId ( ) , iotHubConnectionString . getModuleId ( ) , iotHubConnectionString . getSharedAccessKey ( ) , iotHubConnectionString . getSharedAccessToken ( ) ) ; }
private void configSasAuth ( IotHubConnectionString iotHubConnectionString ) { commonConstructorSetup ( iotHubConnectionString ) ; assertConnectionStringIsNotX509 ( iotHubConnectionString ) ; this . authenticationProvider = new IotHubSasTokenSoftwareAuthenticationProvider ( iotHubConnectionString . getHostName ( ) , iotHubConnectionString . getGatewayHostName ( ) , iotHubConnectionString . getDeviceId ( ) , iotHubConnectionString . getModuleId ( ) , iotHubConnectionString . getSharedAccessKey ( ) , iotHubConnectionString . getSharedAccessToken ( ) ) ; log . debug ( "Device configured to use software based SAS authentication provider" ) ; }
460
public void forward ( PortletRequest request , PortletResponse response ) throws PortletException , IOException { if ( isTrace ) { } doDispatch ( request , response , false ) ; }
public void forward ( PortletRequest request , PortletResponse response ) throws PortletException , IOException { if ( isTrace ) { LOG . debug ( "Doing request dispatcher forward for portlet request." ) ; } doDispatch ( request , response , false ) ; }
461
public void insertDataToTable ( String projectId , String datasetId , String tableName , List < Map < String , Object > > rows ) throws IOException , InterruptedException { Sleeper sleeper = Sleeper . DEFAULT ; BackOff backoff = BackOffAdapter . toGcpBackOff ( BACKOFF_FACTORY . backoff ( ) ) ; IOException lastException = null ; do { if ( lastException != null ) { } try { List < Rows > dataRows = rows . stream ( ) . map ( row -> new Rows ( ) . setJson ( row ) ) . collect ( Collectors . toList ( ) ) ; TableDataInsertAllResponse response = this . bqClient . tabledata ( ) . insertAll ( projectId , datasetId , tableName , new TableDataInsertAllRequest ( ) . setRows ( dataRows ) ) . execute ( ) ; if ( response != null && ( response . getInsertErrors ( ) == null || response . getInsertErrors ( ) . isEmpty ( ) ) ) { LOG . info ( "Successfully inserted data into table : " + tableName ) ; return ; } else { if ( response == null || response . getInsertErrors ( ) == null ) { lastException = new IOException ( "Expected valid response from insert data job, but received null." ) ; } else { lastException = new IOException ( String . format ( "Got insertion error (%s)" , response . getInsertErrors ( ) . toString ( ) ) ) ; } } } catch ( IOException e ) { lastException = e ; } } while ( BackOffUtils . next ( sleeper , backoff ) ) ; throw new RuntimeException ( String . format ( "Unable to get BigQuery response after retrying %d times for table (%s)" , MAX_QUERY_RETRIES , tableName ) , lastException ) ; }
public void insertDataToTable ( String projectId , String datasetId , String tableName , List < Map < String , Object > > rows ) throws IOException , InterruptedException { Sleeper sleeper = Sleeper . DEFAULT ; BackOff backoff = BackOffAdapter . toGcpBackOff ( BACKOFF_FACTORY . backoff ( ) ) ; IOException lastException = null ; do { if ( lastException != null ) { LOG . warn ( "Retrying insert table ({}) after exception" , tableName , lastException ) ; } try { List < Rows > dataRows = rows . stream ( ) . map ( row -> new Rows ( ) . setJson ( row ) ) . collect ( Collectors . toList ( ) ) ; TableDataInsertAllResponse response = this . bqClient . tabledata ( ) . insertAll ( projectId , datasetId , tableName , new TableDataInsertAllRequest ( ) . setRows ( dataRows ) ) . execute ( ) ; if ( response != null && ( response . getInsertErrors ( ) == null || response . getInsertErrors ( ) . isEmpty ( ) ) ) { LOG . info ( "Successfully inserted data into table : " + tableName ) ; return ; } else { if ( response == null || response . getInsertErrors ( ) == null ) { lastException = new IOException ( "Expected valid response from insert data job, but received null." ) ; } else { lastException = new IOException ( String . format ( "Got insertion error (%s)" , response . getInsertErrors ( ) . toString ( ) ) ) ; } } } catch ( IOException e ) { lastException = e ; } } while ( BackOffUtils . next ( sleeper , backoff ) ) ; throw new RuntimeException ( String . format ( "Unable to get BigQuery response after retrying %d times for table (%s)" , MAX_QUERY_RETRIES , tableName ) , lastException ) ; }
462
public void insertDataToTable ( String projectId , String datasetId , String tableName , List < Map < String , Object > > rows ) throws IOException , InterruptedException { Sleeper sleeper = Sleeper . DEFAULT ; BackOff backoff = BackOffAdapter . toGcpBackOff ( BACKOFF_FACTORY . backoff ( ) ) ; IOException lastException = null ; do { if ( lastException != null ) { LOG . warn ( "Retrying insert table ({}) after exception" , tableName , lastException ) ; } try { List < Rows > dataRows = rows . stream ( ) . map ( row -> new Rows ( ) . setJson ( row ) ) . collect ( Collectors . toList ( ) ) ; TableDataInsertAllResponse response = this . bqClient . tabledata ( ) . insertAll ( projectId , datasetId , tableName , new TableDataInsertAllRequest ( ) . setRows ( dataRows ) ) . execute ( ) ; if ( response != null && ( response . getInsertErrors ( ) == null || response . getInsertErrors ( ) . isEmpty ( ) ) ) { return ; } else { if ( response == null || response . getInsertErrors ( ) == null ) { lastException = new IOException ( "Expected valid response from insert data job, but received null." ) ; } else { lastException = new IOException ( String . format ( "Got insertion error (%s)" , response . getInsertErrors ( ) . toString ( ) ) ) ; } } } catch ( IOException e ) { lastException = e ; } } while ( BackOffUtils . next ( sleeper , backoff ) ) ; throw new RuntimeException ( String . format ( "Unable to get BigQuery response after retrying %d times for table (%s)" , MAX_QUERY_RETRIES , tableName ) , lastException ) ; }
public void insertDataToTable ( String projectId , String datasetId , String tableName , List < Map < String , Object > > rows ) throws IOException , InterruptedException { Sleeper sleeper = Sleeper . DEFAULT ; BackOff backoff = BackOffAdapter . toGcpBackOff ( BACKOFF_FACTORY . backoff ( ) ) ; IOException lastException = null ; do { if ( lastException != null ) { LOG . warn ( "Retrying insert table ({}) after exception" , tableName , lastException ) ; } try { List < Rows > dataRows = rows . stream ( ) . map ( row -> new Rows ( ) . setJson ( row ) ) . collect ( Collectors . toList ( ) ) ; TableDataInsertAllResponse response = this . bqClient . tabledata ( ) . insertAll ( projectId , datasetId , tableName , new TableDataInsertAllRequest ( ) . setRows ( dataRows ) ) . execute ( ) ; if ( response != null && ( response . getInsertErrors ( ) == null || response . getInsertErrors ( ) . isEmpty ( ) ) ) { LOG . info ( "Successfully inserted data into table : " + tableName ) ; return ; } else { if ( response == null || response . getInsertErrors ( ) == null ) { lastException = new IOException ( "Expected valid response from insert data job, but received null." ) ; } else { lastException = new IOException ( String . format ( "Got insertion error (%s)" , response . getInsertErrors ( ) . toString ( ) ) ) ; } } } catch ( IOException e ) { lastException = e ; } } while ( BackOffUtils . next ( sleeper , backoff ) ) ; throw new RuntimeException ( String . format ( "Unable to get BigQuery response after retrying %d times for table (%s)" , MAX_QUERY_RETRIES , tableName ) , lastException ) ; }
463
Future < List < ModuleDescriptor > > pullSmart ( String remoteUrl , Collection < ModuleDescriptor > localList ) { return getList ( remoteUrl , localList ) . compose ( remoteList -> { List < ModuleDescriptor > mustAddList = new LinkedList < > ( ) ; List < ModuleDescriptor > briefList = new LinkedList < > ( ) ; Set < String > enabled = new TreeSet < > ( ) ; for ( ModuleDescriptor md : localList ) { enabled . add ( md . getId ( ) ) ; } for ( ModuleDescriptor md : remoteList ) { if ( ! "okapi" . equals ( md . getProduct ( ) ) && ! enabled . contains ( md . getId ( ) ) ) { mustAddList . add ( md ) ; briefList . add ( new ModuleDescriptor ( md , true ) ) ; } } return moduleManager . createList ( mustAddList , true , true , true , true ) . map ( briefList ) ; } ) ; }
Future < List < ModuleDescriptor > > pullSmart ( String remoteUrl , Collection < ModuleDescriptor > localList ) { return getList ( remoteUrl , localList ) . compose ( remoteList -> { List < ModuleDescriptor > mustAddList = new LinkedList < > ( ) ; List < ModuleDescriptor > briefList = new LinkedList < > ( ) ; Set < String > enabled = new TreeSet < > ( ) ; for ( ModuleDescriptor md : localList ) { enabled . add ( md . getId ( ) ) ; } for ( ModuleDescriptor md : remoteList ) { if ( ! "okapi" . equals ( md . getProduct ( ) ) && ! enabled . contains ( md . getId ( ) ) ) { mustAddList . add ( md ) ; briefList . add ( new ModuleDescriptor ( md , true ) ) ; } } logger . info ( "pull: {} MDs to insert" , mustAddList . size ( ) ) ; return moduleManager . createList ( mustAddList , true , true , true , true ) . map ( briefList ) ; } ) ; }
464
public void close ( ) throws Exception { if ( singletonServiceRegistration != null ) { try { singletonServiceRegistration . close ( ) ; } catch ( Exception e ) { LOG . warn ( "{} closed unexpectedly" , e . getMessage ( ) ) ; } singletonServiceRegistration = null ; } }
public void close ( ) throws Exception { LOG . info ( "Clustering provider closed for {}" , this . getClass ( ) . getSimpleName ( ) ) ; if ( singletonServiceRegistration != null ) { try { singletonServiceRegistration . close ( ) ; } catch ( Exception e ) { LOG . warn ( "{} closed unexpectedly" , e . getMessage ( ) ) ; } singletonServiceRegistration = null ; } }
465
public String replace ( Region region , String label ) { if ( label . startsWith ( "http://" ) ) { try { return new String ( HttpUtil . httpGet ( label , 0 ) . getBody ( ) , "UTF-8" ) ; } catch ( Exception ex ) { throw new RuntimeException ( ex ) ; } } JobMacro macro = jobMacroManager . getEntity ( label ) ; String target = null ; if ( macro != null ) { target = macro . getMacro ( ) ; } else { List < String > aliases = aliasManager . aliasToJobs ( label ) ; if ( aliases != null ) { target = joiner . join ( aliases ) ; } } if ( target != null ) { List < String > contents = new ArrayList < > ( ) ; List < String > delimiters = new ArrayList < > ( ) ; CommentTokenizer commentTokenizer = new CommentTokenizer ( target ) ; commentTokenizer . tokenize ( contents , delimiters ) ; StringBuilder builder = new StringBuilder ( ) ; int length = contents . size ( ) ; builder . append ( contents . get ( 0 ) ) ; builder . append ( delimiters . get ( 0 ) ) ; for ( int i = 1 ; i < length ; i ++ ) { String delimiter = delimiters . get ( i - 1 ) ; if ( delimiter . equals ( "//" ) || delimiter . equals ( "/*" ) ) { builder . append ( macroPattern . matcher ( contents . get ( i ) ) . replaceAll ( "%_{$1}_%" ) ) ; } else { builder . append ( contents . get ( i ) ) ; } builder . append ( delimiters . get ( i ) ) ; } return builder . toString ( ) ; } else { String msg = "non-existent macro referenced : " + label ; throw new RuntimeException ( msg ) ; } }
public String replace ( Region region , String label ) { if ( label . startsWith ( "http://" ) ) { try { return new String ( HttpUtil . httpGet ( label , 0 ) . getBody ( ) , "UTF-8" ) ; } catch ( Exception ex ) { throw new RuntimeException ( ex ) ; } } JobMacro macro = jobMacroManager . getEntity ( label ) ; String target = null ; if ( macro != null ) { target = macro . getMacro ( ) ; } else { List < String > aliases = aliasManager . aliasToJobs ( label ) ; if ( aliases != null ) { target = joiner . join ( aliases ) ; } } if ( target != null ) { List < String > contents = new ArrayList < > ( ) ; List < String > delimiters = new ArrayList < > ( ) ; CommentTokenizer commentTokenizer = new CommentTokenizer ( target ) ; commentTokenizer . tokenize ( contents , delimiters ) ; StringBuilder builder = new StringBuilder ( ) ; int length = contents . size ( ) ; builder . append ( contents . get ( 0 ) ) ; builder . append ( delimiters . get ( 0 ) ) ; for ( int i = 1 ; i < length ; i ++ ) { String delimiter = delimiters . get ( i - 1 ) ; if ( delimiter . equals ( "//" ) || delimiter . equals ( "/*" ) ) { builder . append ( macroPattern . matcher ( contents . get ( i ) ) . replaceAll ( "%_{$1}_%" ) ) ; } else { builder . append ( contents . get ( i ) ) ; } builder . append ( delimiters . get ( i ) ) ; } return builder . toString ( ) ; } else { String msg = "non-existent macro referenced : " + label ; log . warn ( msg ) ; throw new RuntimeException ( msg ) ; } }
466
public void doFilter ( ServletRequest req , ServletResponse resp , FilterChain chain ) throws IOException , ServletException { long before = System . currentTimeMillis ( ) ; try { Base . open ( jndiName ) ; Base . openTransaction ( ) ; chain . doFilter ( req , resp ) ; Base . commitTransaction ( ) ; } catch ( IOException e ) { Base . rollbackTransaction ( ) ; throw e ; } catch ( ServletException e ) { Base . rollbackTransaction ( ) ; throw e ; } finally { Base . close ( ) ; } }
public void doFilter ( ServletRequest req , ServletResponse resp , FilterChain chain ) throws IOException , ServletException { long before = System . currentTimeMillis ( ) ; try { Base . open ( jndiName ) ; Base . openTransaction ( ) ; chain . doFilter ( req , resp ) ; Base . commitTransaction ( ) ; } catch ( IOException e ) { Base . rollbackTransaction ( ) ; throw e ; } catch ( ServletException e ) { Base . rollbackTransaction ( ) ; throw e ; } finally { Base . close ( ) ; } LOGGER . info ( "Processing took: {} milliseconds" , System . currentTimeMillis ( ) - before ) ; }
467
public static double greatCircleAzimuthD ( LatLonAlt start , LatLonAlt end ) { double startLat = start . getLatD ( ) * MathUtil . DEG_TO_RAD ; double startLon = start . getLonD ( ) * MathUtil . DEG_TO_RAD ; double endLat = end . getLatD ( ) * MathUtil . DEG_TO_RAD ; double endLon = end . getLonD ( ) * MathUtil . DEG_TO_RAD ; if ( MathUtil . isZero ( startLon - endLon ) ) { if ( MathUtil . isZero ( startLat - endLat ) ) { return 0. ; } return startLat > endLat ? Math . PI : 0. ; } double y = Math . cos ( endLat ) * Math . sin ( endLon - startLon ) ; double x = Math . cos ( startLat ) * Math . sin ( endLat ) - Math . sin ( startLat ) * Math . cos ( endLat ) * Math . cos ( endLon - startLon ) ; double azimuthRadians = Math . atan2 ( y , x ) ; if ( Double . isNaN ( azimuthRadians ) ) { return 0. ; } return azimuthRadians * MathUtil . RAD_TO_DEG ; }
public static double greatCircleAzimuthD ( LatLonAlt start , LatLonAlt end ) { double startLat = start . getLatD ( ) * MathUtil . DEG_TO_RAD ; double startLon = start . getLonD ( ) * MathUtil . DEG_TO_RAD ; double endLat = end . getLatD ( ) * MathUtil . DEG_TO_RAD ; double endLon = end . getLonD ( ) * MathUtil . DEG_TO_RAD ; if ( MathUtil . isZero ( startLon - endLon ) ) { if ( MathUtil . isZero ( startLat - endLat ) ) { return 0. ; } return startLat > endLat ? Math . PI : 0. ; } double y = Math . cos ( endLat ) * Math . sin ( endLon - startLon ) ; double x = Math . cos ( startLat ) * Math . sin ( endLat ) - Math . sin ( startLat ) * Math . cos ( endLat ) * Math . cos ( endLon - startLon ) ; double azimuthRadians = Math . atan2 ( y , x ) ; if ( Double . isNaN ( azimuthRadians ) ) { LOGGER . error ( "greatCircleAzimuth calculation produced undefined results." ) ; return 0. ; } return azimuthRadians * MathUtil . RAD_TO_DEG ; }
468
public synchronized void run ( ) { isFlushTaskDue . set ( false ) ; long flushTillSeq = Math . max ( perSourceReceivedSeqNo . get ( bootstrapServers [ 0 ] ) , perSourceReceivedSeqNo . get ( bootstrapServers [ 1 ] ) ) ; if ( log . isDebugEnabled ( ) ) { } forceFlushBuffer ( flushTillSeq ) ; }
public synchronized void run ( ) { isFlushTaskDue . set ( false ) ; long flushTillSeq = Math . max ( perSourceReceivedSeqNo . get ( bootstrapServers [ 0 ] ) , perSourceReceivedSeqNo . get ( bootstrapServers [ 1 ] ) ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( "Executing the buffer flushing task. Flushing buffers till " + flushTillSeq ) ; } forceFlushBuffer ( flushTillSeq ) ; }
469
public static String confirmationMethodHashS256 ( String certificateAsPem ) { if ( org . apache . commons . lang . StringUtils . isBlank ( certificateAsPem ) ) { return "" ; } try { certificateAsPem = org . apache . commons . lang . StringUtils . remove ( certificateAsPem , "-----BEGIN CERTIFICATE-----" ) ; certificateAsPem = org . apache . commons . lang . StringUtils . remove ( certificateAsPem , "-----END CERTIFICATE-----" ) ; certificateAsPem = StringUtils . replace ( certificateAsPem , "\n" , "" ) ; return Base64Util . base64urlencode ( DigestUtils . sha256 ( Base64 . decode ( certificateAsPem ) ) ) ; } catch ( Exception e ) { return "" ; } }
public static String confirmationMethodHashS256 ( String certificateAsPem ) { if ( org . apache . commons . lang . StringUtils . isBlank ( certificateAsPem ) ) { return "" ; } try { certificateAsPem = org . apache . commons . lang . StringUtils . remove ( certificateAsPem , "-----BEGIN CERTIFICATE-----" ) ; certificateAsPem = org . apache . commons . lang . StringUtils . remove ( certificateAsPem , "-----END CERTIFICATE-----" ) ; certificateAsPem = StringUtils . replace ( certificateAsPem , "\n" , "" ) ; return Base64Util . base64urlencode ( DigestUtils . sha256 ( Base64 . decode ( certificateAsPem ) ) ) ; } catch ( Exception e ) { log . error ( "Failed to hash certificate: " + certificateAsPem , e ) ; return "" ; } }
470
public void execute ( ) { if ( skip ) { return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
471
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
472
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
473
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
474
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
475
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; }
public void execute ( ) { if ( skip ) { logger . info ( "Skipping XML ingestion" ) ; return ; } long start = System . currentTimeMillis ( ) ; logger . info ( "Starting XML Ingester." ) ; for ( String xmlDocumentLocation : xmlDocumentLocations ) { logger . info ( "Ingesting XML documents listed in [{}]" , xmlDocumentLocation ) ; } List < XmlDocCollection > xmlDocumentCollections = getXmlDocCollectionList ( xmlDocumentLocations ) ; logger . info ( "Found {} files to ingest." , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; Collection < XmlDocCollection > failedXmlDocumentCollections = ingest ( xmlDocumentCollections ) ; validateNoFailures ( failedXmlDocumentCollections ) ; logger . info ( "There were zero failures ingesting {} XML documents" , Integer . valueOf ( xmlDocumentCollections . size ( ) ) ) ; long end = System . currentTimeMillis ( ) - start ; logger . info ( "Finished ingesting bootstrap XML - {}" , FormatUtils . getTime ( end ) ) ; }
476
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
477
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
478
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
479
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
private static String extractPeerDNFromClientSSLSocket ( SSLSocket sslSocket ) throws CertificateException { String dn = null ; SslContextFactory . ClientAuth clientAuth = getClientAuthStatus ( sslSocket ) ; logger . debug ( "SSL Socket client auth status: {}" , clientAuth ) ; if ( clientAuth != SslContextFactory . ClientAuth . NONE ) { try { final Certificate [ ] certChains = sslSocket . getSession ( ) . getPeerCertificates ( ) ; if ( certChains != null && certChains . length > 0 ) { X509Certificate x509Certificate = convertAbstractX509Certificate ( certChains [ 0 ] ) ; dn = x509Certificate . getSubjectDN ( ) . getName ( ) . trim ( ) ; logger . debug ( "Extracted DN={} from client certificate" , dn ) ; } } catch ( SSLPeerUnverifiedException e ) { if ( e . getMessage ( ) . equals ( PEER_NOT_AUTHENTICATED_MSG ) ) { logger . error ( "The incoming request did not contain client certificates and thus the DN cannot" + " be extracted. Check that the other endpoint is providing a complete client certificate chain" ) ; } if ( clientAuth == SslContextFactory . ClientAuth . WANT ) { logger . warn ( "Suppressing missing client certificate exception because client auth is set to 'want'" ) ; return dn ; } throw new CertificateException ( e ) ; } } return dn ; }
480
protected boolean onUnknownException ( Exception e , long nextDelayMs ) { return true ; }
protected boolean onUnknownException ( Exception e , long nextDelayMs ) { logger . error ( String . format ( "Unknown error during reconnection to %s, scheduling retry in %d milliseconds" , host , nextDelayMs ) , e ) ; return true ; }
481
public static String getRack ( String host ) { loadTopology ( ) ; if ( hostToRack . containsKey ( host ) ) { return hostToRack . get ( host ) ; } return host + "__RACK_NOT_FOUND" ; }
public static String getRack ( String host ) { loadTopology ( ) ; if ( hostToRack . containsKey ( host ) ) { return hostToRack . get ( host ) ; } LOG . warn ( "Did not find rack for host: " + host ) ; return host + "__RACK_NOT_FOUND" ; }
482
private Map < String , List < ModifiedTimeSortableFile > > getAllFiles ( String perentDirectory ) { Map < String , List < ModifiedTimeSortableFile > > groupedFiles = null ; try { groupedFiles = new HashMap < String , List < ModifiedTimeSortableFile > > ( ) ; List < ModifiedTimeSortableFile > files = null ; File dir = new File ( perentDirectory ) ; String [ ] names = dir . list ( ) ; String uniquePartOfName ; for ( int i = 0 ; i < names . length ; i ++ ) { int truncateToLength = names [ i ] . length ( ) - datePattern . length ( ) ; if ( names [ i ] . endsWith ( typeOfCompression ) ) { truncateToLength = truncateToLength - ( typeOfCompression . length ( ) + 1 ) ; } uniquePartOfName = names [ i ] . substring ( 0 , truncateToLength ) ; if ( groupedFiles . containsKey ( uniquePartOfName ) ) { files = groupedFiles . get ( uniquePartOfName ) ; } else { files = new ArrayList < ModifiedTimeSortableFile > ( ) ; } files . add ( new ModifiedTimeSortableFile ( dir + System . getProperty ( "file.separator" ) + names [ i ] ) ) ; groupedFiles . put ( uniquePartOfName , files ) ; } } catch ( Exception e ) { } return groupedFiles ; }
private Map < String , List < ModifiedTimeSortableFile > > getAllFiles ( String perentDirectory ) { Map < String , List < ModifiedTimeSortableFile > > groupedFiles = null ; try { groupedFiles = new HashMap < String , List < ModifiedTimeSortableFile > > ( ) ; List < ModifiedTimeSortableFile > files = null ; File dir = new File ( perentDirectory ) ; String [ ] names = dir . list ( ) ; String uniquePartOfName ; for ( int i = 0 ; i < names . length ; i ++ ) { int truncateToLength = names [ i ] . length ( ) - datePattern . length ( ) ; if ( names [ i ] . endsWith ( typeOfCompression ) ) { truncateToLength = truncateToLength - ( typeOfCompression . length ( ) + 1 ) ; } uniquePartOfName = names [ i ] . substring ( 0 , truncateToLength ) ; if ( groupedFiles . containsKey ( uniquePartOfName ) ) { files = groupedFiles . get ( uniquePartOfName ) ; } else { files = new ArrayList < ModifiedTimeSortableFile > ( ) ; } files . add ( new ModifiedTimeSortableFile ( dir + System . getProperty ( "file.separator" ) + names [ i ] ) ) ; groupedFiles . put ( uniquePartOfName , files ) ; } } catch ( Exception e ) { LogLog . error ( "LogLog Error during retrieving all files of parent folder. " , e ) ; } return groupedFiles ; }
483
private static DatagramPacket [ ] encode ( MulticastAnnouncement announcement , int maxPacketSize , InvocationConstraints constraints ) throws IOException { final int MIN_DATA_LEN = 28 ; checkConstraints ( constraints ) ; LinkedList groups = new LinkedList ( ) ; byte [ ] host = Plaintext . toUtf ( announcement . getHost ( ) ) ; String [ ] g = announcement . getGroups ( ) ; for ( int i = 0 ; i < g . length ; i ++ ) { byte [ ] b = Plaintext . toUtf ( g [ i ] ) ; if ( b . length + host . length + MIN_DATA_LEN > maxPacketSize ) { throw new DiscoveryProtocolException ( "group too long: " + g [ i ] ) ; } groups . add ( b ) ; } List packets = new ArrayList ( ) ; do { ByteBuffer buf = ByteBuffer . allocate ( maxPacketSize ) ; buf . putInt ( PROTOCOL_VERSION_1 ) ; buf . put ( ANNOUNCEMENT_TYPE ) ; buf . put ( host ) ; buf . putInt ( announcement . getPort ( ) ) ; ServiceID id = announcement . getServiceID ( ) ; buf . putLong ( id . getMostSignificantBits ( ) ) ; buf . putLong ( id . getLeastSignificantBits ( ) ) ; int ngroupsPos = buf . position ( ) ; int ngroups = 0 ; buf . putInt ( ngroups ) ; while ( ! groups . isEmpty ( ) ) { if ( ( ( byte [ ] ) groups . getFirst ( ) ) . length > buf . remaining ( ) ) { break ; } buf . put ( ( byte [ ] ) groups . removeFirst ( ) ) ; ngroups ++ ; } if ( ngroups > 0 ) { buf . putInt ( ngroupsPos , ngroups ) ; } packets . add ( new DatagramPacket ( buf . array ( ) , buf . position ( ) , Constants . getAnnouncementAddress ( ) , Constants . getDiscoveryPort ( ) ) ) ; } while ( ! groups . isEmpty ( ) ) ; if ( logger . isTraceEnabled ( ) ) { } return ( DatagramPacket [ ] ) packets . toArray ( new DatagramPacket [ packets . size ( ) ] ) ; }
private static DatagramPacket [ ] encode ( MulticastAnnouncement announcement , int maxPacketSize , InvocationConstraints constraints ) throws IOException { final int MIN_DATA_LEN = 28 ; checkConstraints ( constraints ) ; LinkedList groups = new LinkedList ( ) ; byte [ ] host = Plaintext . toUtf ( announcement . getHost ( ) ) ; String [ ] g = announcement . getGroups ( ) ; for ( int i = 0 ; i < g . length ; i ++ ) { byte [ ] b = Plaintext . toUtf ( g [ i ] ) ; if ( b . length + host . length + MIN_DATA_LEN > maxPacketSize ) { throw new DiscoveryProtocolException ( "group too long: " + g [ i ] ) ; } groups . add ( b ) ; } List packets = new ArrayList ( ) ; do { ByteBuffer buf = ByteBuffer . allocate ( maxPacketSize ) ; buf . putInt ( PROTOCOL_VERSION_1 ) ; buf . put ( ANNOUNCEMENT_TYPE ) ; buf . put ( host ) ; buf . putInt ( announcement . getPort ( ) ) ; ServiceID id = announcement . getServiceID ( ) ; buf . putLong ( id . getMostSignificantBits ( ) ) ; buf . putLong ( id . getLeastSignificantBits ( ) ) ; int ngroupsPos = buf . position ( ) ; int ngroups = 0 ; buf . putInt ( ngroups ) ; while ( ! groups . isEmpty ( ) ) { if ( ( ( byte [ ] ) groups . getFirst ( ) ) . length > buf . remaining ( ) ) { break ; } buf . put ( ( byte [ ] ) groups . removeFirst ( ) ) ; ngroups ++ ; } if ( ngroups > 0 ) { buf . putInt ( ngroupsPos , ngroups ) ; } packets . add ( new DatagramPacket ( buf . array ( ) , buf . position ( ) , Constants . getAnnouncementAddress ( ) , Constants . getDiscoveryPort ( ) ) ) ; } while ( ! groups . isEmpty ( ) ) ; if ( logger . isTraceEnabled ( ) ) { logger . trace ( "encoded {}" , announcement ) ; } return ( DatagramPacket [ ] ) packets . toArray ( new DatagramPacket [ packets . size ( ) ] ) ; }
484
protected void doIterationMDS ( SparkComputationGraph network , JavaRDD < MultiDataSet > split , int splitNum , int numSplits ) { if ( collectTrainingStats ) stats . logMapPartitionsStart ( ) ; JavaRDD < MultiDataSet > splitData = split ; if ( collectTrainingStats ) stats . logRepartitionStart ( ) ; if ( repartitioner != null ) { log . info ( "Repartitioning training data using repartitioner: {}" , repartitioner ) ; int minPerWorker = Math . max ( 1 , batchSizePerWorker / rddDataSetNumExamples ) ; splitData = repartitioner . repartition ( splitData , minPerWorker , numWorkers ) ; } else { log . info ( "Repartitioning training data using SparkUtils repartitioner" ) ; splitData = SparkUtils . repartitionEqually ( splitData , repartition , numWorkers ) ; } int nPartitions = splitData . partitions ( ) . size ( ) ; if ( collectTrainingStats && repartition != Repartition . Never ) stats . logRepartitionEnd ( ) ; FlatMapFunction < Iterator < MultiDataSet > , SharedTrainingResult > function = new SharedFlatMapMultiDataSet < > ( getWorkerInstance ( network ) ) ; JavaRDD < SharedTrainingResult > result = splitData . mapPartitions ( function ) ; processResults ( null , network , result ) ; if ( collectTrainingStats ) stats . logMapPartitionsEnd ( nPartitions ) ; }
protected void doIterationMDS ( SparkComputationGraph network , JavaRDD < MultiDataSet > split , int splitNum , int numSplits ) { log . info ( "Starting training of split {} of {}. workerMiniBatchSize={}, thresholdAlgorithm={}, Configured for {} workers" , splitNum , numSplits , batchSizePerWorker , thresholdAlgorithm , numWorkers ) ; if ( collectTrainingStats ) stats . logMapPartitionsStart ( ) ; JavaRDD < MultiDataSet > splitData = split ; if ( collectTrainingStats ) stats . logRepartitionStart ( ) ; if ( repartitioner != null ) { log . info ( "Repartitioning training data using repartitioner: {}" , repartitioner ) ; int minPerWorker = Math . max ( 1 , batchSizePerWorker / rddDataSetNumExamples ) ; splitData = repartitioner . repartition ( splitData , minPerWorker , numWorkers ) ; } else { log . info ( "Repartitioning training data using SparkUtils repartitioner" ) ; splitData = SparkUtils . repartitionEqually ( splitData , repartition , numWorkers ) ; } int nPartitions = splitData . partitions ( ) . size ( ) ; if ( collectTrainingStats && repartition != Repartition . Never ) stats . logRepartitionEnd ( ) ; FlatMapFunction < Iterator < MultiDataSet > , SharedTrainingResult > function = new SharedFlatMapMultiDataSet < > ( getWorkerInstance ( network ) ) ; JavaRDD < SharedTrainingResult > result = splitData . mapPartitions ( function ) ; processResults ( null , network , result ) ; if ( collectTrainingStats ) stats . logMapPartitionsEnd ( nPartitions ) ; }
485
protected void doIterationMDS ( SparkComputationGraph network , JavaRDD < MultiDataSet > split , int splitNum , int numSplits ) { log . info ( "Starting training of split {} of {}. workerMiniBatchSize={}, thresholdAlgorithm={}, Configured for {} workers" , splitNum , numSplits , batchSizePerWorker , thresholdAlgorithm , numWorkers ) ; if ( collectTrainingStats ) stats . logMapPartitionsStart ( ) ; JavaRDD < MultiDataSet > splitData = split ; if ( collectTrainingStats ) stats . logRepartitionStart ( ) ; if ( repartitioner != null ) { int minPerWorker = Math . max ( 1 , batchSizePerWorker / rddDataSetNumExamples ) ; splitData = repartitioner . repartition ( splitData , minPerWorker , numWorkers ) ; } else { log . info ( "Repartitioning training data using SparkUtils repartitioner" ) ; splitData = SparkUtils . repartitionEqually ( splitData , repartition , numWorkers ) ; } int nPartitions = splitData . partitions ( ) . size ( ) ; if ( collectTrainingStats && repartition != Repartition . Never ) stats . logRepartitionEnd ( ) ; FlatMapFunction < Iterator < MultiDataSet > , SharedTrainingResult > function = new SharedFlatMapMultiDataSet < > ( getWorkerInstance ( network ) ) ; JavaRDD < SharedTrainingResult > result = splitData . mapPartitions ( function ) ; processResults ( null , network , result ) ; if ( collectTrainingStats ) stats . logMapPartitionsEnd ( nPartitions ) ; }
protected void doIterationMDS ( SparkComputationGraph network , JavaRDD < MultiDataSet > split , int splitNum , int numSplits ) { log . info ( "Starting training of split {} of {}. workerMiniBatchSize={}, thresholdAlgorithm={}, Configured for {} workers" , splitNum , numSplits , batchSizePerWorker , thresholdAlgorithm , numWorkers ) ; if ( collectTrainingStats ) stats . logMapPartitionsStart ( ) ; JavaRDD < MultiDataSet > splitData = split ; if ( collectTrainingStats ) stats . logRepartitionStart ( ) ; if ( repartitioner != null ) { log . info ( "Repartitioning training data using repartitioner: {}" , repartitioner ) ; int minPerWorker = Math . max ( 1 , batchSizePerWorker / rddDataSetNumExamples ) ; splitData = repartitioner . repartition ( splitData , minPerWorker , numWorkers ) ; } else { log . info ( "Repartitioning training data using SparkUtils repartitioner" ) ; splitData = SparkUtils . repartitionEqually ( splitData , repartition , numWorkers ) ; } int nPartitions = splitData . partitions ( ) . size ( ) ; if ( collectTrainingStats && repartition != Repartition . Never ) stats . logRepartitionEnd ( ) ; FlatMapFunction < Iterator < MultiDataSet > , SharedTrainingResult > function = new SharedFlatMapMultiDataSet < > ( getWorkerInstance ( network ) ) ; JavaRDD < SharedTrainingResult > result = splitData . mapPartitions ( function ) ; processResults ( null , network , result ) ; if ( collectTrainingStats ) stats . logMapPartitionsEnd ( nPartitions ) ; }
486
protected void doIterationMDS ( SparkComputationGraph network , JavaRDD < MultiDataSet > split , int splitNum , int numSplits ) { log . info ( "Starting training of split {} of {}. workerMiniBatchSize={}, thresholdAlgorithm={}, Configured for {} workers" , splitNum , numSplits , batchSizePerWorker , thresholdAlgorithm , numWorkers ) ; if ( collectTrainingStats ) stats . logMapPartitionsStart ( ) ; JavaRDD < MultiDataSet > splitData = split ; if ( collectTrainingStats ) stats . logRepartitionStart ( ) ; if ( repartitioner != null ) { log . info ( "Repartitioning training data using repartitioner: {}" , repartitioner ) ; int minPerWorker = Math . max ( 1 , batchSizePerWorker / rddDataSetNumExamples ) ; splitData = repartitioner . repartition ( splitData , minPerWorker , numWorkers ) ; } else { splitData = SparkUtils . repartitionEqually ( splitData , repartition , numWorkers ) ; } int nPartitions = splitData . partitions ( ) . size ( ) ; if ( collectTrainingStats && repartition != Repartition . Never ) stats . logRepartitionEnd ( ) ; FlatMapFunction < Iterator < MultiDataSet > , SharedTrainingResult > function = new SharedFlatMapMultiDataSet < > ( getWorkerInstance ( network ) ) ; JavaRDD < SharedTrainingResult > result = splitData . mapPartitions ( function ) ; processResults ( null , network , result ) ; if ( collectTrainingStats ) stats . logMapPartitionsEnd ( nPartitions ) ; }
protected void doIterationMDS ( SparkComputationGraph network , JavaRDD < MultiDataSet > split , int splitNum , int numSplits ) { log . info ( "Starting training of split {} of {}. workerMiniBatchSize={}, thresholdAlgorithm={}, Configured for {} workers" , splitNum , numSplits , batchSizePerWorker , thresholdAlgorithm , numWorkers ) ; if ( collectTrainingStats ) stats . logMapPartitionsStart ( ) ; JavaRDD < MultiDataSet > splitData = split ; if ( collectTrainingStats ) stats . logRepartitionStart ( ) ; if ( repartitioner != null ) { log . info ( "Repartitioning training data using repartitioner: {}" , repartitioner ) ; int minPerWorker = Math . max ( 1 , batchSizePerWorker / rddDataSetNumExamples ) ; splitData = repartitioner . repartition ( splitData , minPerWorker , numWorkers ) ; } else { log . info ( "Repartitioning training data using SparkUtils repartitioner" ) ; splitData = SparkUtils . repartitionEqually ( splitData , repartition , numWorkers ) ; } int nPartitions = splitData . partitions ( ) . size ( ) ; if ( collectTrainingStats && repartition != Repartition . Never ) stats . logRepartitionEnd ( ) ; FlatMapFunction < Iterator < MultiDataSet > , SharedTrainingResult > function = new SharedFlatMapMultiDataSet < > ( getWorkerInstance ( network ) ) ; JavaRDD < SharedTrainingResult > result = splitData . mapPartitions ( function ) ; processResults ( null , network , result ) ; if ( collectTrainingStats ) stats . logMapPartitionsEnd ( nPartitions ) ; }
487
private void preparePremasterSecret ( SrpClientKeyExchangeMessage msg ) { msg . getComputations ( ) . setPremasterSecret ( premasterSecret ) ; premasterSecret = msg . getComputations ( ) . getPremasterSecret ( ) . getValue ( ) ; }
private void preparePremasterSecret ( SrpClientKeyExchangeMessage msg ) { msg . getComputations ( ) . setPremasterSecret ( premasterSecret ) ; premasterSecret = msg . getComputations ( ) . getPremasterSecret ( ) . getValue ( ) ; LOGGER . debug ( "PremasterSecret: " + ArrayConverter . bytesToHexString ( msg . getComputations ( ) . getPremasterSecret ( ) . getValue ( ) ) ) ; }
488
protected void handleUnsupportedAttributes ( Collection < Attribute > attributes ) { if ( ! attributes . isEmpty ( ) ) { StringBuilder sb = new StringBuilder ( ) ; for ( Attribute attr : attributes ) { if ( sb . length ( ) > 0 ) { sb . append ( ", " ) ; } sb . append ( attr . name ( ) ) ; } switch ( nativeFileSystemView . getUnsupportedAttributePolicy ( ) ) { case Ignore : break ; case Warn : break ; case ThrowException : throw new UnsupportedOperationException ( "Unsupported attributes: " + sb . toString ( ) ) ; } } }
protected void handleUnsupportedAttributes ( Collection < Attribute > attributes ) { if ( ! attributes . isEmpty ( ) ) { StringBuilder sb = new StringBuilder ( ) ; for ( Attribute attr : attributes ) { if ( sb . length ( ) > 0 ) { sb . append ( ", " ) ; } sb . append ( attr . name ( ) ) ; } switch ( nativeFileSystemView . getUnsupportedAttributePolicy ( ) ) { case Ignore : break ; case Warn : LOG . warn ( "Unsupported attributes: " + sb . toString ( ) ) ; break ; case ThrowException : throw new UnsupportedOperationException ( "Unsupported attributes: " + sb . toString ( ) ) ; } } }
489
public void loadHotModifiedProps ( ) throws QueryProcessException { URL url = getPropsUrl ( ) ; if ( url == null ) { return ; } try ( InputStream inputStream = url . openStream ( ) ) { logger . info ( "Start to reload config file {}" , url ) ; Properties properties = new Properties ( ) ; properties . load ( inputStream ) ; loadHotModifiedProps ( properties ) ; } catch ( Exception e ) { logger . warn ( "Fail to reload config file {}" , url , e ) ; throw new QueryProcessException ( String . format ( "Fail to reload config file %s because %s" , url , e . getMessage ( ) ) ) ; } }
public void loadHotModifiedProps ( ) throws QueryProcessException { URL url = getPropsUrl ( ) ; if ( url == null ) { logger . warn ( "Couldn't load the configuration from any of the known sources." ) ; return ; } try ( InputStream inputStream = url . openStream ( ) ) { logger . info ( "Start to reload config file {}" , url ) ; Properties properties = new Properties ( ) ; properties . load ( inputStream ) ; loadHotModifiedProps ( properties ) ; } catch ( Exception e ) { logger . warn ( "Fail to reload config file {}" , url , e ) ; throw new QueryProcessException ( String . format ( "Fail to reload config file %s because %s" , url , e . getMessage ( ) ) ) ; } }
490
public void loadHotModifiedProps ( ) throws QueryProcessException { URL url = getPropsUrl ( ) ; if ( url == null ) { logger . warn ( "Couldn't load the configuration from any of the known sources." ) ; return ; } try ( InputStream inputStream = url . openStream ( ) ) { Properties properties = new Properties ( ) ; properties . load ( inputStream ) ; loadHotModifiedProps ( properties ) ; } catch ( Exception e ) { logger . warn ( "Fail to reload config file {}" , url , e ) ; throw new QueryProcessException ( String . format ( "Fail to reload config file %s because %s" , url , e . getMessage ( ) ) ) ; } }
public void loadHotModifiedProps ( ) throws QueryProcessException { URL url = getPropsUrl ( ) ; if ( url == null ) { logger . warn ( "Couldn't load the configuration from any of the known sources." ) ; return ; } try ( InputStream inputStream = url . openStream ( ) ) { logger . info ( "Start to reload config file {}" , url ) ; Properties properties = new Properties ( ) ; properties . load ( inputStream ) ; loadHotModifiedProps ( properties ) ; } catch ( Exception e ) { logger . warn ( "Fail to reload config file {}" , url , e ) ; throw new QueryProcessException ( String . format ( "Fail to reload config file %s because %s" , url , e . getMessage ( ) ) ) ; } }
491
public void loadHotModifiedProps ( ) throws QueryProcessException { URL url = getPropsUrl ( ) ; if ( url == null ) { logger . warn ( "Couldn't load the configuration from any of the known sources." ) ; return ; } try ( InputStream inputStream = url . openStream ( ) ) { logger . info ( "Start to reload config file {}" , url ) ; Properties properties = new Properties ( ) ; properties . load ( inputStream ) ; loadHotModifiedProps ( properties ) ; } catch ( Exception e ) { throw new QueryProcessException ( String . format ( "Fail to reload config file %s because %s" , url , e . getMessage ( ) ) ) ; } }
public void loadHotModifiedProps ( ) throws QueryProcessException { URL url = getPropsUrl ( ) ; if ( url == null ) { logger . warn ( "Couldn't load the configuration from any of the known sources." ) ; return ; } try ( InputStream inputStream = url . openStream ( ) ) { logger . info ( "Start to reload config file {}" , url ) ; Properties properties = new Properties ( ) ; properties . load ( inputStream ) ; loadHotModifiedProps ( properties ) ; } catch ( Exception e ) { logger . warn ( "Fail to reload config file {}" , url , e ) ; throw new QueryProcessException ( String . format ( "Fail to reload config file %s because %s" , url , e . getMessage ( ) ) ) ; } }
492
public void setEvictionPolicy ( int evictionPolicy ) { if ( LOG . isDebugEnabled ( ) ) }
public void setEvictionPolicy ( int evictionPolicy ) { if ( LOG . isDebugEnabled ( ) ) LOG . debug ( "Ignoring eviction policy setting for NullSessionCaches" ) ; }
493
public void updateMaxTimestamp ( long previousMaxTimestamp , long newMaxTimestamp ) throws IOException { if ( newMaxTimestamp < 0 ) { throw new IllegalArgumentException ( ) ; } if ( newMaxTimestamp <= previousMaxTimestamp ) { LOG . error ( "maxTimestamp {} <= previousMaxTimesamp: {}" , newMaxTimestamp , previousMaxTimestamp ) ; throw new IllegalArgumentException ( ) ; } AtomicValue < Long > compareAndSet ; try { compareAndSet = timestamp . compareAndSet ( previousMaxTimestamp , newMaxTimestamp ) ; } catch ( Exception e ) { throw new IOException ( "Problem setting timestamp in ZK" , e ) ; } if ( ! compareAndSet . succeeded ( ) ) { throw new IOException ( "GetAndSet operation for storing timestamp in ZK did not succeed " + compareAndSet . preValue ( ) + " " + compareAndSet . postValue ( ) ) ; } }
public void updateMaxTimestamp ( long previousMaxTimestamp , long newMaxTimestamp ) throws IOException { if ( newMaxTimestamp < 0 ) { LOG . error ( "Negative value received for maxTimestamp: {}" , newMaxTimestamp ) ; throw new IllegalArgumentException ( ) ; } if ( newMaxTimestamp <= previousMaxTimestamp ) { LOG . error ( "maxTimestamp {} <= previousMaxTimesamp: {}" , newMaxTimestamp , previousMaxTimestamp ) ; throw new IllegalArgumentException ( ) ; } AtomicValue < Long > compareAndSet ; try { compareAndSet = timestamp . compareAndSet ( previousMaxTimestamp , newMaxTimestamp ) ; } catch ( Exception e ) { throw new IOException ( "Problem setting timestamp in ZK" , e ) ; } if ( ! compareAndSet . succeeded ( ) ) { throw new IOException ( "GetAndSet operation for storing timestamp in ZK did not succeed " + compareAndSet . preValue ( ) + " " + compareAndSet . postValue ( ) ) ; } }
494
public void updateMaxTimestamp ( long previousMaxTimestamp , long newMaxTimestamp ) throws IOException { if ( newMaxTimestamp < 0 ) { LOG . error ( "Negative value received for maxTimestamp: {}" , newMaxTimestamp ) ; throw new IllegalArgumentException ( ) ; } if ( newMaxTimestamp <= previousMaxTimestamp ) { throw new IllegalArgumentException ( ) ; } AtomicValue < Long > compareAndSet ; try { compareAndSet = timestamp . compareAndSet ( previousMaxTimestamp , newMaxTimestamp ) ; } catch ( Exception e ) { throw new IOException ( "Problem setting timestamp in ZK" , e ) ; } if ( ! compareAndSet . succeeded ( ) ) { throw new IOException ( "GetAndSet operation for storing timestamp in ZK did not succeed " + compareAndSet . preValue ( ) + " " + compareAndSet . postValue ( ) ) ; } }
public void updateMaxTimestamp ( long previousMaxTimestamp , long newMaxTimestamp ) throws IOException { if ( newMaxTimestamp < 0 ) { LOG . error ( "Negative value received for maxTimestamp: {}" , newMaxTimestamp ) ; throw new IllegalArgumentException ( ) ; } if ( newMaxTimestamp <= previousMaxTimestamp ) { LOG . error ( "maxTimestamp {} <= previousMaxTimesamp: {}" , newMaxTimestamp , previousMaxTimestamp ) ; throw new IllegalArgumentException ( ) ; } AtomicValue < Long > compareAndSet ; try { compareAndSet = timestamp . compareAndSet ( previousMaxTimestamp , newMaxTimestamp ) ; } catch ( Exception e ) { throw new IOException ( "Problem setting timestamp in ZK" , e ) ; } if ( ! compareAndSet . succeeded ( ) ) { throw new IOException ( "GetAndSet operation for storing timestamp in ZK did not succeed " + compareAndSet . preValue ( ) + " " + compareAndSet . postValue ( ) ) ; } }
495
public static Context createContext ( Map < String , String > properties ) throws Exception { fileSystem = FileSystem . get ( new Configuration ( ) ) ; baseDir = Files . createTempDir ( ) ; logDir = assertCreateDir ( new File ( baseDir , "log" ) ) ; confDir = assertCreateDir ( new File ( baseDir , "etc" ) ) ; dataDir = assertCreateDir ( new File ( baseDir , "data" ) ) ; policyFile = new File ( confDir , HiveServerFactory . AUTHZ_PROVIDER_FILENAME ) ; hiveServer = HiveServerFactory . create ( properties , baseDir , confDir , logDir , policyFile . getPath ( ) , fileSystem ) ; hiveServer . start ( ) ; return new Context ( hiveServer , fileSystem , baseDir , dataDir , policyFile ) ; }
public static Context createContext ( Map < String , String > properties ) throws Exception { fileSystem = FileSystem . get ( new Configuration ( ) ) ; baseDir = Files . createTempDir ( ) ; LOGGER . info ( "BaseDir = " + baseDir ) ; logDir = assertCreateDir ( new File ( baseDir , "log" ) ) ; confDir = assertCreateDir ( new File ( baseDir , "etc" ) ) ; dataDir = assertCreateDir ( new File ( baseDir , "data" ) ) ; policyFile = new File ( confDir , HiveServerFactory . AUTHZ_PROVIDER_FILENAME ) ; hiveServer = HiveServerFactory . create ( properties , baseDir , confDir , logDir , policyFile . getPath ( ) , fileSystem ) ; hiveServer . start ( ) ; return new Context ( hiveServer , fileSystem , baseDir , dataDir , policyFile ) ; }
496
public void disable ( GL gl , int refID ) { synchronized ( LOCK ) { if ( ! referencedRenderFragments . contains ( refID ) ) { if ( textureID == null ) { return ; } } referencedRenderFragments . remove ( refID ) ; if ( textureID != null ) { if ( referencedRenderFragments . isEmpty ( ) ) { LOG . debug ( "disabling and freeing texture memory." ) ; gl . glDeleteTextures ( 1 , textureID , 0 ) ; textureID = null ; } } } }
public void disable ( GL gl , int refID ) { synchronized ( LOCK ) { if ( ! referencedRenderFragments . contains ( refID ) ) { LOG . warn ( "Trying to remove a reference, which was not registered, this is strange: " + textureID ) ; if ( textureID == null ) { return ; } } referencedRenderFragments . remove ( refID ) ; if ( textureID != null ) { if ( referencedRenderFragments . isEmpty ( ) ) { LOG . debug ( "disabling and freeing texture memory." ) ; gl . glDeleteTextures ( 1 , textureID , 0 ) ; textureID = null ; } } } }
497
public void disable ( GL gl , int refID ) { synchronized ( LOCK ) { if ( ! referencedRenderFragments . contains ( refID ) ) { LOG . warn ( "Trying to remove a reference, which was not registered, this is strange: " + textureID ) ; if ( textureID == null ) { return ; } } referencedRenderFragments . remove ( refID ) ; if ( textureID != null ) { if ( referencedRenderFragments . isEmpty ( ) ) { gl . glDeleteTextures ( 1 , textureID , 0 ) ; textureID = null ; } } } }
public void disable ( GL gl , int refID ) { synchronized ( LOCK ) { if ( ! referencedRenderFragments . contains ( refID ) ) { LOG . warn ( "Trying to remove a reference, which was not registered, this is strange: " + textureID ) ; if ( textureID == null ) { return ; } } referencedRenderFragments . remove ( refID ) ; if ( textureID != null ) { if ( referencedRenderFragments . isEmpty ( ) ) { LOG . debug ( "disabling and freeing texture memory." ) ; gl . glDeleteTextures ( 1 , textureID , 0 ) ; textureID = null ; } } } }
498
public void onAdd ( Entity entity ) throws FalconException { if ( entity . getEntityType ( ) == EntityType . FEED ) { Feed feed = ( Feed ) entity ; List < Cluster > clusters = feed . getClusters ( ) . getClusters ( ) ; for ( Cluster cluster : clusters ) { if ( DeploymentUtil . getCurrentClusters ( ) . contains ( cluster . getName ( ) ) ) { List < Location > clusterSpecificLocations = FeedHelper . getLocations ( FeedHelper . getCluster ( feed , cluster . getName ( ) ) , feed ) ; if ( clusterSpecificLocations != null ) { for ( Location location : clusterSpecificLocations ) { if ( location != null && StringUtils . isNotBlank ( location . getPath ( ) ) ) { FeedLookupResult . FeedProperties value = new FeedLookupResult . FeedProperties ( feed . getName ( ) , location . getType ( ) , cluster . getName ( ) ) ; store . insert ( StringUtils . trim ( location . getPath ( ) ) , value ) ; } } } } } } }
public void onAdd ( Entity entity ) throws FalconException { if ( entity . getEntityType ( ) == EntityType . FEED ) { Feed feed = ( Feed ) entity ; List < Cluster > clusters = feed . getClusters ( ) . getClusters ( ) ; for ( Cluster cluster : clusters ) { if ( DeploymentUtil . getCurrentClusters ( ) . contains ( cluster . getName ( ) ) ) { List < Location > clusterSpecificLocations = FeedHelper . getLocations ( FeedHelper . getCluster ( feed , cluster . getName ( ) ) , feed ) ; if ( clusterSpecificLocations != null ) { for ( Location location : clusterSpecificLocations ) { if ( location != null && StringUtils . isNotBlank ( location . getPath ( ) ) ) { FeedLookupResult . FeedProperties value = new FeedLookupResult . FeedProperties ( feed . getName ( ) , location . getType ( ) , cluster . getName ( ) ) ; store . insert ( StringUtils . trim ( location . getPath ( ) ) , value ) ; LOG . debug ( "Inserted location: {} for feed: {} and cluster: {}" , location . getPath ( ) , feed . getName ( ) , cluster . getName ( ) ) ; } } } } } } }
499
public void handleStateChange ( ExecutionInstance instance , InstanceState . EVENT event , InstanceStateChangeHandler handler ) throws FalconException { InstanceID id = new InstanceID ( instance ) ; if ( ! stateStore . executionInstanceExists ( id ) ) { if ( event == InstanceState . EVENT . TRIGGER ) { callbackHandler ( instance , InstanceState . EVENT . TRIGGER , handler ) ; stateStore . putExecutionInstance ( new InstanceState ( instance ) ) ; ( ( ProcessExecutionInstance ) instance ) . registerForNotifications ( false ) ; LOG . debug ( "Instance {} triggered due to event {}." , id , event . name ( ) ) ; } else if ( event == InstanceState . EVENT . EXTERNAL_TRIGGER ) { callbackHandler ( instance , InstanceState . EVENT . EXTERNAL_TRIGGER , handler ) ; stateStore . updateExecutionInstance ( new InstanceState ( instance ) ) ; LOG . debug ( "Instance {} triggered due to event {}." , id , event . name ( ) ) ; } else { throw new FalconException ( "Instance " + id + "does not exist." ) ; } } else { InstanceState instanceState = stateStore . getExecutionInstance ( id ) ; InstanceState . STATE newState = instanceState . nextTransition ( event ) ; callbackHandler ( instance , event , handler ) ; instanceState = new InstanceState ( instance ) ; instanceState . setCurrentState ( newState ) ; stateStore . updateExecutionInstance ( instanceState ) ; } }
public void handleStateChange ( ExecutionInstance instance , InstanceState . EVENT event , InstanceStateChangeHandler handler ) throws FalconException { InstanceID id = new InstanceID ( instance ) ; if ( ! stateStore . executionInstanceExists ( id ) ) { if ( event == InstanceState . EVENT . TRIGGER ) { callbackHandler ( instance , InstanceState . EVENT . TRIGGER , handler ) ; stateStore . putExecutionInstance ( new InstanceState ( instance ) ) ; ( ( ProcessExecutionInstance ) instance ) . registerForNotifications ( false ) ; LOG . debug ( "Instance {} triggered due to event {}." , id , event . name ( ) ) ; } else if ( event == InstanceState . EVENT . EXTERNAL_TRIGGER ) { callbackHandler ( instance , InstanceState . EVENT . EXTERNAL_TRIGGER , handler ) ; stateStore . updateExecutionInstance ( new InstanceState ( instance ) ) ; LOG . debug ( "Instance {} triggered due to event {}." , id , event . name ( ) ) ; } else { throw new FalconException ( "Instance " + id + "does not exist." ) ; } } else { InstanceState instanceState = stateStore . getExecutionInstance ( id ) ; InstanceState . STATE newState = instanceState . nextTransition ( event ) ; callbackHandler ( instance , event , handler ) ; instanceState = new InstanceState ( instance ) ; instanceState . setCurrentState ( newState ) ; stateStore . updateExecutionInstance ( instanceState ) ; LOG . debug ( "State of instance: {} changed to: {} as a result of event: {}." , id , instanceState . getCurrentState ( ) , event . name ( ) ) ; } }