signature : string ( 43 to 39.1k characters )
implementation : string ( 0 to 450k characters )
public class UTEHelperImpl { /** * Internal method to do the creation given a name and a DDF . */ private void createDestination ( String name , com . ibm . wsspi . sib . core . DestinationType destType , Reliability defaultReliability ) throws JMSException { } }
if ( tcInt . isEntryEnabled ( ) ) SibTr . entry ( tcInt , "createDestination(String, DestinationType)" ) ; if ( tcInt . isDebugEnabled ( ) ) { SibTr . debug ( tcInt , "name: " + name ) ; SibTr . debug ( tcInt , "type: " + destType ) ; } try { try { // Obtain information about the destination from the core connection . SIDestinationAddress sida = JmsServiceFacade . getSIDestinationAddressFactory ( ) . createSIDestinationAddress ( name , null ) ; DestinationConfiguration dc = coreConnection . getDestinationConfiguration ( sida ) ; // If the Destination exists , then recreate it . if ( dc != null ) { String uuid = dc . getUUID ( ) ; if ( tcInt . isDebugEnabled ( ) ) SibTr . debug ( tcInt , "delete UUID: " + uuid ) ; // The destination has been found , so delete it . admin . deleteDestinationLocalization ( uuid , null ) ; } else { if ( tcInt . isDebugEnabled ( ) ) SibTr . debug ( tcInt , "No object was returned from getDestinationConfiguration" ) ; } } catch ( SINotPossibleInCurrentConfigurationException f ) { // No FFDC code needed // This is OK . No action required . } // lohith liberty change com . ibm . ws . sib . admin . DestinationDefinition adminDDF = null ; // JsAdminFactory . getInstance ( . createDestinationDefinition ( destType , name ) ; adminDDF . setMaxReliability ( Reliability . ASSURED_PERSISTENT ) ; adminDDF . setDefaultReliability ( defaultReliability ) ; // Set the default exception destination String excDestinationName = SIMPConstants . SYSTEM_DEFAULT_EXCEPTION_DESTINATION + getMEName ( ) ; adminDDF . setExceptionDestination ( excDestinationName ) ; // lohith liberty change LocalizationDefinition dloc = null ; // JsAdminFactory . getInstance ( ) . createLocalizationDefinition ( name ) ; // Make sure the max message count is outside the range used by the tests . dloc . setDestinationHighMsgs ( 30000 ) ; // lohith liberty change admin . createDestinationLocalization ( adminDDF , dloc ) ; // , null , null , null , null ) ; } catch ( Exception se ) { // No FFDC code needed if ( tcInt . isDebugEnabled ( ) ) SibTr . debug ( tcInt , "Exception creating" , se ) ; // NB . No need to NLS this because it is only for unit test . JMSException jmse = new JMSException ( "Exception received creating destination" ) ; jmse . setLinkedException ( se ) ; jmse . initCause ( se ) ; if ( tcInt . isEntryEnabled ( ) ) SibTr . exit ( tcInt , "createDestination(String, DestinationDefinition)" ) ; throw jmse ; } if ( tcInt . isEntryEnabled ( ) ) SibTr . exit ( tcInt , "createDestination(String, DestinationDefinition)" ) ;
public class RegularPathGraph { /** * Add a path - edge to the path - graph . Edges are only added to the vertex of * lower rank ( see constructor ) . * @ param edge path edge */ private void add ( final PathEdge edge ) { } }
int u = edge . either ( ) ; int v = edge . other ( u ) ; if ( rank [ u ] < rank [ v ] ) graph [ u ] . add ( edge ) ; else graph [ v ] . add ( edge ) ;
public class LocalQueueBrowserEnumeration { /** * Fetch the next browsable message in the associated queue * @ return a message or null * @ throws JMSException on queue browsing error */ private AbstractMessage fetchNext ( ) throws JMSException { } }
if ( nextMessage != null ) return nextMessage ; // Already fetched nextMessage = localQueue . browse ( cursor , parsedSelector ) ; // Lookup next candidate if ( nextMessage == null ) close ( ) ; // Auto - close enumeration at end of queue return nextMessage ;
public class JaxbContexts { /** * Get the shared marshaller for this context . * NOTE : Since this is shared , always synchronize on this object . * @ return the shared marshaller , or null if no context exists for the package */ public Marshaller getMarshaller ( String soapPackage ) throws JAXBException { } }
JAXBContextHolder jAXBContextHolder = this . get ( soapPackage ) ; if ( jAXBContextHolder == null ) return null ; /* Never */ return jAXBContextHolder . getMarshaller ( ) ;
public class CodedConstant { /** * Get the java expression that checks a required field and throws if it is not set . * @ param express java expression * @ param field java field * @ return full java expression */ public static String getRetRequiredCheck ( String express , Field field ) { } }
String code = "if (CodedConstant.isNull(" + express + ")) {\n" ; code += "throw new UninitializedMessageException(CodedConstant.asList(\"" + field . getName ( ) + "\"))" + ClassCode . JAVA_LINE_BREAK ; code += "}\n" ; return code ;
public class MessageValidatorRegistry { /** * Gets the default message header validator . * @ return */ public MessageValidator getDefaultMessageHeaderValidator ( ) { } }
return messageValidators . stream ( ) . filter ( validator -> DefaultMessageHeaderValidator . class . isAssignableFrom ( validator . getClass ( ) ) ) . findFirst ( ) . orElse ( null ) ;
public class SerDeState { /** * Write the serialized output to the given stream . For ByteArrayOutputStream cases this can be optimized . */ public void writeOutputTo ( OutputStream os ) throws IOException { } }
os . write ( output . getBuffer ( ) , 0 , output . position ( ) ) ;
public class SqlMapper { /** * Run a query and return results of the specified type . * @ param sql the sql to execute * @ param resultType the result type to return * @ param < T > the generic type * @ return result */ public < T > List < T > selectList ( String sql , Class < T > resultType ) { } }
String msId ; if ( resultType == null ) { msId = msUtils . select ( sql ) ; } else { msId = msUtils . select ( sql , resultType ) ; } return sqlSession . selectList ( msId ) ;
public class FxmlLoader { /** * Load the fxml file . * @ param url { @ link URL } * @ return { @ link Scene } * @ throws IOException on load error */ public static Scene loadFxml ( URL url ) throws IOException { } }
logger . info ( "load fxml from url: " + url ) ; BorderPane root = FXMLLoader . load ( url ) ; return new Scene ( root ) ;
public class RDBMEntityGroupStore { /** * Find and return an instance of the group . * @ param rs the SQL result set * @ return IEntityGroup */ private IEntityGroup instanceFromResultSet ( ResultSet rs ) throws SQLException , GroupsException { } }
IEntityGroup eg = null ; String key = rs . getString ( 1 ) ; String creatorID = rs . getString ( 2 ) ; Integer entityTypeID = rs . getInt ( 3 ) ; Class entityType = EntityTypesLocator . getEntityTypes ( ) . getEntityTypeFromID ( entityTypeID ) ; String groupName = rs . getString ( 4 ) ; String description = rs . getString ( 5 ) ; if ( key != null ) { eg = newInstance ( key , entityType , creatorID , groupName , description ) ; } return eg ;
public class PieChart { /** * Update a series by updating the pie slice value * @ param seriesName the name of the series to update * @ param value the new slice value * @ return the updated PieSeries */ public PieSeries updatePieSeries ( String seriesName , Number value ) { } }
Map < String , PieSeries > seriesMap = getSeriesMap ( ) ; PieSeries series = seriesMap . get ( seriesName ) ; if ( series == null ) { throw new IllegalArgumentException ( "Series name >" + seriesName + "< not found!!!" ) ; } series . replaceData ( value ) ; return series ;
public class VariableSafeAbsRef { /** * Dereference the variable , and return the reference value . Note that lazy * evaluation will occur . If a variable within scope is not found , a warning * will be sent to the error listener , and an empty nodeset will be returned . * @ param xctxt The runtime execution context . * @ return The evaluated variable , or an empty nodeset if not found . * @ throws javax . xml . transform . TransformerException */ public XObject execute ( XPathContext xctxt , boolean destructiveOK ) throws javax . xml . transform . TransformerException { } }
XNodeSet xns = ( XNodeSet ) super . execute ( xctxt , destructiveOK ) ; DTMManager dtmMgr = xctxt . getDTMManager ( ) ; int context = xctxt . getContextNode ( ) ; if ( dtmMgr . getDTM ( xns . getRoot ( ) ) . getDocument ( ) != dtmMgr . getDTM ( context ) . getDocument ( ) ) { Expression expr = ( Expression ) xns . getContainedIter ( ) ; xns = ( XNodeSet ) expr . asIterator ( xctxt , context ) ; } return xns ;
public class appflowpolicylabel_binding { /** * Use this API to fetch appflowpolicylabel _ binding resource of given name . */ public static appflowpolicylabel_binding get ( nitro_service service , String labelname ) throws Exception { } }
appflowpolicylabel_binding obj = new appflowpolicylabel_binding ( ) ; obj . set_labelname ( labelname ) ; appflowpolicylabel_binding response = ( appflowpolicylabel_binding ) obj . get_resource ( service ) ; return response ;
public class SemanticVersion { /** * Get the previous logical version in line . For example : * 1.2.3 becomes 1.2.2 * This gets ugly if we underflow , because we don ' t know what the top patch version * for the lower minor version could be . */ public SemanticVersion getPrevVersion ( ) { } }
int major = head . getMajorVersion ( ) ; int minor = head . getMinorVersion ( ) ; int patch = head . getPatchVersion ( ) ; if ( patch > 0 ) { return new SemanticVersion ( major , minor , patch - 1 ) ; } if ( minor > 0 ) { return new SemanticVersion ( major , minor - 1 , 999 ) ; } if ( major > 0 ) { return new SemanticVersion ( major - 1 , 999 , 999 ) ; } return new SemanticVersion ( 0 , 0 , 0 ) ;
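A minimal self-contained sketch of the decrement rule above, with 999 used as the assumed "top" patch or minor value when underflowing, exactly as in the implementation.

public class PrevVersionDemo {
    static int[] prev(int major, int minor, int patch) {
        if (patch > 0) return new int[] {major, minor, patch - 1};
        if (minor > 0) return new int[] {major, minor - 1, 999};   // unknown top patch -> 999
        if (major > 0) return new int[] {major - 1, 999, 999};
        return new int[] {0, 0, 0};
    }
    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(prev(1, 2, 3))); // [1, 2, 2]
        System.out.println(java.util.Arrays.toString(prev(1, 2, 0))); // [1, 1, 999]
        System.out.println(java.util.Arrays.toString(prev(1, 0, 0))); // [0, 999, 999]
    }
}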
public class DefaultErrorHandler { /** * Handler method for HTTP client errors * @ param requestUri - Request URI * @ param requestMethod - Request HTTP Method * @ param statusCode - HTTP status code * @ param statusMessage - HTTP status message * @ param errorBody - HTTP response body */ protected void handleError ( URI requestUri , HttpMethod requestMethod , int statusCode , String statusMessage , ByteSource errorBody ) throws RestEndpointIOException { } }
throw new RestEndpointException ( requestUri , requestMethod , statusCode , statusMessage , errorBody ) ;
public class ARCoreHelper { /** * Converts from AR world space to GVRf world space . */ private void ar2gvr ( float [ ] poseMatrix , float scale ) { } }
// Real world scale Matrix . scaleM ( poseMatrix , 0 , scale , scale , scale ) ; poseMatrix [ 12 ] = poseMatrix [ 12 ] * scale ; poseMatrix [ 13 ] = poseMatrix [ 13 ] * scale ; poseMatrix [ 14 ] = poseMatrix [ 14 ] * scale ;
public class CommerceAccountUserRelPersistenceImpl { /** * Returns the commerce account user rels before and after the current commerce account user rel in the ordered set where commerceAccountId = & # 63 ; . * @ param commerceAccountUserRelPK the primary key of the current commerce account user rel * @ param commerceAccountId the commerce account ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the previous , current , and next commerce account user rel * @ throws NoSuchAccountUserRelException if a commerce account user rel with the primary key could not be found */ @ Override public CommerceAccountUserRel [ ] findByCommerceAccountId_PrevAndNext ( CommerceAccountUserRelPK commerceAccountUserRelPK , long commerceAccountId , OrderByComparator < CommerceAccountUserRel > orderByComparator ) throws NoSuchAccountUserRelException { } }
CommerceAccountUserRel commerceAccountUserRel = findByPrimaryKey ( commerceAccountUserRelPK ) ; Session session = null ; try { session = openSession ( ) ; CommerceAccountUserRel [ ] array = new CommerceAccountUserRelImpl [ 3 ] ; array [ 0 ] = getByCommerceAccountId_PrevAndNext ( session , commerceAccountUserRel , commerceAccountId , orderByComparator , true ) ; array [ 1 ] = commerceAccountUserRel ; array [ 2 ] = getByCommerceAccountId_PrevAndNext ( session , commerceAccountUserRel , commerceAccountId , orderByComparator , false ) ; return array ; } catch ( Exception e ) { throw processException ( e ) ; } finally { closeSession ( session ) ; }
public class PieChart { /** * Add a series for a Pie type chart * @ param seriesName * @ param value * @ return */ public PieSeries addSeries ( String seriesName , Number value ) { } }
PieSeries series = new PieSeries ( seriesName , value ) ; if ( seriesMap . keySet ( ) . contains ( seriesName ) ) { throw new IllegalArgumentException ( "Series name >" + seriesName + "< has already been used. Use unique names for each series!!!" ) ; } seriesMap . put ( seriesName , series ) ; return series ;
public class FeatureWebSecurityConfigImpl { /** * { @ inheritDoc } */ @ Override public String getLoginErrorURL ( ) { } }
WebAppSecurityConfig globalConfig = WebAppSecurityCollaboratorImpl . getGlobalWebAppSecurityConfig ( ) ; if ( globalConfig != null ) return globalConfig . getLoginErrorURL ( ) ; else return null ;
public class SerializationUtils { /** * Serialize an object into a String . The object is first serialized into a byte array , * which is converted into a String using { @ link BaseEncoding # base64 ( ) } . * @ param obj A { @ link Serializable } object * @ return A String representing the input object * @ throws IOException if it fails to serialize the object */ public static < T extends Serializable > String serialize ( T obj ) throws IOException { } }
return serialize ( obj , DEFAULT_ENCODING ) ;
public class WSJdbcResultSet { /** * Retrieves the number , types and properties of a ResultSet ' s columns . * @ return * the description of a ResultSet ' s columns * @ throws SQLException if a database access error occurs . */ public ResultSetMetaData getMetaData ( ) throws SQLException { } }
if ( tc . isEntryEnabled ( ) ) Tr . entry ( this , tc , "getMetaData" ) ; // First , check if a ResultSetMetaData wrapper for this ResultSet already exists . ResultSetMetaData rsetMData = null ; try // get a meta data { rsetMData = rsetImpl . getMetaData ( ) ; } catch ( SQLException ex ) { FFDCFilter . processException ( ex , "com.ibm.ws.rsadapter.jdbc.WSJdbcResultSet.getMetaData" , "1579" , this ) ; if ( tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "getMetaData" , "Exception" ) ; throw WSJdbcUtil . mapException ( this , ex ) ; } catch ( NullPointerException nullX ) { // No FFDC code needed ; we might be closed . if ( tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "getMetaData" , "Exception" ) ; throw runtimeXIfNotClosed ( nullX ) ; } if ( tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "getMetaData" , rsetMData ) ; return rsetMData ;
public class InclusiveByteRange { public long getFirst ( long size ) { } }
if ( first < 0 ) { long tf = size - last ; if ( tf < 0 ) tf = 0 ; return tf ; } return first ;
public class MeasurementUnitsImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public Object eGet ( int featureID , boolean resolve , boolean coreType ) { } }
switch ( featureID ) { case AfplibPackage . MEASUREMENT_UNITS__XOA_BASE : return getXoaBase ( ) ; case AfplibPackage . MEASUREMENT_UNITS__YOA_BASE : return getYoaBase ( ) ; case AfplibPackage . MEASUREMENT_UNITS__XOA_UNITS : return getXoaUnits ( ) ; case AfplibPackage . MEASUREMENT_UNITS__YOA_UNITS : return getYoaUnits ( ) ; } return super . eGet ( featureID , resolve , coreType ) ;
public class ListHandler { /** * Prepare an objectname pattern from a path ( or " null " if no path is given ) * @ param pPathStack path * @ return created object name ( either plain or a pattern ) */ private ObjectName objectNameFromPath ( Stack < String > pPathStack ) throws MalformedObjectNameException { } }
if ( pPathStack . empty ( ) ) { return null ; } Stack < String > path = ( Stack < String > ) pPathStack . clone ( ) ; String domain = path . pop ( ) ; if ( path . empty ( ) ) { return new ObjectName ( domain + ":*" ) ; } String props = path . pop ( ) ; ObjectName mbean = new ObjectName ( domain + ":" + props ) ; if ( mbean . isPattern ( ) ) { throw new IllegalArgumentException ( "Cannot use an MBean pattern as path (given MBean: " + mbean + ")" ) ; } return mbean ;
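A minimal runnable sketch of the same path-to-name rule, assuming the domain sits on top of the stack with the property list directly below it; the pattern check on the full name is omitted here.

import java.util.Stack;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

public class ObjectNamePathDemo {
    static ObjectName fromPath(Stack<String> path) throws MalformedObjectNameException {
        if (path.empty()) return null;                            // no path -> no restriction
        String domain = path.pop();
        if (path.empty()) return new ObjectName(domain + ":*");   // domain only -> pattern
        return new ObjectName(domain + ":" + path.pop());         // domain + properties
    }
    public static void main(String[] args) throws MalformedObjectNameException {
        Stack<String> p = new Stack<>();
        p.push("type=Memory");
        p.push("java.lang");
        System.out.println(fromPath(p));          // java.lang:type=Memory
        Stack<String> domainOnly = new Stack<>();
        domainOnly.push("java.lang");
        System.out.println(fromPath(domainOnly)); // java.lang:*
    }
}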
public class PostgreSQLLiaison { /** * from DatabaseLiaison */ public void createGenerator ( Connection conn , String tableName , String columnName , int initValue ) throws SQLException { } }
if ( initValue == 1 ) { return ; // that ' s the default ! yay , do nothing } String seqname = "\"" + tableName + "_" + columnName + "_seq\"" ; Statement stmt = conn . createStatement ( ) ; try { stmt . executeQuery ( "select setval('" + seqname + "', " + initValue + ", false)" ) ; } finally { JDBCUtil . close ( stmt ) ; } log ( "Initial value of " + seqname + " set to " + initValue + "." ) ;
public class DemoFescarWebLauncher { /** * Order information */ @ Ok ( "json:full" ) @ At ( "/api/purchase" ) public NutMap purchase ( String userId , String commodityCode , int orderCount , boolean dofail ) { } }
try { businessService . purchase ( userId , commodityCode , orderCount , dofail ) ; return new NutMap ( "ok" , true ) ; } catch ( Throwable e ) { log . debug ( "purchase fail" , e ) ; return new NutMap ( "ok" , false ) ; }
public class CoffeeScriptGenerator { /** * Returns the resource input stream * @ param path * the resource path * @ return the resource input stream */ private InputStream getResourceInputStream ( String path ) { } }
InputStream is = config . getContext ( ) . getResourceAsStream ( path ) ; if ( is == null ) { try { is = ClassLoaderResourceUtils . getResourceAsStream ( path , this ) ; } catch ( FileNotFoundException e ) { throw new BundlingProcessException ( e ) ; } } return is ;
public class AWSOrganizationsClient { /** * Creates an organizational unit ( OU ) within a root or parent OU . An OU is a container for accounts that enables * you to organize your accounts to apply policies according to your business requirements . The number of levels * deep that you can nest OUs is dependent upon the policy types enabled for that root . For service control * policies , the limit is five . * For more information about OUs , see < a * href = " https : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ ous . html " > Managing Organizational * Units < / a > in the < i > AWS Organizations User Guide < / i > . * This operation can be called only from the organization ' s master account . * @ param createOrganizationalUnitRequest * @ return Result of the CreateOrganizationalUnit operation returned by the service . * @ throws AccessDeniedException * You don ' t have permissions to perform the requested operation . The user or role that is making the * request must have at least one IAM permissions policy attached that grants the required permissions . For * more information , see < a href = " https : / / docs . aws . amazon . com / IAM / latest / UserGuide / access . html " > Access * Management < / a > in the < i > IAM User Guide < / i > . * @ throws AWSOrganizationsNotInUseException * Your account isn ' t a member of an organization . To make this request , you must use the credentials of an * account that belongs to an organization . * @ throws ConcurrentModificationException * The target of the operation is currently being modified by a different request . Try again later . * @ throws ConstraintViolationException * Performing this operation violates a minimum or maximum value limit . For example , attempting to remove * the last service control policy ( SCP ) from an OU or root , inviting or creating too many accounts to the * organization , or attaching too many policies to an account , OU , or root . This exception includes a reason * that contains additional information about the violated limit . < / p > * Some of the reasons in the following list might not be applicable to this specific API or operation : * < ul > * < li > * ACCOUNT _ NUMBER _ LIMIT _ EXCEEDED : You attempted to exceed the limit on the number of accounts in an * organization . If you need more accounts , contact < a * href = " https : / / console . aws . amazon . com / support / home # / " > AWS Support < / a > to request an increase in your * limit . * Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in * your organization . Send fewer invitations or contact AWS Support to request an increase in the number of * accounts . * < note > * Deleted and closed accounts still count toward your limit . * < / note > < important > * If you get receive this exception when running a command immediately after creating the organization , * wait one hour and try again . If after an hour it continues to fail with this error , contact < a * href = " https : / / console . aws . amazon . com / support / home # / " > AWS Support < / a > . * < / important > < / li > * < li > * HANDSHAKE _ RATE _ LIMIT _ EXCEEDED : You attempted to exceed the number of handshakes that you can send in one * day . * < / li > * < li > * OU _ NUMBER _ LIMIT _ EXCEEDED : You attempted to exceed the number of OUs that you can have in an organization . 
* < / li > * < li > * OU _ DEPTH _ LIMIT _ EXCEEDED : You attempted to create an OU tree that is too many levels deep . * < / li > * < li > * ORGANIZATION _ NOT _ IN _ ALL _ FEATURES _ MODE : You attempted to perform an operation that requires the * organization to be configured to support all features . An organization that supports only consolidated * billing features can ' t perform this operation . * < / li > * < li > * POLICY _ NUMBER _ LIMIT _ EXCEEDED . You attempted to exceed the number of policies that you can have in an * organization . * < / li > * < li > * MAX _ POLICY _ TYPE _ ATTACHMENT _ LIMIT _ EXCEEDED : You attempted to exceed the number of policies of a certain * type that can be attached to an entity at one time . * < / li > * < li > * MIN _ POLICY _ TYPE _ ATTACHMENT _ LIMIT _ EXCEEDED : You attempted to detach a policy from an entity that would * cause the entity to have fewer than the minimum number of policies of a certain type required . * < / li > * < li > * ACCOUNT _ CANNOT _ LEAVE _ WITHOUT _ EULA : You attempted to remove an account from the organization that doesn ' t * yet have enough information to exist as a standalone account . This account requires you to first agree to * the AWS Customer Agreement . Follow the steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * ACCOUNT _ CANNOT _ LEAVE _ WITHOUT _ PHONE _ VERIFICATION : You attempted to remove an account from the organization * that doesn ' t yet have enough information to exist as a standalone account . This account requires you to * first complete phone verification . Follow the steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * MASTER _ ACCOUNT _ PAYMENT _ INSTRUMENT _ REQUIRED : To create an organization with this master account , you first * must associate a valid payment instrument , such as a credit card , with the account . Follow the steps at * < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * MEMBER _ ACCOUNT _ PAYMENT _ INSTRUMENT _ REQUIRED : To complete this operation with this member account , you * first must associate a valid payment instrument , such as a credit card , with the account . Follow the * steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * ACCOUNT _ CREATION _ RATE _ LIMIT _ EXCEEDED : You attempted to exceed the number of accounts that you can create * in one day . 
* < / li > * < li > * MASTER _ ACCOUNT _ ADDRESS _ DOES _ NOT _ MATCH _ MARKETPLACE : To create an account in this organization , you first * must migrate the organization ' s master account to the marketplace that corresponds to the master * account ' s address . For example , accounts with India addresses must be associated with the AISPL * marketplace . All accounts in an organization must be associated with the same marketplace . * < / li > * < li > * MASTER _ ACCOUNT _ MISSING _ CONTACT _ INFO : To complete this operation , you must first provide contact a valid * address and phone number for the master account . Then try the operation again . * < / li > * < li > * MASTER _ ACCOUNT _ NOT _ GOVCLOUD _ ENABLED : To complete this operation , the master account must have an * associated account in the AWS GovCloud ( US - West ) Region . For more information , see < a * href = " http : / / docs . aws . amazon . com / govcloud - us / latest / UserGuide / govcloud - organizations . html " > AWS * Organizations < / a > in the < i > AWS GovCloud User Guide . < / i > * < / li > * @ throws DuplicateOrganizationalUnitException * An OU with the same name already exists . * @ throws InvalidInputException * The requested operation failed because you provided invalid values for one or more of the request * parameters . This exception includes a reason that contains additional information about the violated * limit : < / p > < note > * Some of the reasons in the following list might not be applicable to this specific API or operation : * < / note > * < ul > * < li > * IMMUTABLE _ POLICY : You specified a policy that is managed by AWS and can ' t be modified . * < / li > * < li > * INPUT _ REQUIRED : You must include a value for all required parameters . * < / li > * < li > * INVALID _ ENUM : You specified a value that isn ' t valid for that parameter . * < / li > * < li > * INVALID _ FULL _ NAME _ TARGET : You specified a full name that contains invalid characters . * < / li > * < li > * INVALID _ LIST _ MEMBER : You provided a list to a parameter that contains at least one invalid value . * < / li > * < li > * INVALID _ PARTY _ TYPE _ TARGET : You specified the wrong type of entity ( account , organization , or email ) as a * party . * < / li > * < li > * INVALID _ PAGINATION _ TOKEN : Get the value for the < code > NextToken < / code > parameter from the response to a * previous call of the operation . * < / li > * < li > * INVALID _ PATTERN : You provided a value that doesn ' t match the required pattern . * < / li > * < li > * INVALID _ PATTERN _ TARGET _ ID : You specified a policy target ID that doesn ' t match the required pattern . * < / li > * < li > * INVALID _ ROLE _ NAME : You provided a role name that isn ' t valid . A role name can ' t begin with the reserved * prefix < code > AWSServiceRoleFor < / code > . * < / li > * < li > * INVALID _ SYNTAX _ ORGANIZATION _ ARN : You specified an invalid Amazon Resource Name ( ARN ) for the * organization . * < / li > * < li > * INVALID _ SYNTAX _ POLICY _ ID : You specified an invalid policy ID . * < / li > * < li > * MAX _ FILTER _ LIMIT _ EXCEEDED : You can specify only one filter parameter for the operation . * < / li > * < li > * MAX _ LENGTH _ EXCEEDED : You provided a string parameter that is longer than allowed . * < / li > * < li > * MAX _ VALUE _ EXCEEDED : You provided a numeric parameter that has a larger value than allowed . * < / li > * < li > * MIN _ LENGTH _ EXCEEDED : You provided a string parameter that is shorter than allowed . 
* < / li > * < li > * MIN _ VALUE _ EXCEEDED : You provided a numeric parameter that has a smaller value than allowed . * < / li > * < li > * MOVING _ ACCOUNT _ BETWEEN _ DIFFERENT _ ROOTS : You can move an account only between entities in the same root . * < / li > * @ throws ParentNotFoundException * We can ' t find a root or OU with the < code > ParentId < / code > that you specified . * @ throws ServiceException * AWS Organizations can ' t complete your request because of an internal service error . Try again later . * @ throws TooManyRequestsException * You ' ve sent too many requests in too short a period of time . The limit helps protect against * denial - of - service attacks . Try again later . < / p > * For information on limits that affect Organizations , see < a * href = " https : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ reference _ limits . html " > Limits of * AWS Organizations < / a > in the < i > AWS Organizations User Guide < / i > . * @ sample AWSOrganizations . CreateOrganizationalUnit * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / organizations - 2016-11-28 / CreateOrganizationalUnit " * target = " _ top " > AWS API Documentation < / a > */ @ Override public CreateOrganizationalUnitResult createOrganizationalUnit ( CreateOrganizationalUnitRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateOrganizationalUnit ( request ) ;
public class Jaxrs20GlobalHandlerServiceImpl { /** * DS - driven component activation */ @ Activate protected void activate ( ComponentContext cContext , Map < String , Object > properties ) throws Exception { } }
this . cContext = cContext ; globalHandlerServiceSR . activate ( cContext ) ; listener = new JaxRSGlobalHandlerBusListener ( ) ; LibertyApplicationBusFactory . getInstance ( ) . registerApplicationBusListener ( listener ) ;
public class AbstractManagedType { /** * ( non - Javadoc ) * @ see * javax . persistence . metamodel . ManagedType # getSingularAttribute ( java . lang * . String , java . lang . Class ) */ @ Override public < Y > SingularAttribute < ? super X , Y > getSingularAttribute ( String paramString , Class < Y > paramClass ) { } }
SingularAttribute < ? super X , Y > attribute = getDeclaredSingularAttribute ( paramString , paramClass , false ) ; if ( superClazzType != null && attribute == null ) { return superClazzType . getSingularAttribute ( paramString , paramClass ) ; } checkForValid ( paramString , attribute ) ; return attribute ;
public class CitrusArchiveProcessor { /** * Adds Citrus archive dependencies and all transitive dependencies to archive . * @ param archive */ protected void addDependencies ( Archive < ? > archive ) { } }
String version = getConfiguration ( ) . getCitrusVersion ( ) ; CitrusArchiveBuilder archiveBuilder ; if ( version != null ) { archiveBuilder = CitrusArchiveBuilder . version ( version ) ; } else { archiveBuilder = CitrusArchiveBuilder . latestVersion ( ) ; } if ( archive instanceof EnterpriseArchive ) { EnterpriseArchive ear = ( EnterpriseArchive ) archive ; ear . addAsModules ( archiveBuilder . all ( ) . build ( ) ) ; } else if ( archive instanceof WebArchive ) { WebArchive war = ( WebArchive ) archive ; war . addAsLibraries ( archiveBuilder . all ( ) . build ( ) ) ; }
public class Value { /** * Returns an { @ code ARRAY < BYTES > } value . * @ param v the source of element values . This may be { @ code null } to produce a value for which * { @ code isNull ( ) } is { @ code true } . Individual elements may also be { @ code null } . */ public static Value bytesArray ( @ Nullable Iterable < ByteArray > v ) { } }
return new BytesArrayImpl ( v == null , v == null ? null : immutableCopyOf ( v ) ) ;
public class SomeOfChainMatcher { /** * { @ inheritDoc } */ @ Override public boolean matches ( Object item ) { } }
int count = 0 ; for ( Matcher < ? > m : matchers ) { if ( m . matches ( item ) ) { count ++ ; } } return countMatcher . matches ( count ) ;
public class IntStreamEx { /** * Folds the elements of this stream using the provided seed object and * accumulation function , going left to right . This is equivalent to : * < pre > * { @ code * int result = seed ; * for ( int element : this stream ) * result = accumulator . apply ( result , element ) * return result ; * < / pre > * This is a terminal operation . * This method cannot take all the advantages of parallel streams as it must * process elements strictly left to right . If your accumulator function is * associative , consider using { @ link # reduce ( int , IntBinaryOperator ) } * method . * For parallel stream it ' s not guaranteed that accumulator will always be * executed in the same thread . * @ param seed the starting value * @ param accumulator a * < a href = " package - summary . html # NonInterference " > non - interfering * < / a > , < a href = " package - summary . html # Statelessness " > stateless < / a > * function for incorporating an additional element into a result * @ return the result of the folding * @ see # reduce ( int , IntBinaryOperator ) * @ see # foldLeft ( IntBinaryOperator ) * @ since 0.4.0 */ public int foldLeft ( int seed , IntBinaryOperator accumulator ) { } }
int [ ] box = new int [ ] { seed } ; forEachOrdered ( t -> box [ 0 ] = accumulator . applyAsInt ( box [ 0 ] , t ) ) ; return box [ 0 ] ;
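A short usage sketch, assuming the StreamEx library is on the classpath; the non-associative accumulator shows why the strict left-to-right order matters.

import one.util.streamex.IntStreamEx;

public class FoldLeftDemo {
    public static void main(String[] args) {
        // Evaluated strictly left to right: ((((10 - 1) - 2) - 3) - 4) = 0
        int result = IntStreamEx.of(1, 2, 3, 4).foldLeft(10, (acc, x) -> acc - x);
        System.out.println(result); // 0
    }
}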
public class ConfigReader { /** * Configures component by passing configuration parameters . * @ param config configuration parameters to be set . */ public void configure ( ConfigParams config ) { } }
ConfigParams parameters = config . getSection ( "parameters" ) ; if ( parameters . size ( ) > 0 ) { _parameters = parameters ; }
public class Cache { /** * Implementation of the eviction strategy . */ @ Trivial protected synchronized void evictStaleEntries ( ) { } }
/* * final String METHODNAME = " evictStaleEntries " ; * if ( TraceComponent . isAnyTracingEnabled ( ) & & tc . isDebugEnabled ( ) ) { * int size = primaryTable . size ( ) + secondaryTable . size ( ) + tertiaryTable . size ( ) ; * Tr . debug ( tc , METHODNAME + " The current cache size is " + size + " ( " + primaryTable . size ( ) + " , " + secondaryTable . size ( ) + " , " + tertiaryTable . size ( ) + " ) " ) ; */ // log only when we evict the last table if ( ! tertiaryTable . isEmpty ( ) ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "evictStaleEntries Evicting tertiaryTable cache " + cacheName + ", size is " + tertiaryTable . size ( ) ) ; } } tertiaryTable = secondaryTable ; secondaryTable = primaryTable ; primaryTable = new ConcurrentHashMap < String , Object > ( ( minSize > secondaryTable . size ( ) ) ? minSize : secondaryTable . size ( ) ) ;
public class ApiOvhSaascsp2 { /** * Changes the tenant administrator ' s password * REST : POST / saas / csp2 / { serviceName } / changeAdministratorPassword * @ param newPassword [ required ] New password for the tenant administrator * @ param serviceName [ required ] The unique identifier of your Office service * API beta */ public OvhOfficeTask serviceName_changeAdministratorPassword_POST ( String serviceName , String newPassword ) throws IOException { } }
String qPath = "/saas/csp2/{serviceName}/changeAdministratorPassword" ; StringBuilder sb = path ( qPath , serviceName ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "newPassword" , newPassword ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhOfficeTask . class ) ;
public class BytesWritable { /** * Copy the byte array to the dest array , and return the number of bytes * copied . * @ param dest the destination array * @ param start the offset in the destination array * @ return the number of bytes copied */ public int copyTo ( byte [ ] dest , int start ) throws BufferTooSmallException { } }
if ( size > ( dest . length - start ) ) { throw new BufferTooSmallException ( "size is " + size + ", buffer availabe size is " + ( dest . length - start ) ) ; } if ( size > 0 ) { System . arraycopy ( bytes , 0 , dest , start , size ) ; } return size ;
public class ColumnSpec { /** * Parses the encoded column specifications and returns a ColumnSpec object that represents the * string . Variables are expanded using the given LayoutMap . * @ param encodedColumnSpec the encoded column specification * @ param layoutMap expands layout column variables * @ return a ColumnSpec instance for the given specification * @ throws NullPointerException if { @ code encodedColumnSpec } or { @ code layoutMap } is * { @ code null } * @ throws IllegalArgumentException if { @ code encodedColumnSpec } is empty or whitespace * @ see # decodeSpecs ( String , LayoutMap ) * @ since 1.2 */ public static ColumnSpec decode ( String encodedColumnSpec , LayoutMap layoutMap ) { } }
checkNotBlank ( encodedColumnSpec , "The encoded column specification must not be null, empty or whitespace." ) ; checkNotNull ( layoutMap , "The LayoutMap must not be null." ) ; String trimmed = encodedColumnSpec . trim ( ) ; String lower = trimmed . toLowerCase ( Locale . ENGLISH ) ; return decodeExpanded ( layoutMap . expand ( lower , true ) ) ;
public class TaskGroup { /** * Mark that the root of this task group depends on the given task group ' s root . * This ensures this task group ' s root gets picked for execution only after the completion * of all tasks in the given group . * @ param dependencyTaskGroup the task group that this task group depends on */ public void addDependencyTaskGroup ( TaskGroup dependencyTaskGroup ) { } }
if ( dependencyTaskGroup . proxyTaskGroupWrapper . isActive ( ) ) { dependencyTaskGroup . proxyTaskGroupWrapper . addDependentTaskGroup ( this ) ; } else { DAGraph < TaskItem , TaskGroupEntry < TaskItem > > dependencyGraph = dependencyTaskGroup ; super . addDependencyGraph ( dependencyGraph ) ; }
public class AnyUtils { /** * Finds a matching attribute for a given value . * @ param meta the metadata object * @ param value the value * @ return the attribute which will accept the given value */ public static MetaAttribute findAttribute ( MetaDataObject meta , Object value ) { } }
if ( value == null ) { throw new IllegalArgumentException ( "null as value not supported" ) ; } for ( MetaAttribute attr : meta . getAttributes ( ) ) { if ( attr . getName ( ) . equals ( TYPE_ATTRIBUTE ) ) { continue ; } if ( attr . isDerived ( ) ) { // we only consider persisted classes , not derived ones like // " value " itself continue ; } if ( attr . getType ( ) . getImplementationClass ( ) . isAssignableFrom ( value . getClass ( ) ) ) { return attr ; } } throw new IllegalArgumentException ( "cannot find anyType attribute for value '" + value + '\'' ) ;
public class SpinScriptEnv { /** * Get the spin scripting environment * @ param language the language name * @ return the environment script as string or null if the language is * not in the set of languages supported by spin . */ public static String get ( String language ) { } }
language = language . toLowerCase ( ) ; if ( "ecmascript" . equals ( language ) ) { language = "javascript" ; } String extension = extensions . get ( language ) ; if ( extension == null ) { return null ; } else { return loadScriptEnv ( language , extension ) ; }
public class CmsEncoder { /** * Escapes the wildcard characters in a string which will be used as the pattern for a SQL LIKE clause . < p > * @ param pattern the pattern * @ param escapeChar the character which should be used as the escape character * @ return the escaped pattern */ public static String escapeSqlLikePattern ( String pattern , char escapeChar ) { } }
char [ ] special = new char [ ] { escapeChar , '%' , '_' } ; String result = pattern ; for ( char charToEscape : special ) { result = result . replaceAll ( "" + charToEscape , "" + escapeChar + charToEscape ) ; } return result ;
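A self-contained sketch mirroring the escaping above, with '!' as an example escape character: the escape character itself, '%' and '_' are each prefixed with the escape character.

public class LikeEscapeDemo {
    static String escape(String pattern, char escapeChar) {
        char[] special = new char[] {escapeChar, '%', '_'};
        String result = pattern;
        for (char c : special) {
            result = result.replaceAll("" + c, "" + escapeChar + c);
        }
        return result;
    }
    public static void main(String[] args) {
        System.out.println(escape("50%_off", '!')); // 50!%!_off
    }
}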
public class Tuples { /** * Create a { @ link Serializer } for tuples of size 2. * @ param first the serializer for the first slot * @ param second the serializer for the second slot * @ return the serializer ; never null */ public static < T1 , T2 > Serializer < Tuple2 < T1 , T2 > > serializer ( Serializer < T1 > first , Serializer < T2 > second ) { } }
return new Tuple2Serializer < > ( first , second ) ;
public class DssatSoilFileHelper { /** * Generate the soil file name for auto - generation ( extension not * included ) * @ param soilData soil data holder * @ return the soil id ( 10 characters ) */ public String getSoilID ( Map soilData ) { } }
String hash = getObjectOr ( soilData , "soil_id" , "" ) ; if ( hashToName . containsKey ( hash ) ) { return hashToName . get ( hash ) ; } else { String soil_id ; if ( hash . length ( ) > 10 ) { soil_id = hash . substring ( 0 , 6 ) + "0001" ; } else if ( hash . equals ( "" ) ) { soil_id = "AGMIP_0001" ; } else { soil_id = hash ; while ( soil_id . length ( ) < 10 ) { soil_id += "_" ; } } int count ; while ( names . contains ( soil_id ) ) { try { count = Integer . parseInt ( soil_id . substring ( 6 , soil_id . length ( ) ) ) ; count ++ ; } catch ( Exception e ) { count = 1 ; } soil_id = soil_id . substring ( 0 , 6 ) + String . format ( "%04d" , count ) ; } names . add ( soil_id ) ; if ( hash . equals ( "" ) ) { hash = soil_id ; } hashToName . put ( hash , soil_id ) ; return soil_id ; }
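A simplified, self-contained sketch of the ID shaping rules above; the collision counter and the hash-to-name cache are left out, only the truncation, default and padding cases are shown.

public class SoilIdDemo {
    static String shape(String hash) {
        if (hash.length() > 10) return hash.substring(0, 6) + "0001"; // long id: 6 chars + counter
        if (hash.isEmpty()) return "AGMIP_0001";                      // missing id: default
        StringBuilder id = new StringBuilder(hash);
        while (id.length() < 10) id.append('_');                      // short id: pad to 10 chars
        return id.toString();
    }
    public static void main(String[] args) {
        System.out.println(shape("IBSOILHASH123")); // IBSOIL0001
        System.out.println(shape(""));              // AGMIP_0001
        System.out.println(shape("SHORT"));         // SHORT_____
    }
}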
public class Dictionary { /** * Gets a property ' s value as a String . Returns null if the value doesn ' t exist , or its value is not a String . * @ param key the key * @ return the String or null . */ @ Override public String getString ( @ NonNull String key ) { } }
if ( key == null ) { throw new IllegalArgumentException ( "key cannot be null." ) ; } synchronized ( lock ) { final Object obj = getMValue ( internalDict , key ) . asNative ( internalDict ) ; return obj instanceof String ? ( String ) obj : null ; }
public class TypeConversion { /** * A utility method to convert the long into bytes in an array . * @ param value * The long . * @ param bytes * The byte array to which the long should be copied . * @ param offset * The index where the long should start . */ public static void longToBytes ( long value , byte [ ] bytes , int offset ) { } }
for ( int i = offset + 7 ; i >= offset ; -- i ) { bytes [ i ] = ( byte ) value ; value = value >> 8 ; }
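A self-contained sketch showing the resulting byte order (most significant byte first, since the loop fills the array from the end).

public class LongToBytesDemo {
    static void longToBytes(long value, byte[] bytes, int offset) {
        for (int i = offset + 7; i >= offset; --i) {
            bytes[i] = (byte) value;   // lowest byte lands at the highest index
            value = value >> 8;
        }
    }
    public static void main(String[] args) {
        byte[] buf = new byte[8];
        longToBytes(0x0102030405060708L, buf, 0);
        System.out.println(java.util.Arrays.toString(buf)); // [1, 2, 3, 4, 5, 6, 7, 8]
    }
}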
public class HppResponse { /** * Creates the security hash from a number of fields and the shared secret . * @ param secret * @ return String */ private String generateHash ( String secret ) { } }
// check for any null values and set them to empty string for hashing String timeStamp = null == this . timeStamp ? "" : this . timeStamp ; String merchantId = null == this . merchantId ? "" : this . merchantId ; String orderId = null == this . orderId ? "" : this . orderId ; String result = null == this . result ? "" : this . result ; String message = null == this . message ? "" : this . message ; String pasRef = null == this . pasRef ? "" : this . pasRef ; String authCode = null == this . authCode ? "" : this . authCode ; // create String to hash String toHash = new StringBuilder ( ) . append ( timeStamp ) . append ( "." ) . append ( merchantId ) . append ( "." ) . append ( orderId ) . append ( "." ) . append ( result ) . append ( "." ) . append ( message ) . append ( "." ) . append ( pasRef ) . append ( "." ) . append ( authCode ) . toString ( ) ; return GenerationUtils . generateHash ( toHash , secret ) ;
public class RequestHelper { /** * Get the HTTP version associated with the given HTTP request * @ param aHttpRequest * The http request to query . May not be < code > null < / code > . * @ return < code > null < / code > if no supported HTTP version is contained */ @ Nullable public static EHttpVersion getHttpVersion ( @ Nonnull final HttpServletRequest aHttpRequest ) { } }
ValueEnforcer . notNull ( aHttpRequest , "HttpRequest" ) ; final String sProtocol = aHttpRequest . getProtocol ( ) ; return EHttpVersion . getFromNameOrNull ( sProtocol ) ;
public class EndpointExpander { /** * Given a format string that may contain any of the following conversions * < ul > * < li > % t which is replaced with the provided table name < / li > * < li > % p which is replaced with the provided partition id < / li > * < li > % g which is replaced with the provided generation < / li > * < li > % d which is replaced with the provided date < / li > * < / ul > * A percent sign may be used to escape another * Requires : * - template * - date ( if % d is in the template ) * - table name * - template contains % t and % p * Use the given parameters and place them where their respective * conversions are . * @ param tmpl format string * @ param tn table name * @ param p partition id * @ param gn generation * @ param dt date * @ param tz timezone into which dates are formatted * @ return expanded string with the applied parameter substitution conversions */ public static String expand ( String tmpl , String tn , int p , long gn , Date dt , TimeZone tz ) { } }
Preconditions . checkArgument ( tmpl != null && ! tmpl . trim ( ) . isEmpty ( ) , "null or empty format string" ) ; int conversionMask = conversionMaskFor ( tmpl ) ; boolean hasDateConversion = ( conversionMask & DATE ) == DATE ; boolean hasTableConversion = ( conversionMask & TABLE ) == TABLE ; Preconditions . checkArgument ( ( dt != null && hasDateConversion ) || ! hasDateConversion , "null date" ) ; Preconditions . checkArgument ( ( tn != null && ! tn . trim ( ) . isEmpty ( ) && hasTableConversion ) || ! hasTableConversion , "null or empty table name" ) ; SimpleDateFormat dateFormatter = null ; if ( hasDateConversion ) { dateFormatter = new SimpleDateFormat ( DATE_FORMAT ) ; if ( tz != null ) { dateFormatter . setTimeZone ( tz ) ; } } StringBuilder sb = new StringBuilder ( tmpl . length ( ) + 256 ) . append ( tmpl ) ; for ( int i = 0 ; i < sb . length ( ) ; ) { if ( isEscaped ( sb , i ) ) { sb . deleteCharAt ( i ) ; i += 1 ; } else if ( isExpandable ( sb , i ) ) { String r = "" ; switch ( sb . charAt ( i + 1 ) ) { case 't' : r = tn ; break ; case 'g' : r = Long . toString ( gn , Character . MAX_RADIX ) ; break ; case 'd' : r = dateFormatter . format ( dt ) ; break ; case 'p' : r = Integer . toString ( p ) ; break ; } sb . replace ( i , i + 2 , r ) ; i += r . length ( ) ; } else { i += 1 ; } } return sb . toString ( ) ;
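A small runnable sketch of the individual conversions used above; the concrete date pattern is an assumption standing in for the library's DATE_FORMAT constant.

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class EndpointConversionDemo {
    public static void main(String[] args) {
        System.out.println(Long.toString(42L, Character.MAX_RADIX)); // %g -> "16" (generation in base 36)
        System.out.println(Integer.toString(3));                     // %p -> "3" (partition id)
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd");    // assumed stand-in for DATE_FORMAT
        fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
        System.out.println(fmt.format(new Date(0L)));                 // %d -> "1970-01-01"
    }
}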
public class JNPStrategy { /** * { @ inheritDoc } */ public void bind ( String jndiName , Object o ) throws NamingException { } }
if ( jndiName == null ) throw new NamingException ( ) ; if ( o == null ) throw new NamingException ( ) ; Context context = createContext ( ) ; try { String className = o . getClass ( ) . getName ( ) ; if ( trace ) log . trace ( "Binding " + className + " under " + jndiName ) ; Reference ref = new Reference ( className , new StringRefAddr ( "class" , className ) , JNPStrategy . class . getName ( ) , null ) ; ref . add ( new StringRefAddr ( "name" , jndiName ) ) ; if ( objs . putIfAbsent ( qualifiedName ( jndiName , className ) , o ) != null ) { throw new NamingException ( bundle . deploymentFailedSinceJndiNameHasDeployed ( className , jndiName ) ) ; } if ( o instanceof Referenceable ) { Referenceable referenceable = ( Referenceable ) o ; referenceable . setReference ( ref ) ; } Util . bind ( context , jndiName , o ) ; if ( log . isDebugEnabled ( ) ) log . debug ( "Bound " + className + " under " + jndiName ) ; } finally { if ( context != null ) { try { context . close ( ) ; } catch ( NamingException ne ) { // Ignore } } }
public class UsersInner { /** * Modify properties of users . * @ param resourceGroupName The name of the resource group . * @ param labAccountName The name of the lab Account . * @ param labName The name of the lab . * @ param userName The name of the user . * @ param user The User registered to a lab * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < UserInner > updateAsync ( String resourceGroupName , String labAccountName , String labName , String userName , UserFragment user , final ServiceCallback < UserInner > serviceCallback ) { } }
return ServiceFuture . fromResponse ( updateWithServiceResponseAsync ( resourceGroupName , labAccountName , labName , userName , user ) , serviceCallback ) ;
public class SimpleArangoRepository { /** * Saves the given iterable of entities to the database * @ param entities * the iterable of entities to be saved to the database * @ return the iterable of updated entities with any id / key / rev saved in each entity */ @ SuppressWarnings ( "deprecation" ) @ Override public < S extends T > Iterable < S > saveAll ( final Iterable < S > entities ) { } }
if ( arangoOperations . getVersion ( ) . getVersion ( ) . compareTo ( "3.4.0" ) < 0 ) { arangoOperations . upsert ( entities , UpsertStrategy . UPDATE ) ; } else { final S first = StreamSupport . stream ( entities . spliterator ( ) , false ) . findFirst ( ) . get ( ) ; arangoOperations . repsert ( entities , ( Class < S > ) first . getClass ( ) ) ; } return entities ;
public class SAXSerializer { /** * Implements XMLReader method . */ @ Override public void parse ( final String mSystemID ) throws IOException , SAXException { } }
emitStartDocument ( ) ; try { super . call ( ) ; } catch ( final Exception exc ) { exc . printStackTrace ( ) ; } emitEndDocument ( ) ;
public class Singles { /** * Combine the provided Single with the first element ( if present ) in the provided Publisher using the provided BiFunction * @ param single Single to combine with a Publisher * @ param fn Combining function * @ param app Publisher to combine with a Single * @ return Combined Single */ public static < T1 , T2 , R > Single < R > zip ( Single < ? extends T1 > single , BiFunction < ? super T1 , ? super T2 , ? extends R > fn , Publisher < ? extends T2 > app ) { } }
Single < R > res = Single . fromPublisher ( Future . fromPublisher ( single . toFlowable ( ) ) . zip ( fn , app ) ) ; return res ;
public class SyntaxReader { /** * Return the last token read , and also advance by one token . This is equivalent to calling { @ link # lastToken } * followed by { @ link # nextToken } , but returning the result of lastToken . * @ return The last token . * @ throws LexException */ protected LexToken readToken ( ) throws LexException { } }
LexToken tok = reader . getLast ( ) ; reader . nextToken ( ) ; return tok ;
public class DefaultPlexusCipher { public String decrypt ( final String str , final String passPhrase ) throws PlexusCipherException { } }
if ( str == null || str . length ( ) < 1 ) { return str ; } return _cipher . decrypt64 ( str , passPhrase ) ;
public class DagBuilder { /** * Creates a new node and adds it to the DagBuilder . * @ param name name of the node * @ param nodeProcessor node processor associated with this node * @ return a new node * @ throws DagException if the name is not unique in the DAG . */ public Node createNode ( final String name , final NodeProcessor nodeProcessor ) { } }
checkIsBuilt ( ) ; if ( this . nameToNodeMap . get ( name ) != null ) { throw new DagException ( String . format ( "Node names in %s need to be unique. The name " + "(%s) already exists." , this , name ) ) ; } final Node node = new Node ( name , nodeProcessor , this . dag ) ; this . nameToNodeMap . put ( name , node ) ; return node ;
public class TypeSimplifier { /** * Given a set of referenced types , works out which of them should be imported and what the * resulting spelling of each one is . * < p > This method operates on a { @ code Set < TypeMirror > } rather than just a { @ code Set < String > } * because it is not strictly possible to determine what part of a fully - qualified type name is * the package and what part is the top - level class . For example , { @ code java . util . Map . Entry } is a * class called { @ code Map . Entry } in a package called { @ code java . util } assuming Java conventions * are being followed , but it could theoretically also be a class called { @ code Entry } in a * package called { @ code java . util . Map } . Since we are operating as part of the compiler , our goal * should be complete correctness , and the only way to achieve that is to operate on the real * representations of types . * @ param codePackageName The name of the package where the class containing these references is * defined . Other classes within the same package do not need to be imported . * @ param referenced The complete set of declared types ( classes and interfaces ) that will be * referenced in the generated code . * @ param defined The complete set of declared types ( classes and interfaces ) that are defined * within the scope of the generated class ( i . e . nested somewhere in its superclass chain , or * in its interface set ) * @ return a map where the keys are fully - qualified types and the corresponding values indicate * whether the type should be imported , and how the type should be spelled in the source code . */ private static Map < String , Spelling > findImports ( Elements elementUtils , Types typeUtils , String codePackageName , Set < TypeMirror > referenced , Set < TypeMirror > defined ) { } }
Map < String , Spelling > imports = new HashMap < > ( ) ; Set < TypeMirror > typesInScope = new TypeMirrorSet ( ) ; typesInScope . addAll ( referenced ) ; typesInScope . addAll ( defined ) ; Set < String > ambiguous = ambiguousNames ( typeUtils , typesInScope ) ; for ( TypeMirror type : referenced ) { TypeElement typeElement = ( TypeElement ) typeUtils . asElement ( type ) ; String fullName = typeElement . getQualifiedName ( ) . toString ( ) ; String simpleName = typeElement . getSimpleName ( ) . toString ( ) ; String pkg = packageNameOf ( typeElement ) ; boolean importIt ; String spelling ; if ( ambiguous . contains ( simpleName ) ) { importIt = false ; spelling = fullName ; } else if ( pkg . equals ( "java.lang" ) ) { importIt = false ; spelling = javaLangSpelling ( elementUtils , codePackageName , typeElement ) ; } else if ( pkg . equals ( codePackageName ) ) { importIt = false ; spelling = fullName . substring ( pkg . isEmpty ( ) ? 0 : pkg . length ( ) + 1 ) ; } else { importIt = true ; spelling = simpleName ; } imports . put ( fullName , new Spelling ( spelling , importIt ) ) ; } return imports ;
public class FacesBackingBean { /** * Ensures that any changes to this object will be replicated in a cluster ( for failover ) , * even if the replication scheme uses a change - detection algorithm that relies on * HttpSession . setAttribute to be aware of changes . Note that this method is used by the framework * and does not need to be called explicitly in most cases . * @ param request the current HttpServletRequest */ public void ensureFailover ( HttpServletRequest request ) { } }
StorageHandler sh = Handlers . get ( getServletContext ( ) ) . getStorageHandler ( ) ; HttpServletRequest unwrappedRequest = PageFlowUtils . unwrapMultipart ( request ) ; RequestContext rc = new RequestContext ( unwrappedRequest , null ) ; String attr = ScopedServletUtils . getScopedSessionAttrName ( InternalConstants . FACES_BACKING_ATTR , unwrappedRequest ) ; sh . ensureFailover ( rc , attr , this ) ;
public class Bits { /** * Writes an int to a ByteBuffer * @ param num the int to be written * @ param buf the buffer */ public static void writeInt ( int num , ByteBuffer buf ) { } }
if ( num == 0 ) { buf . put ( ( byte ) 0 ) ; return ; } final byte bytes_needed = bytesRequiredFor ( num ) ; buf . put ( bytes_needed ) ; for ( int i = 0 ; i < bytes_needed ; i ++ ) buf . put ( getByteAt ( num , i ) ) ;
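A minimal sketch of the variable-length layout: zero is written as a single 0 byte, anything else as one length byte followed by that many payload bytes. The two helpers here are assumptions (least-significant byte first), not the library's actual implementations.

import java.nio.ByteBuffer;

public class VarIntDemo {
    static byte bytesRequiredFor(int num) {      // assumed helper
        if ((num & 0xFFFFFF00) == 0) return 1;
        if ((num & 0xFFFF0000) == 0) return 2;
        if ((num & 0xFF000000) == 0) return 3;
        return 4;
    }
    static byte getByteAt(int num, int index) {  // assumed helper
        return (byte) (num >> (index * 8));
    }
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        int num = 300;                           // needs 2 payload bytes
        buf.put(bytesRequiredFor(num));          // length byte
        for (int i = 0; i < bytesRequiredFor(num); i++) buf.put(getByteAt(num, i));
        System.out.println(buf.position());      // 3: one length byte + two payload bytes
    }
}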
public class Graphics { /** * Sets the RGB value of the ambientLight */ public void setAmbientLight ( float r , float g , float b ) { } }
float ambient [ ] = { r , g , b , 255 } ; normalize ( ambient ) ; gl . glEnable ( GL2 . GL_LIGHTING ) ; gl . glEnable ( GL2 . GL_LIGHT0 ) ; gl . glLightfv ( GL2 . GL_LIGHT0 , GL2 . GL_AMBIENT , ambient , 0 ) ;
public class FilelistenerWriteTrx { /** * { @ inheritDoc } */ @ Override public synchronized void removeFile ( String pRelativePath ) throws TTException { } }
// If the file already exists we just override it // and remove the last meta entry since the key won ' t be correct anymore . getBucketTransaction ( ) . getMetaBucket ( ) . remove ( new MetaKey ( pRelativePath ) ) ;
public class UIUtils { /** * Format the duration in milliseconds to a human readable String , with " yr " , " days " , " hr " etc suffixes * @ param durationMs Duration in milliseconds * @ return Human readable string */ public static String formatDuration ( long durationMs ) { } }
Period period = Period . seconds ( ( int ) ( durationMs / 1000L ) ) ; Period p2 = period . normalizedStandard ( PeriodType . yearMonthDayTime ( ) ) ; PeriodFormatter formatter = new PeriodFormatterBuilder ( ) . appendYears ( ) . appendSuffix ( " yr " ) . appendMonths ( ) . appendSuffix ( " months " ) . appendDays ( ) . appendSuffix ( " days " ) . appendHours ( ) . appendSuffix ( " hr " ) . appendMinutes ( ) . appendSuffix ( " min " ) . appendSeconds ( ) . appendSuffix ( " sec" ) . toFormatter ( ) ; return formatter . print ( p2 ) ;
public class Packet { /** * Writes a single { @ link String } encoded with the specified { @ link Charset } and { @ link ByteOrder } to this * { @ link Packet } ' s payload . * < br > < br > * A { @ code short } is used to store the length of the { @ link String } in the payload header , which imposes a * maximum { @ link String } length of { @ code 65,535 } with a { @ link StandardCharsets # UTF _ 8 } encoding or * { @ code 32,767 } ( or less ) with a different encoding . * @ param s The { @ link String } to write . * @ param charset The { @ link Charset } of the { @ link String } being written . * @ param order The internal byte order of the { @ link String } . * @ return The { @ link Packet } to allow for chained writes . */ public Packet putString ( String s , Charset charset , ByteOrder order ) { } }
var bytes = s . getBytes ( charset ) ; putShort ( bytes . length , order ) ; putBytes ( bytes ) ; return this ;
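The method only writes the length-prefixed bytes; reading them back is not shown. A hedged sketch of a matching reader over a ByteBuffer view of the payload (readString is not part of the shown API) could be:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.Charset;

// Hypothetical counterpart to putString: an unsigned short length prefix followed by the raw bytes.
static String readString(ByteBuffer buf, Charset charset, ByteOrder order) {
    int length = Short.toUnsignedInt(buf.order(order).getShort()); // length was written as a short
    byte[] bytes = new byte[length];
    buf.get(bytes);
    return new String(bytes, charset);
}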
public class BaseRecordMessageFilter { /** * Get the name / value pairs in an ordered tree . * Note : Replace this with a DOM tree when it is available in the basic SDK . * @ return A matrix with the name , type , etc . */ public Object [ ] [ ] createNameValueTree ( Object mxString [ ] [ ] , Map < String , Object > properties ) { } }
mxString = super . createNameValueTree ( mxString , properties ) ; if ( properties != null ) { mxString = this . addNameValue ( mxString , DB_NAME , properties . get ( DB_NAME ) ) ; mxString = this . addNameValue ( mxString , TABLE_NAME , properties . get ( TABLE_NAME ) ) ; } return mxString ;
public class BitapPattern { /** * Returns a BitapMatcher performing a fuzzy search in a whole { @ code sequence } . Search allows no more than { @ code * maxNumberOfErrors } number of substitutions / insertions / deletions . Matcher will return positions of first matched * letter in the motif in descending order . * @ param maxNumberOfErrors maximal number of allowed substitutions / insertions / deletions * @ param sequence target sequence * @ return matcher which will return positions of first matched letter in the motif in descending order */ public BitapMatcher substitutionAndIndelMatcherFirst ( int maxNumberOfErrors , final Sequence sequence ) { } }
return substitutionAndIndelMatcherFirst ( maxNumberOfErrors , sequence , 0 , sequence . size ( ) ) ;
public class InputMaskRenderer { /** * Translates the client side mask to a { @ link Pattern } based on : * https : / / github . com / digitalBush / jquery . maskedinput * a - Represents an alpha character ( A - Z , a - z ) * 9 - Represents a numeric character ( 0-9) * * - Represents an alphanumeric character ( A - Z , a - z , 0-9) * ? - Makes the following input optional * @ param context The { @ link FacesContext } * @ param mask The mask value of the component * @ return The generated { @ link Pattern } */ protected Pattern translateMaskIntoRegex ( FacesContext context , String mask ) { } }
StringBuilder regex = SharedStringBuilder . get ( context , SB_PATTERN ) ; boolean optionalFound = false ; for ( char c : mask . toCharArray ( ) ) { if ( c == '?' ) { optionalFound = true ; } else { regex . append ( translateMaskCharIntoRegex ( c , optionalFound ) ) ; } } return Pattern . compile ( regex . toString ( ) ) ;
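The per-character helper translateMaskCharIntoRegex is not shown above. A minimal hypothetical sketch that matches the mask rules listed in the javadoc (a, 9, *, with ? marking the rest as optional) might be:

// Hypothetical sketch; the real renderer may escape literal mask characters differently.
private String translateMaskCharIntoRegex(char c, boolean optional) {
    String part;
    switch (c) {
        case 'a': part = "[A-Za-z]"; break;       // alpha character
        case '9': part = "[0-9]"; break;          // numeric character
        case '*': part = "[A-Za-z0-9]"; break;    // alphanumeric character
        default:  part = java.util.regex.Pattern.quote(String.valueOf(c)); break; // literal mask character
    }
    return optional ? part + "?" : part;          // everything after '?' becomes optional
}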
public class EventServicesImpl { /** * Method that creates the event log based on the passed in params . * @ param pEventName the event name * @ param pEventCategory the event category * @ param pEventSubCat the event subcategory * @ param pEventSource the event source * @ param pEventOwner the event owner * @ param pEventOwnerId the event owner ID * @ param user the creating user * @ param modUser the modifying user * @ param comments optional comments * @ return the ID of the created event log */ public Long createEventLog ( String pEventName , String pEventCategory , String pEventSubCat , String pEventSource , String pEventOwner , Long pEventOwnerId , String user , String modUser , String comments ) throws DataAccessException , EventException { } }
TransactionWrapper transaction = null ; EngineDataAccessDB edao = new EngineDataAccessDB ( ) ; try { transaction = edao . startTransaction ( ) ; Long id = edao . recordEventLog ( pEventName , pEventCategory , pEventSubCat , pEventSource , pEventOwner , pEventOwnerId , user , modUser , comments ) ; return id ; } catch ( SQLException e ) { edao . rollbackTransaction ( transaction ) ; throw new EventException ( "Failed to create event log" , e ) ; } finally { edao . stopTransaction ( transaction ) ; }
public class AbstractResourceBundleCli { /** * This method gets the { @ link NlsResourceBundleLocator } . * @ return the { @ link NlsResourceBundleLocator } . */ public NlsResourceBundleLocator getResourceBundleLocator ( ) { } }
if ( this . resourceBundleLocator == null ) { NlsResourceBundleLocatorImpl impl = new NlsResourceBundleLocatorImpl ( ) ; impl . initialize ( ) ; this . resourceBundleLocator = impl ; } return this . resourceBundleLocator ;
public class UserPreferences { /** * Helper method to read a map of names to enabled flags out of the properties file , using * a FindBugs style format . * @ param props * The properties file to read the map from . * @ param keyPrefix * The key prefix of the numbered entries . * @ return The map of names to enabled flags , or an empty map if no values exist . */ private static Map < String , Boolean > readProperties ( Properties props , String keyPrefix ) { } }
Map < String , Boolean > filters = new TreeMap < > ( ) ; int counter = 0 ; boolean keyFound = true ; while ( keyFound ) { String property = props . getProperty ( keyPrefix + counter ) ; if ( property != null ) { int pipePos = property . indexOf ( BOOL_SEPARATOR ) ; if ( pipePos >= 0 ) { String name = property . substring ( 0 , pipePos ) ; String enabled = property . substring ( pipePos + 1 ) ; filters . put ( name , Boolean . valueOf ( enabled ) ) ; } else { filters . put ( property , Boolean . TRUE ) ; } counter ++ ; } else { keyFound = false ; } } return filters ;
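For illustration, a properties fragment that this loop would parse; the key names are made up and the '|' separator is an assumption about BOOL_SEPARATOR based on the FindBugs-style format (illustration only: readProperties is private to UserPreferences).

import java.util.Map;
import java.util.Properties;

Properties props = new Properties();
props.setProperty("excludeFilter0", "/path/filterOne.xml|true");
props.setProperty("excludeFilter1", "/path/filterTwo.xml");      // no separator -> enabled by default
Map<String, Boolean> filters = readProperties(props, "excludeFilter");
// filters -> {/path/filterOne.xml=true, /path/filterTwo.xml=true}, ordered by name (TreeMap)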
public class MtasSpanFullyAlignedWithSpans { /** * Go to next start position . * @ return true , if successful * @ throws IOException Signals that an I / O exception has occurred . */ private boolean goToNextStartPosition ( ) throws IOException { } }
int nextSpans1StartPosition ; int nextSpans1EndPosition ; int nextSpans2StartPosition ; int nextSpans2EndPosition ; // loop over span1 while ( ( nextSpans1StartPosition = spans1 . spans . nextStartPosition ( ) ) != NO_MORE_POSITIONS ) { nextSpans1EndPosition = spans1 . spans . endPosition ( ) ; if ( noMorePositionsSpan2 && nextSpans1StartPosition > lastSpans2StartPosition ) { noMorePositions = true ; return false ; // check if start / end span1 matches start / end span2 from last or previous } else if ( ( nextSpans1StartPosition == lastSpans2StartPosition && nextSpans1EndPosition == lastSpans2EndPosition ) || ( nextSpans1StartPosition == previousSpans2StartPosition && previousSpans2EndPositions . contains ( nextSpans1EndPosition ) ) ) { return true ; } else { // try to find matching span2 while ( ! noMorePositionsSpan2 && nextSpans1StartPosition >= lastSpans2StartPosition ) { // get new span2 nextSpans2StartPosition = spans2 . spans . nextStartPosition ( ) ; // check for finished span2 if ( nextSpans2StartPosition == NO_MORE_POSITIONS ) { noMorePositionsSpan2 = true ; } else { // get end for new span2 nextSpans2EndPosition = spans2 . spans . endPosition ( ) ; // check for registering last span2 as previous if ( nextSpans1StartPosition <= lastSpans2StartPosition ) { if ( previousSpans2StartPosition != lastSpans2StartPosition ) { previousSpans2StartPosition = lastSpans2StartPosition ; previousSpans2EndPositions . clear ( ) ; } previousSpans2EndPositions . add ( lastSpans2EndPosition ) ; } // register span2 as last lastSpans2StartPosition = nextSpans2StartPosition ; lastSpans2EndPosition = nextSpans2EndPosition ; // check for match if ( nextSpans1StartPosition == nextSpans2StartPosition && nextSpans1EndPosition == nextSpans2EndPosition ) { return true ; } } } } } noMorePositions = true ; return false ;
public class AmazonAppStreamClient { /** * Immediately stops the specified streaming session . * @ param expireSessionRequest * @ return Result of the ExpireSession operation returned by the service . * @ sample AmazonAppStream . ExpireSession * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / appstream - 2016-12-01 / ExpireSession " target = " _ top " > AWS API * Documentation < / a > */ @ Override public ExpireSessionResult expireSession ( ExpireSessionRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeExpireSession ( request ) ;
public class AWSWAFRegionalClient { /** * Returns the status of a < code > ChangeToken < / code > that you got by calling < a > GetChangeToken < / a > . * < code > ChangeTokenStatus < / code > is one of the following values : * < ul > * < li > * < code > PROVISIONED < / code > : You requested the change token by calling < code > GetChangeToken < / code > , but you haven ' t * used it yet in a call to create , update , or delete an AWS WAF object . * < / li > * < li > * < code > PENDING < / code > : AWS WAF is propagating the create , update , or delete request to all AWS WAF servers . * < / li > * < li > * < code > INSYNC < / code > : Propagation is complete . * < / li > * < / ul > * @ param getChangeTokenStatusRequest * @ return Result of the GetChangeTokenStatus operation returned by the service . * @ throws WAFNonexistentItemException * The operation failed because the referenced object doesn ' t exist . * @ throws WAFInternalErrorException * The operation failed because of a system problem , even though the request was valid . Retry your request . * @ sample AWSWAFRegional . GetChangeTokenStatus * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / waf - regional - 2016-11-28 / GetChangeTokenStatus " * target = " _ top " > AWS API Documentation < / a > */ @ Override public GetChangeTokenStatusResult getChangeTokenStatus ( GetChangeTokenStatusRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetChangeTokenStatus ( request ) ;
public class DiagnosticsInner { /** * List Site Detector Responses . * List Site Detector Responses . * @ param nextPageLink The NextLink from the previous successful call to List operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; DetectorResponseInner & gt ; object */ public Observable < ServiceResponse < Page < DetectorResponseInner > > > listSiteDetectorResponsesNextWithServiceResponseAsync ( final String nextPageLink ) { } }
return listSiteDetectorResponsesNextSinglePageAsync ( nextPageLink ) . concatMap ( new Func1 < ServiceResponse < Page < DetectorResponseInner > > , Observable < ServiceResponse < Page < DetectorResponseInner > > > > ( ) { @ Override public Observable < ServiceResponse < Page < DetectorResponseInner > > > call ( ServiceResponse < Page < DetectorResponseInner > > page ) { String nextPageLink = page . body ( ) . nextPageLink ( ) ; if ( nextPageLink == null ) { return Observable . just ( page ) ; } return Observable . just ( page ) . concatWith ( listSiteDetectorResponsesNextWithServiceResponseAsync ( nextPageLink ) ) ; } } ) ;
public class PrimitiveFloatCastExtensions { /** * Convert the given value to { @ code AtomicInteger } . * @ param number a number of { @ code float } type . * @ return the equivalent value to { @ code number } of { @ code AtomicInteger } type . */ @ Pure @ Inline ( value = "new $2(($3) $1)" , imported = { AtomicInteger . class , int . class } ) public static AtomicInteger toAtomicInteger ( float number ) { } }
return new AtomicInteger ( ( int ) number ) ;
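A quick usage note: the (int) cast truncates toward zero, so fractional parts are dropped rather than rounded.

import java.util.concurrent.atomic.AtomicInteger;

AtomicInteger a = PrimitiveFloatCastExtensions.toAtomicInteger(3.9f);  // a.get() == 3
AtomicInteger b = PrimitiveFloatCastExtensions.toAtomicInteger(-3.9f); // b.get() == -3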
public class H2O { /** * package - private for unit tests */ static < T extends RemoteRunnable > T runOnH2ONode ( H2ONode node , T runnable ) { } }
if ( node == H2O . SELF ) { // run directly runnable . run ( ) ; return runnable ; } else { RunnableWrapperTask < T > task = new RunnableWrapperTask < > ( runnable ) ; try { return new RPC < > ( node , task ) . call ( ) . get ( ) . _runnable ; } catch ( DistributedException e ) { Log . trace ( "Exception in calling runnable on a remote node" , e ) ; Throwable cause = e . getCause ( ) ; throw cause instanceof RuntimeException ? ( RuntimeException ) cause : e ; } }
public class FileServersInner { /** * Gets information about the specified file server . * @ param resourceGroupName Name of the resource group to which the resource belongs . * @ param fileServerName The name of the file server within the specified resource group . File server names can only contain a combination of alphanumeric characters along with dash ( - ) and underscore ( _ ) . The name must be from 1 through 64 characters long . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < FileServerInner > getByResourceGroupAsync ( String resourceGroupName , String fileServerName , final ServiceCallback < FileServerInner > serviceCallback ) { } }
return ServiceFuture . fromResponse ( getByResourceGroupWithServiceResponseAsync ( resourceGroupName , fileServerName ) , serviceCallback ) ;
public class MockEC2QueryHandler { /** * Parse instance states from query parameters . * @ param queryParams * map of query parameters in http request * @ return a set of instance states in the parameter map */ private Set < String > parseInstanceStates ( final Map < String , String [ ] > queryParams ) { } }
Set < String > instanceStates = new TreeSet < String > ( ) ; for ( String queryKey : queryParams . keySet ( ) ) { // e . g . Filter . 1 . Value . 1 : running , Filter . 1 . Value . 2 : pending if ( queryKey . startsWith ( "Filter.1.Value" ) ) { for ( String state : queryParams . get ( queryKey ) ) { instanceStates . add ( state ) ; } } } return instanceStates ;
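For example, the query parameters produced by a describe-instances request filtering on instance-state-name would be consumed like this (parameter names follow the EC2 query format shown in the comment above; illustration only, since the method is private to MockEC2QueryHandler):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

Map<String, String[]> queryParams = new HashMap<>();
queryParams.put("Filter.1.Value.1", new String[] {"running"});
queryParams.put("Filter.1.Value.2", new String[] {"pending"});
Set<String> states = parseInstanceStates(queryParams);
// states -> [pending, running] (TreeSet keeps them in alphabetical order)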
public class ModelsImpl { /** * Gets information about the hierarchical entity models . * @ param appId The application ID . * @ param versionId The version ID . * @ param listHierarchicalEntitiesOptionalParameter the object representing the optional parameters to be set before calling this API * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < HierarchicalEntityExtractor > > listHierarchicalEntitiesAsync ( UUID appId , String versionId , ListHierarchicalEntitiesOptionalParameter listHierarchicalEntitiesOptionalParameter , final ServiceCallback < List < HierarchicalEntityExtractor > > serviceCallback ) { } }
return ServiceFuture . fromResponse ( listHierarchicalEntitiesWithServiceResponseAsync ( appId , versionId , listHierarchicalEntitiesOptionalParameter ) , serviceCallback ) ;
public class AbstractRasMethodAdapter { /** * Inject a simple stack map frame and a PUSH NULL , POP . This method * assumes that the onMethodEntry modified code and returned immediately * after visiting the target label of a trace guard . */ private void visitFrameAfterOnMethodEntry ( ) { } }
if ( ! visitFramesAfterCallbacks ) return ; // The frame that is required after the trace guard must be // fully specified as ' this ' is no longer an ' uninitialized this ' if ( isConstructor ( ) ) { List < Object > stackLocals = new ArrayList < Object > ( argTypes . length + 1 ) ; stackLocals . add ( classAdapter . getClassType ( ) . getInternalName ( ) ) ; for ( Type type : argTypes ) { switch ( type . getSort ( ) ) { case Type . ARRAY : case Type . OBJECT : stackLocals . add ( type . getInternalName ( ) ) ; break ; case Type . BOOLEAN : case Type . CHAR : case Type . BYTE : case Type . SHORT : case Type . INT : stackLocals . add ( INTEGER ) ; break ; case Type . LONG : stackLocals . add ( LONG ) ; break ; case Type . FLOAT : stackLocals . add ( FLOAT ) ; break ; case Type . DOUBLE : stackLocals . add ( DOUBLE ) ; break ; } } visitFrame ( F_FULL , stackLocals . size ( ) , stackLocals . toArray ( ) , 0 , new Object [ ] { } ) ; visitInsn ( NOP ) ; } else { visitFrame ( F_SAME , 0 , null , 0 , null ) ; visitInsn ( NOP ) ; }
public class Matrix4x3f { /** * Reset this matrix to the identity . * Please note that if a call to { @ link # identity ( ) } is immediately followed by a call to : * { @ link # translate ( float , float , float ) translate } , * { @ link # rotate ( float , float , float , float ) rotate } , * { @ link # scale ( float , float , float ) scale } , * { @ link # ortho ( float , float , float , float , float , float ) ortho } , * { @ link # ortho2D ( float , float , float , float ) ortho2D } , * { @ link # lookAt ( float , float , float , float , float , float , float , float , float ) lookAt } , * { @ link # lookAlong ( float , float , float , float , float , float ) lookAlong } , * or any of their overloads , then the call to { @ link # identity ( ) } can be omitted and the subsequent call replaced with : * { @ link # translation ( float , float , float ) translation } , * { @ link # rotation ( float , float , float , float ) rotation } , * { @ link # scaling ( float , float , float ) scaling } , * { @ link # setOrtho ( float , float , float , float , float , float ) setOrtho } , * { @ link # setOrtho2D ( float , float , float , float ) setOrtho2D } , * { @ link # setLookAt ( float , float , float , float , float , float , float , float , float ) setLookAt } , * { @ link # setLookAlong ( float , float , float , float , float , float ) setLookAlong } , * or any of their overloads . * @ return this */ public Matrix4x3f identity ( ) { } }
if ( ( properties & PROPERTY_IDENTITY ) != 0 ) return this ; MemUtil . INSTANCE . identity ( this ) ; properties = PROPERTY_IDENTITY | PROPERTY_TRANSLATION | PROPERTY_ORTHONORMAL ; return this ;
public class ObjectOutputStream { /** * Write class descriptor { @ code classDesc } into the receiver . It is * assumed the class descriptor has not been dumped yet . The class * descriptors for the superclass chain will be dumped as well . Returns * the handle for this object ( class descriptor ) which is dumped here . * @ param classDesc * The { @ code ObjectStreamClass } object to dump * @ throws IOException * If an IO exception happened when writing the class * descriptor . */ private void writeNewClassDesc ( ObjectStreamClass classDesc ) throws IOException { } }
output . writeUTF ( classDesc . getName ( ) ) ; output . writeLong ( classDesc . getSerialVersionUID ( ) ) ; byte flags = classDesc . getFlags ( ) ; boolean externalizable = classDesc . isExternalizable ( ) ; if ( externalizable ) { if ( protocolVersion == PROTOCOL_VERSION_1 ) { flags &= NOT_SC_BLOCK_DATA ; } else { // Change for 1.2 . Objects can be saved in old format // ( PROTOCOL _ VERSION _ 1 ) or in the 1.2 format ( PROTOCOL _ VERSION _ 2 ) . flags |= SC_BLOCK_DATA ; } } output . writeByte ( flags ) ; if ( ( SC_ENUM | SC_SERIALIZABLE ) != classDesc . getFlags ( ) ) { writeFieldDescriptors ( classDesc , externalizable ) ; } else { // enum write no fields output . writeShort ( 0 ) ; }
public class Polynomial { /** * Wraps the polynomial around the array : < br > * f ( x ) = c [ 0 ] + c [ 1 ] * x + . . . + c [ n - 1 ] * x < sup > n - 1 < / sup > * @ param coefficients Polynomial coefficients * @ return new instance of a polynomial whose coefficients are identical to the input array */ public static Polynomial wrap ( double ... coefficients ) { } }
Polynomial p = new Polynomial ( coefficients . length ) ; p . setTo ( coefficients , coefficients . length ) ; return p ;
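To make the coefficient order concrete, here is a small standalone evaluation using Horner's rule; it does not rely on any particular evaluation method of the Polynomial class.

double[] c = {2, 3, 1};            // f(x) = 2 + 3x + x^2, i.e. Polynomial.wrap(2, 3, 1)
double x = 2.0;
double result = 0.0;
for (int i = c.length - 1; i >= 0; i--) {
    result = result * x + c[i];    // Horner's rule
}
// result == 12.0  (2 + 6 + 4 = 12)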
public class FileClassManager { /** * Gets the contents of the specified file , or null if the file does not * exist . * @ param file The < code > File < / code > whose contents to obtain . * @ return The file contents , or null if the file does not exist . */ private byte [ ] getFileContents ( File file ) { } }
if ( file . exists ( ) ) { try { return FileUtil . getFileContents ( file ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } } return null ;
public class DOMHelper { /** * Wait for a few milliseconds * @ param milliseconds */ private static void waiting ( int milliseconds ) { } }
long t0 , t1 ; t0 = System . currentTimeMillis ( ) ; do { t1 = System . currentTimeMillis ( ) ; } while ( ( t1 - t0 ) < milliseconds ) ;
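Note that the loop above is a busy wait that keeps a core spinning for the whole interval. Where blocking the thread is acceptable, a Thread.sleep based variant (shown only as an alternative sketch, not the original implementation) achieves the same pause:

private static void waiting(int milliseconds) {
    try {
        Thread.sleep(milliseconds);             // yields the CPU instead of spinning
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();     // restore the interrupt flag
    }
}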
public class ZooKeeperHelper { /** * Recursively create empty znodes ( if missing ) analogous to { @ code mkdir - p } . * @ param zookeeper ZooKeeper instance to work with . * @ param znode Path to create . * @ throws org . apache . zookeeper . KeeperException * @ throws InterruptedException */ static void mkdirp ( ZooKeeper zookeeper , String znode ) throws KeeperException , InterruptedException { } }
boolean createPath = false ; for ( String path : pathParts ( znode ) ) { if ( ! createPath ) { Stat stat = zookeeper . exists ( path , false ) ; if ( stat == null ) { createPath = true ; } } if ( createPath ) { create ( zookeeper , path ) ; } }
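The pathParts helper is not shown above. A hypothetical sketch that yields the ancestor chain of a znode, e.g. "/a/b/c" -> ["/a", "/a/b", "/a/b/c"], could look like:

import java.util.ArrayList;
import java.util.List;

// Hypothetical helper; the actual implementation in the source may differ.
static List<String> pathParts(String znode) {
    List<String> parts = new ArrayList<>();
    StringBuilder current = new StringBuilder();
    for (String segment : znode.split("/")) {
        if (segment.isEmpty()) {
            continue;                            // skip the empty segment before the leading '/'
        }
        current.append('/').append(segment);
        parts.add(current.toString());
    }
    return parts;
}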
public class ScrollReader { /** * Same as read ( String , Token , String ) above , but does not include checking the current field name to see if it ' s an array . */ protected Object readListItem ( String fieldName , Token t , String fieldMapping , Parser parser ) { } }
if ( t == Token . START_ARRAY ) { return list ( fieldName , fieldMapping , parser ) ; } // handle nested nodes first else if ( t == Token . START_OBJECT ) { // Don ' t need special handling for nested fields since this field is already in an array . return map ( fieldMapping , parser ) ; } FieldType esType = mapping ( fieldMapping , parser ) ; if ( t . isValue ( ) ) { String rawValue = parser . text ( ) ; try { return parseValue ( parser , esType ) ; } catch ( Exception ex ) { throw new EsHadoopParsingException ( String . format ( Locale . ROOT , "Cannot parse value [%s] for field [%s]" , rawValue , fieldName ) , ex ) ; } } return null ;
public class ConnectionFactoryService { /** * Indicates whether or not reauthentication of connections is enabled . * @ return true if reauthentication of connections is enabled . Otherwise false . */ @ Override @ Trivial public boolean getReauthenticationSupport ( ) { } }
return Boolean . TRUE . equals ( bootstrapContextRef . getReference ( ) . getProperty ( REAUTHENTICATION_SUPPORT ) ) ;
public class SmjpegDecoder { /** * 8 bytes magic - " ^ @ \ nSMJPEG " * Uint32 version = 0 * Uint32 length of clip in milliseconds */ private void decodeMandatoryHeader ( ) throws IOException { } }
byte [ ] magicBytes = new byte [ 8 ] ; file . readFully ( magicBytes ) ; if ( ! Arrays . equals ( SmjpegMagic . MANDATORY_HEADER , magicBytes ) ) { throw new SmjpegParsingException ( "This is not an SMJPEG file" ) ; } int version = file . readInt ( ) ; if ( version != 0 ) { throw new SmjpegParsingException ( "Unknown version " + version ) ; } this . clipLengthInMilliseconds = file . readInt ( ) ;
public class MMFMonitoringDataManager { /** * This method is used to compute the monitoring directory name which will be used by Agrona in order * to create a file in which to write the data in shared memory . * The monitoring directory will be dependent on the operating system . * For Linux we will use the OS implementation of shared memory , so the directory will be created * in / dev / shm . For the other operating systems we will create a monitoring folder under the * gateway folder . * @ return the monitoring directory name */ private String getMonitoringDirName ( ) { } }
String monitoringDirName = IoUtil . tmpDirName ( ) + MONITOR_DIR_NAME ; if ( LINUX . equalsIgnoreCase ( System . getProperty ( OS_NAME_SYSTEM_PROPERTY ) ) ) { final File devShmDir = new File ( LINUX_DEV_SHM_DIRECTORY ) ; if ( devShmDir . exists ( ) ) { monitoringDirName = LINUX_DEV_SHM_DIRECTORY + monitoringDirName ; } } return monitoringDirName ;
public class LatchedObserver { /** * Create a LatchedObserver with the given callback function ( s ) and a shared latch . */ public static < T > LatchedObserver < T > create ( Action1 < ? super T > onNext , CountDownLatch latch ) { } }
return new LatchedObserverImpl < T > ( onNext , Functionals . emptyThrowable ( ) , Functionals . empty ( ) , latch ) ;
public class Value { /** * intended byte [ ] . Also , the value is NOT on the deserialized machine ' s disk */ public final AutoBuffer write_impl ( AutoBuffer ab ) { } }
return ab . put1 ( _persist ) . put2 ( _type ) . putA1 ( memOrLoad ( ) ) ;
public class CacheCommandFactory { /** * Add region so that commands can be cleared on shutdown . * @ param region instance to keep track of */ public void addRegion ( InfinispanBaseRegion region ) { } }
allRegions . put ( ByteString . fromString ( region . getCache ( ) . getName ( ) ) , region ) ;
public class ObjectUtils { /** * Determine if the given objects are equal , returning { @ code true } if both are { @ code null } or * { @ code false } if only one is { @ code null } . * Compares arrays with { @ code Arrays . equals } , performing an equality check based on the array * elements rather than the array reference . * @ param o1 first Object to compare * @ param o2 second Object to compare * @ return whether the given objects are equal * @ see Object # equals ( Object ) * @ see java . util . Arrays # equals */ public static boolean nullSafeEquals ( @ Nullable final Object o1 , @ Nullable final Object o2 ) { } }
if ( o1 == o2 ) { return true ; } if ( o1 == null || o2 == null ) { return false ; } if ( o1 . equals ( o2 ) ) { return true ; } if ( o1 . getClass ( ) . isArray ( ) && o2 . getClass ( ) . isArray ( ) ) { return ObjectUtils . arrayEquals ( o1 , o2 ) ; } return false ;
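A few illustrative calls, mainly to show that arrays are compared by content rather than by reference:

int[] x = {1, 2};
int[] y = {1, 2};
ObjectUtils.nullSafeEquals(x, y);       // true  - array contents are compared
x.equals(y);                            // false - plain equals compares references
ObjectUtils.nullSafeEquals(null, null); // true
ObjectUtils.nullSafeEquals("a", null);  // false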
public class MoreCollectors { /** * Applies the specified { @ link Joiner } to the current stream . * @ throws NullPointerException if { @ code joiner } is { @ code null } * @ throws IllegalStateException if a merge operation happens because parallel processing has been enabled on the current stream */ public static < E > Collector < E , List < E > , String > join ( Joiner joiner ) { } }
requireNonNull ( joiner , "Joiner can't be null" ) ; return Collector . of ( ArrayList :: new , List :: add , mergeNotSupportedMerger ( ) , joiner :: join ) ;
public class RelationalOperations { /** * Returns true if polygon _ a is disjoint from polyline _ b . */ private static boolean polygonDisjointPolyline_ ( Polygon polygon_a , Polyline polyline_b , double tolerance , ProgressTracker progress_tracker ) { } }
// Quick rasterize test to see whether the geometries are disjoint , // or if one is contained in the other . int relation = tryRasterizedContainsOrDisjoint_ ( polygon_a , polyline_b , tolerance , true ) ; if ( relation == Relation . disjoint ) return true ; if ( relation == Relation . contains || relation == Relation . intersects ) return false ; return polygonDisjointMultiPath_ ( polygon_a , polyline_b , tolerance , progress_tracker ) ;
public class RealVoltDB { /** * Recover the partition assignment from one of the lost hosts in the same placement group for rejoin . * Use the placement group of the recovering host to find a matched host from the lost nodes in the topology . * If the partition count from the lost node is the same as the site count of the recovering host , * the partitions on the lost node will be placed on the recovering host . Partition group layout will be maintained . * Topology will be updated on ZK if successful . * @ param topology The topology from ZK , which contains the partition assignments for live or lost hosts * @ param haGroup The placement group of the recovering host * @ param recoverPartitions the partition placement to be recovered on this host * @ return The recovered topology if the recovery effort succeeds , otherwise null . */ private AbstractTopology recoverPartitions ( AbstractTopology topology , String haGroup , Set < Integer > recoverPartitions ) { } }
long version = topology . version ; if ( ! recoverPartitions . isEmpty ( ) ) { // In rejoin case , partition list from the rejoining node could be out of range if the rejoining // host is a previously elastic removed node or some other used nodes , if out of range , do not restore if ( Collections . max ( recoverPartitions ) > Collections . max ( m_cartographer . getPartitions ( ) ) ) { recoverPartitions . clear ( ) ; } } AbstractTopology recoveredTopo = AbstractTopology . mutateRecoverTopology ( topology , m_messenger . getLiveHostIds ( ) , m_messenger . getHostId ( ) , haGroup , recoverPartitions ) ; if ( recoveredTopo == null ) { return null ; } List < Integer > partitions = Lists . newArrayList ( recoveredTopo . getPartitionIdList ( m_messenger . getHostId ( ) ) ) ; if ( partitions != null && partitions . size ( ) == m_catalogContext . getNodeSettings ( ) . getLocalSitesCount ( ) ) { TopologyZKUtils . updateTopologyToZK ( m_messenger . getZK ( ) , recoveredTopo ) ; } if ( version < recoveredTopo . version && ! recoverPartitions . isEmpty ( ) ) { consoleLog . info ( "Partition placement layout has been restored for rejoining." ) ; } return recoveredTopo ;
public class Collector { /** * { @ inheritDoc } */ public Object execute ( final Object value , final CsvContext context ) { } }
collection . add ( value ) ; return next . execute ( value , context ) ;
public class BitVector { /** * Creates a { @ link BitVector } by copying the bits in a < code > BitSet < / code > . * @ param bitSet * a < code > BitSet < / code > * @ param size * the size of { @ link BitVector } to create , in bits * @ return a bit vector containing the bits of the bit set . * @ see Bits # asStore ( BitSet , int ) */ public static BitVector fromBitSet ( BitSet bitSet , int size ) { } }
if ( bitSet == null ) throw new IllegalArgumentException ( ) ; if ( size < 0 ) throw new IllegalArgumentException ( ) ; final int length = bitSet . length ( ) ; return fromBitSetImpl ( bitSet , size , length ) ;
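A minimal usage sketch: bits set in the BitSet appear at the same indices in the resulting vector, and the explicit size fixes the vector length regardless of the BitSet's logical length.

import java.util.BitSet;

BitSet set = new BitSet();
set.set(1);
set.set(3);
BitVector v = BitVector.fromBitSet(set, 8); // 8-bit vector with bits 1 and 3 set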