Thursday, 12 October 2017


Integrate CloudWatch Logs with Cloudhub Mule

In this blog, I will explain how to enable AWS CloudWatch Logs for your Mule CloudHub application. CloudWatch Logs is a service provided by AWS that lets you manage your logs better, and it is relatively cheaper than Splunk. Since CloudHub automatically rolls over logs larger than 100 MB, we need a mechanism to manage our logs more effectively. For this, we create a custom appender which sends the logs to CloudWatch.

package com.javaroots.appenders;

import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.status.StatusLogger;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.model.CreateLogGroupRequest;
import com.amazonaws.services.logs.model.CreateLogStreamRequest;
import com.amazonaws.services.logs.model.CreateLogStreamResult;
import com.amazonaws.services.logs.model.DataAlreadyAcceptedException;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
import com.amazonaws.services.logs.model.LogGroup;
import com.amazonaws.services.logs.model.LogStream;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;

@Plugin(name = "CLOUDW", category = "Core", elementType = "appender", printObject = true)
public class CloudwatchAppender extends AbstractAppender {
 
 private static final long serialVersionUID = 12321345L;
 
 private static final Logger logger2 = LogManager.getLogger(CloudwatchAppender.class);

 private final Boolean DEBUG_MODE = System.getProperty("log4j.debug") != null;

    /**
     * Used to make sure that on close() our daemon thread isn't also trying to sendMessage()s
     */
    private Object sendMessagesLock = new Object();

    /**
     * The queue used to buffer log entries
     */
    private LinkedBlockingQueue<LogEvent> loggingEventsQueue;

    /**
     * the AWS Cloudwatch Logs API client
     */
    private AWSLogs awsLogsClient;

    private AtomicReference<String> lastSequenceToken = new AtomicReference<>();

    /**
     * The AWS Cloudwatch Log group name
     */
    private String logGroupName;

    /**
     * The AWS Cloudwatch Log stream name
     */
    private String logStreamName;

    /**
     * The queue / buffer size
     */
    private int queueLength = 1024;

    /**
     * The maximum number of log entries to send in one go to the AWS Cloudwatch Log service
     */
    private int messagesBatchSize = 128;

    private AtomicBoolean cloudwatchAppenderInitialised = new AtomicBoolean(false);
 

    private CloudwatchAppender(final String name,
                           final Layout<? extends Serializable> layout,
                           final Filter filter,
                           final boolean ignoreExceptions,
                           String logGroupName,
                           String logStreamName,
                           Integer queueLength,
                           Integer messagesBatchSize) {
        super(name, filter, layout, ignoreExceptions);
        this.logGroupName = logGroupName;
        this.logStreamName = logStreamName;
        this.queueLength = queueLength;
        this.messagesBatchSize = messagesBatchSize;
        this.activateOptions();
    }

    @Override
    public void append(LogEvent event) {
        if (cloudwatchAppenderInitialised.get()) {
            loggingEventsQueue.offer(event);
        } else {
            // drop the event: the appender is not initialised (or failed to initialise)
        }
    }
    
    public void activateOptions() {
        if (isBlank(logGroupName) || isBlank(logStreamName)) {
            logger2.error("Could not initialise CloudwatchAppender because either or both LogGroupName(" + logGroupName + ") and LogStreamName(" + logStreamName + ") are null or empty");
            this.stop();
        } else {
          //below lines work with aws version 1.9.40 for local build
          //this.awsLogsClient = new AWSLogsClient();
          //awsLogsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_2));
          this.awsLogsClient = com.amazonaws.services.logs.AWSLogsClientBuilder.standard().withRegion(Regions.AP_SOUTHEAST_2).build();
            loggingEventsQueue = new LinkedBlockingQueue<>(queueLength);
            try {
                initializeCloudwatchResources();
                initCloudwatchDaemon();
                cloudwatchAppenderInitialised.set(true);
            } catch (Exception e) {
                logger2.error("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName, e);
                if (DEBUG_MODE) {
                    System.err.println("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName);
                    e.printStackTrace();
                }
            }
        }
    }
    
    private void initCloudwatchDaemon() {
     Thread t = new Thread(() -> {
            while (true) {
                try {
                    if (!loggingEventsQueue.isEmpty()) {
                        sendMessages();
                    }
                    Thread.sleep(20L); // pause briefly between queue drains
                } catch (InterruptedException e) {
                    if (DEBUG_MODE) {
                        e.printStackTrace();
                    }
                }
            }
        });
     t.setName("CloudwatchThread");
     t.setDaemon(true);
     t.start();
    }
    
    private void sendMessages() {
        synchronized (sendMessagesLock) {
            LogEvent polledLoggingEvent;
            final Layout<? extends Serializable> layout = getLayout();
            List<LogEvent> loggingEvents = new ArrayList<>();

            try {

                while ((polledLoggingEvent = loggingEventsQueue.poll()) != null && loggingEvents.size() < messagesBatchSize) {
                    loggingEvents.add(polledLoggingEvent);
                }
                List<InputLogEvent> inputLogEvents = loggingEvents.stream()
                        .map(loggingEvent -> new InputLogEvent().withTimestamp(loggingEvent.getTimeMillis())
                          .withMessage
                          (
                            layout == null ?
                            loggingEvent.getMessage().getFormattedMessage():
                            new String(layout.toByteArray(loggingEvent), StandardCharsets.UTF_8)
                            )
                          )
                        .sorted(comparing(InputLogEvent::getTimestamp))
                        .collect(toList());

                if (!inputLogEvents.isEmpty()) {

                    PutLogEventsRequest putLogEventsRequest = new PutLogEventsRequest(
                            logGroupName,
                            logStreamName,
                            inputLogEvents);

                    try {
                        putLogEventsRequest.setSequenceToken(lastSequenceToken.get());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                    } catch (DataAlreadyAcceptedException dataAlreadyAcceptedException) {
                        // this batch was already accepted: resync with the expected sequence token
                        putLogEventsRequest.setSequenceToken(dataAlreadyAcceptedException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            dataAlreadyAcceptedException.printStackTrace();
                        }
                    } catch (InvalidSequenceTokenException invalidSequenceTokenException) {
                        // our cached token was stale: retry with the token CloudWatch expects
                        putLogEventsRequest.setSequenceToken(invalidSequenceTokenException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            invalidSequenceTokenException.printStackTrace();
                        }
                    }
                }
            } catch (Exception e) {
                if (DEBUG_MODE) {
                    logger2.error("Error sending logs to CloudWatch:", e);
                    e.printStackTrace();
                }
            }
        }
    }

    private void initializeCloudwatchResources() {

        DescribeLogGroupsRequest describeLogGroupsRequest = new DescribeLogGroupsRequest();
        describeLogGroupsRequest.setLogGroupNamePrefix(logGroupName);

        Optional<LogGroup> logGroupOptional = awsLogsClient
                .describeLogGroups(describeLogGroupsRequest)
                .getLogGroups()
                .stream()
                .filter(logGroup -> logGroup.getLogGroupName().equals(logGroupName))
                .findFirst();

        if (!logGroupOptional.isPresent()) {
            CreateLogGroupRequest createLogGroupRequest = new CreateLogGroupRequest().withLogGroupName(logGroupName);
            awsLogsClient.createLogGroup(createLogGroupRequest);
        }

        DescribeLogStreamsRequest describeLogStreamsRequest = new DescribeLogStreamsRequest().withLogGroupName(logGroupName).withLogStreamNamePrefix(logStreamName);

        Optional<LogStream> logStreamOptional = awsLogsClient
                .describeLogStreams(describeLogStreamsRequest)
                .getLogStreams()
                .stream()
                .filter(logStream -> logStream.getLogStreamName().equals(logStreamName))
                .findFirst();
        if (!logStreamOptional.isPresent()) {
            CreateLogStreamRequest createLogStreamRequest = new CreateLogStreamRequest().withLogGroupName(logGroupName).withLogStreamName(logStreamName);
            awsLogsClient.createLogStream(createLogStreamRequest);
        }

    }
    
    private boolean isBlank(String string) {
        return null == string || string.trim().length() == 0;
    }
    protected String getSimpleStacktraceAsString(final Throwable thrown) {
        final StringBuilder stackTraceBuilder = new StringBuilder();
        for (StackTraceElement stackTraceElement : thrown.getStackTrace()) {
            new Formatter(stackTraceBuilder).format("%s.%s(%s:%d)%n",
                    stackTraceElement.getClassName(),
                    stackTraceElement.getMethodName(),
                    stackTraceElement.getFileName(),
                    stackTraceElement.getLineNumber());
        }
        return stackTraceBuilder.toString();
    }

    @Override
    public void start() {
        super.start();
    }

    @Override
    public void stop() {
        super.stop();
        while (loggingEventsQueue != null && !loggingEventsQueue.isEmpty()) {
            this.sendMessages();
        }
    }

    @Override
    public String toString() {
        return CloudwatchAppender.class.getSimpleName() + "{"
                + "name=" + getName() + " logGroupName=" + logGroupName
                + " logStreamName=" + logStreamName + "}";
    }

    @PluginFactory
    @SuppressWarnings("unused")
    public static CloudwatchAppender createCloudWatchAppender(
            @PluginAttribute(value = "queueLength") Integer queueLength,
            @PluginElement("Layout") Layout<? extends Serializable> layout,
            @PluginAttribute(value = "logGroupName") String logGroupName,
            @PluginAttribute(value = "logStreamName") String logStreamName,
            @PluginAttribute(value = "name") String name,
            @PluginAttribute(value = "ignoreExceptions", defaultBoolean = false) Boolean ignoreExceptions,
            @PluginAttribute(value = "messagesBatchSize") Integer messagesBatchSize)
    {
        return new CloudwatchAppender(name, layout, null, ignoreExceptions, logGroupName, logStreamName, queueLength, messagesBatchSize);
    }
}
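
Note that AWSLogsClientBuilder.standard() picks up credentials through the SDK's default provider chain (environment variables, system properties, instance profile, and so on). If you would rather pass the keys explicitly, for example as CloudHub application properties, you can wire in a static credentials provider. Below is a minimal sketch, assuming you choose to supply the keys through the system properties aws.accessKeyId and aws.secretKey:

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.AWSLogsClientBuilder;

public class ExplicitCredentialsClientFactory {

    public static AWSLogs buildClient() {
        // assumption: the keys are passed as JVM system properties (e.g. CloudHub properties)
        BasicAWSCredentials credentials = new BasicAWSCredentials(
                System.getProperty("aws.accessKeyId"),
                System.getProperty("aws.secretKey"));
        return AWSLogsClientBuilder.standard()
                .withRegion(Regions.AP_SOUTHEAST_2)
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .build();
    }
}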

We add the required dependencies in our pom.xml file:
<dependency>
   <groupId>com.amazonaws</groupId>
   <artifactId>aws-java-sdk-logs</artifactId>
   <!-- for local 3.8.5 we need to use this version cloudhub 3.8.5 has jackson 2.6.6 -->
   <!-- <version>1.9.40</version> -->
   <version>1.11.105</version>
   <exclusions>
    <exclusion>  <!-- declare the exclusion here -->
     <groupId>org.apache.logging.log4j</groupId>
     <artifactId>log4j-1.2-api</artifactId>
    </exclusion>
    <exclusion>  <!-- declare the exclusion here -->
     <groupId>com.fasterxml.jackson.core</groupId>
     <artifactId>jackson-core</artifactId>
    </exclusion>
    <exclusion>  <!-- declare the exclusion here -->
     <groupId>com.fasterxml.jackson.core</groupId>
     <artifactId>jackson-databind</artifactId>
    </exclusion>
   </exclusions>
  </dependency>
  <!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api -->
  <dependency>
   <groupId>org.apache.logging.log4j</groupId>
   <artifactId>log4j-api</artifactId>
   <version>2.5</version>
  </dependency>
  <!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core -->
  <dependency>
   <groupId>org.apache.logging.log4j</groupId>
   <artifactId>log4j-core</artifactId>
   <version>2.5</version>
  </dependency>
Now we need to modify our log4j2.xml: add our custom CloudWatch appender, and keep the CloudHub appender as well so that we still get the logs in CloudHub. Note that the appender's batch size and queue length are read from system properties (cloudwatch.msg.batch.size and cloudwatch.queue.length).
<?xml version="1.0" encoding="utf-8"?>
<Configuration status="trace" packages="com.javaroots.appenders,com.mulesoft.ch.logging.appender">

 <!--These are some of the loggers you can enable. 
     There are several more you can find in the documentation. 
        Besides this log4j configuration, you can also use Java VM environment variables
        to enable other logs like network (-Djavax.net.debug=ssl or all) and 
        Garbage Collector (-XX:+PrintGC). These will be appended to the console, so you will 
        see them in the mule_ee.log file. -->


    <Appenders>
         <CLOUDW name="CloudW" logGroupName="test-log-stream" 
        logStreamName="test44" messagesBatchSize="${sys:cloudwatch.msg.batch.size}" queueLength="${sys:cloudwatch.queue.length}">
   <PatternLayout pattern="%d [%t] %-5p %c - %m%n"/>
  </CLOUDW>
  
  <Log4J2CloudhubLogAppender name="CLOUDHUB"
                                   addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                                   applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                                   appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                                   appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                                   batchSendIntervalMs="${sys:logging.batchSendInterval}"
                                   batchMaxRecords="${sys:logging.batchMaxRecords}"
                                   memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                                   journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                                   journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                                   clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                                   clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                                   clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                                   serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                                   serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                                   statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">

            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>
        
    </Appenders>
    <Loggers>
     
     
  <!-- Http Logger shows wire traffic on DEBUG -->
  <AsyncLogger name="org.mule.module.http.internal.HttpMessageLogger" level="WARN"/>
 
  <!-- JDBC Logger shows queries and parameters values on DEBUG -->
  <AsyncLogger name="com.mulesoft.mule.transport.jdbc" level="WARN"/>
    
        <!-- CXF is used heavily by Mule for web services -->
        <AsyncLogger name="org.apache.cxf" level="WARN"/>

        <!-- Apache Commons tend to make a lot of noise which can clutter the log-->
        <AsyncLogger name="org.apache" level="WARN"/>

        <!-- Reduce startup noise -->
        <AsyncLogger name="org.springframework.beans.factory" level="WARN"/>

        <!-- Mule classes -->
        <AsyncLogger name="org.mule" level="INFO"/>
        <AsyncLogger name="com.mulesoft" level="INFO"/>

        <!-- Reduce DM verbosity -->
        <AsyncLogger name="org.jetel" level="WARN"/>
        <AsyncLogger name="Tracking" level="WARN"/>
        
        <AsyncRoot level="INFO">
            <AppenderRef ref="CLOUDHUB" level="INFO"/>
            <AppenderRef ref="CloudW" level="INFO"/>
        </AsyncRoot>
    </Loggers>
</Configuration>
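
Before deploying, you can sanity-check the appender locally with a tiny driver like the sketch below (the log group and stream names come from the log4j2.xml above). The appender sends batches asynchronously on a daemon thread, so give it a moment to drain the queue before the JVM exits:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class CloudwatchAppenderSmokeTest {

    private static final Logger LOG = LogManager.getLogger(CloudwatchAppenderSmokeTest.class);

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 10; i++) {
            LOG.info("cloudwatch test event " + i);
        }
        // the appender drains its queue every 20 ms on a daemon thread; wait before exiting
        Thread.sleep(2000L);
    }
}
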
Finally, we need to disable CloudHub logs in Runtime Manager so that our custom log4j2 configuration takes effect.

This is working with CloudHub Mule runtime version 3.8.4. There is some issue with the CloudHub 3.8.5 version, where the appender is getting initialised properly and sending logs, but the event and message details are missing.




Tuesday, 26 September 2017


How to Set Up dedicated load balancer on Cloudhub

MuleSoft CloudHub provides a built-in load balancer in the form of {appname}.cloudhub.io, which forwards requests to mule-worker-{appname}.au.cloudhub.io:8081 if we are using the HTTP port.


When you deploy your application on CloudHub, the app's HTTP listener always listens on port 8081 for HTTP and 8082 for HTTPS.


This is a step-by-step guide to setting up a dedicated load balancer on CloudHub.


For creating a dedicated load balancer, first go to the Load Balancer tab and click on the Create Load Balancer button.

Now enter the load balancer name and select the target VPC, the VPC which you have created for your CloudHub environment.

Select inbound HTTP mode "redirect" so that calls coming in as HTTP are automatically redirected to HTTPS.

Add your certificate and PEM files on the next page.

Now the load balancer is created with a default rule:

┌───────┬──────────────────────────────┬──────────────────────────────┬──────────────────────────────┬──────────────────────────────┐
│ Index │ Input URI                    │ App Name                     │ App URI                      │ Upstream Protocol            │
├───────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤
│ 0     │ /{app}/                      │ {app}                        │ /                            │ http                         │
└───────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘

It means a call to your load balancer URL, which looks like https://{load-balancer}-dlb.lb.anypointdns.net/{app}, will be forwarded to mule-worker-{appname}.au.cloudhub.io:8091.
Please note that in the case of the built-in load balancer the request is forwarded to mule-worker-{appname}.au.cloudhub.io:8081, but in the case of a dedicated load balancer it is forwarded to mule-worker-{appname}.au.cloudhub.io:8091.

Now, since we have configured our app to listen on ${http.port}, it always listens on port 8081, and the dedicated load balancer URL will always result in a 502 error like this.




So, we need to change our app to listen on port 8091. I have tried changing the port by adding a property in CloudHub, but it does not work.

You have to change your application to listen on ${http.private.port}, like this, in order to make your dedicated load balancer work:

<http:listener-config name="HTTP_Listener_Configuration"  host="0.0.0.0" port="${http.private.port}" doc:name="HTTP Listener Configuration"/>
Once you do that, the dedicated load balancer URL will work correctly.

Sunday, 3 September 2017


How to Call Another Flow From Dataweave

This is a simple example showing how to call another flow from DataWeave and include its result in the resulting XML file. The "lookup" function should be used in DataWeave for this.


<?xml version="1.0" encoding="UTF-8"?>

<mule xmlns:http="http://www.mulesoft.org/schema/mule/http" xmlns:vm="http://www.mulesoft.org/schema/mule/vm" xmlns:metadata="http://www.mulesoft.org/schema/mule/metadata" xmlns:dw="http://www.mulesoft.org/schema/mule/ee/dw" xmlns="http://www.mulesoft.org/schema/mule/core" xmlns:doc="http://www.mulesoft.org/schema/mule/documentation"
 xmlns:spring="http://www.springframework.org/schema/beans" 
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-current.xsd
http://www.mulesoft.org/schema/mule/core http://www.mulesoft.org/schema/mule/core/current/mule.xsd
http://www.mulesoft.org/schema/mule/ee/dw http://www.mulesoft.org/schema/mule/ee/dw/current/dw.xsd
http://www.mulesoft.org/schema/mule/vm http://www.mulesoft.org/schema/mule/vm/current/mule-vm.xsd
http://www.mulesoft.org/schema/mule/http http://www.mulesoft.org/schema/mule/http/current/mule-http.xsd">
    <http:listener-config name="HTTP_Listener_Configuration" host="0.0.0.0" port="8081" doc:name="HTTP Listener Configuration"/>
    <flow name="dataweaveexampleFlow">
        <!-- <http:listener config-ref="HTTP_Listener_Configuration" path="/ab" doc:name="HTTP"/> -->
        <poll doc:name="Poll">
            <fixed-frequency-scheduler frequency="1000" />
            <expression-component doc:name="Expression">
             <![CDATA[
     dataweaveexample.User u1 = new dataweaveexample.User();
     u1.setName("DataWeave");
     u1.setEmail("contact@javaroots.com");
     u1.setRate(148.3385);
     dataweaveexample.User u2 = new dataweaveexample.User();
     u2.setName("Example");
     u2.setEmail("contact@javaroots.com");
     u2.setRate(28.3385);
     List list = new java.util.ArrayList();
     list.add(u1);
     list.add(u2);
     payload =  list;
   ]]>
            </expression-component> 
        </poll>
         <logger level="INFO" message="payload is :#[payload]" doc:name="Logger"/>
         <dw:transform-message doc:name="Transform Message">
         <dw:input-payload mimeType="application/java"/>
            <dw:set-payload><![CDATA[%dw 1.0
%output application/xml
---
{
  users:{(payload map {
   user:{
    name:$.name,
    email:$.email,
    count:$$+1,
    rate: $.rate as :string {format :"###,00"},
    flowValue:lookup("callFromAnotherFlow",$)
   }  
  })   
  }
}]]></dw:set-payload>
</dw:transform-message>
        <logger level="INFO" message="payload lodis is :#[payload]" doc:name="Logger"/>
    </flow>
    <flow name="callFromAnotherFlow">
        <set-payload value="Javaroots.com Mule ESB DataWeave Example" doc:name="Set Payload"/>
    </flow>
</mule>
Post Comments and Suggestions !!!



Tuesday, 1 August 2017


Domain not available error Cloudhub Redeployment Issue

While deploying an application which was previously deployed successfully to CloudHub using Maven, I got the following error:
[INFO] Deploying application MyApp-dev to Cloudhub
[ERROR] Domain MyApp-dev is not available. Aborting.
[ERROR] Failed to deploy MyApp-dev: Domain MyApp-dev is not available. Aborting.
org.mule.tools.maven.plugin.mule.DeploymentException: Domain MyApp-dev is not available. Aborting.
                at org.mule.tools.maven.plugin.mule.cloudhub.CloudhubDeployer.deploy(CloudhubDeployer.java:89)
                at org.mule.tools.maven.plugin.mule.DeployMojo.deployWithDeployer(DeployMojo.java:216)
                at org.mule.tools.maven.plugin.mule.DeployMojo.cloudhub(DeployMojo.java:193)
                at org.mule.tools.maven.plugin.mule.DeployMojo.doExecute(DeployMojo.java:179)
                at org.mule.tools.maven.plugin.mule.AbstractMuleMojo.execute(AbstractMuleMojo.java:176)
                at org.apache.maven.plugin.DefaultBuildPluginManager.executeMojo(DefaultBuildPluginManager.java:134)
                at org.apache.maven.lifecycle.internal.MojoExecutor.execute(MojoExecutor.java:207)
                at org.apache.maven.lifecycle.internal.MojoExecutor.execute(MojoExecutor.java:153)
                at org.apache.maven.lifecycle.internal.MojoExecutor.execute(MojoExecutor.java:145)
                at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject(LifecycleModuleBuilder.java:116)
                at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject(LifecycleModuleBuilder.java:80)
                at org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder.build(SingleThreadedBuilder.java:51)
                at org.apache.maven.lifecycle.internal.LifecycleStarter.execute(LifecycleStarter.java:128)
                at org.apache.maven.DefaultMaven.doExecute(DefaultMaven.java:307)
                at org.apache.maven.DefaultMaven.doExecute(DefaultMaven.java:193)
                at org.apache.maven.DefaultMaven.execute(DefaultMaven.java:106)
                at org.apache.maven.cli.MavenCli.execute(MavenCli.java:863)
                at org.apache.maven.cli.MavenCli.doMain(MavenCli.java:288)
                at org.apache.maven.cli.MavenCli.main(MavenCli.java:199)
                at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
                at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
                at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
                at java.lang.reflect.Method.invoke(Method.java:498)
                at org.codehaus.plexus.classworlds.launcher.Launcher.launchEnhanced(Launcher.java:289)
                at org.codehaus.plexus.classworlds.launcher.Launcher.launch(Launcher.java:229)
                at org.codehaus.plexus.classworlds.launcher.Launcher.mainWithExitCode(Launcher.java:415)
                at org.codehaus.plexus.classworlds.launcher.Launcher.main(Launcher.java:356)

This error comes only when the application already exists in CloudHub; ideally, the plugin should just update the existing application.
To rectify this error, you need to make sure that the application name in your POM file contains only lowercase letters, because of a possible bug.
<applicationName>MyApp-${cloudHubAppSuffix}</applicationName>
So, the application name should be changed to:
<applicationName>myapp-${cloudHubAppSuffix}</applicationName>
Post Comments And Suggestions !!!

Wednesday, 19 July 2017


How to Read and Write Excel Files in Java

The examples below show how to write and read XLSX files in Java using Apache POI (the poi-ooxml artifact).
package com.sun;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.xssf.usermodel.XSSFSheet;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;


/**
 * Writes sample employee data to an XLSX file.
 */
public class XLSXWriter {

    public static void main(String[] args) throws IOException 
    {
        XSSFWorkbook workbook = new XSSFWorkbook();
        XSSFSheet sheet = workbook.createSheet("Employee Data");
        
        //This data needs to be written (Object[])
        Map<String, Object[]> data = new TreeMap<>();
        data.put("1", new Object[] {"ID", "NAME", "LASTNAME"});
        data.put("2", new Object[] {1, "Amit", "Shukla"});
        data.put("3", new Object[] {2, "Lokesh", "Gupta"});
        data.put("4", new Object[] {3, "John", "Adwards"});
        data.put("5", new Object[] {4, "Brian", "Schultz"});
        
        
        
        //Iterate over data and write to sheet
        Set<String> keyset = data.keySet();
        int rownum = 0;
        for (String key : keyset)
        {
            Row row = sheet.createRow(rownum++);
            Object [] objArr = data.get(key);
            int cellnum = 0;
            for (Object obj : objArr)
            {
               Cell cell = row.createCell(cellnum++);
               if(obj instanceof String)
                    cell.setCellValue((String)obj);
                else if(obj instanceof Integer)
                    cell.setCellValue((Integer)obj);
            }
        }
        try
        {
           //Write the workbook in file system
            FileOutputStream out = new FileOutputStream(new File("C://xlFileMoreThan65000Test.xlsx"));
            
            workbook.write(out);
            out.close();
         
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        
        
    }
}
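
The post title also mentions XLS: for the legacy .xls (binary) format you would use HSSFWorkbook from the core poi artifact instead of XSSFWorkbook, keeping in mind its 65,536-row-per-sheet limit. A minimal sketch, assuming the Apache POI jar is on the classpath:

package com.sun;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;

public class XLSWriter {

    public static void main(String[] args) throws IOException {
        // HSSFWorkbook writes the legacy BIFF (.xls) format
        HSSFWorkbook workbook = new HSSFWorkbook();
        Sheet sheet = workbook.createSheet("Employee Data");
        Row header = sheet.createRow(0);
        header.createCell(0).setCellValue("ID");
        header.createCell(1).setCellValue("NAME");
        header.createCell(2).setCellValue("LASTNAME");
        FileOutputStream out = new FileOutputStream(new File("C://legacyXlsTest.xls"));
        workbook.write(out);
        out.close();
    }
}

The second example below opens the XLSX file generated above, reads the last row number, and appends new rows to it.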



package com.sun;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.poi.openxml4j.opc.OPCPackage;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.xssf.usermodel.XSSFSheet;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;


/**
 * Opens the XLSX file ten times and appends 1,000 rows on each pass.
 */
public class XLSXReader {

    public static void main(String[] args) throws IOException 
    {
       
        try
        {
           
            long startTime = System.currentTimeMillis();
            for (int i = 0; i < 10; i++)
            {
                // open the existing workbook via OPCPackage; a separate FileInputStream is not needed
                OPCPackage pkg = OPCPackage.open("C://xlFileMoreThan65000Test.xlsx");
                XSSFWorkbook workbook1 = new XSSFWorkbook(pkg);
                XSSFSheet sheet1 = workbook1.getSheetAt(0);
                int newRow = sheet1.getLastRowNum();
                System.out.println("last row is : " + newRow);
                for(int j=0;j <1000 ;j++)
                {
                    newRow = newRow + 1;
                    Row row = sheet1.createRow(newRow);
                    
                    // write three cells per row (the data map from the writer class is not visible here)
                    for (int cellnum = 0; cellnum < 3; cellnum++)
                    {
                        Cell cell = row.createCell(cellnum);
                        cell.setCellValue("abc");
                    }
                }
                FileOutputStream out = new FileOutputStream(new File("C://xlFileMoreThan65000Test.xlsx"));
                workbook1.write(out);
                out.close();
                pkg.close();
            }
            
            System.out.println("Time taken is " + (System.currentTimeMillis()-startTime));
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        
        
    }
}
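
If you need to append thousands of rows repeatedly, re-opening and rewriting the whole workbook like this gets slow. Apache POI also offers the streaming SXSSFWorkbook, which keeps only a window of rows in memory and flushes the rest to temporary files. A minimal sketch of writing a large file that way (the file name is just an example):

package com.sun;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.xssf.streaming.SXSSFWorkbook;

public class SXSSFWriter {

    public static void main(String[] args) throws IOException {
        // keep at most 100 rows in memory; older rows are flushed to a temp file
        SXSSFWorkbook workbook = new SXSSFWorkbook(100);
        Sheet sheet = workbook.createSheet("Big Data");
        for (int r = 0; r < 100000; r++) {
            Row row = sheet.createRow(r);
            row.createCell(0).setCellValue("row-" + r);
        }
        FileOutputStream out = new FileOutputStream(new File("C://bigStreamingTest.xlsx"));
        workbook.write(out);
        out.close();
        workbook.dispose(); // delete the temporary flush files
    }
}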

Post Comments And Suggestions !!!

Wednesday, 12 July 2017


How to enable DEBUG Logging In CloudHub

CloudHub ignores the local log4j2.xml configuration: even if we enable DEBUG logging in the XML file and deploy it to CloudHub, we see no changes.

To enable logging in CloudHub, we can add the following configuration to our log4j2.xml:
<Log4J2CloudhubLogAppender name="CLOUDHUB"
                                   addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                                   applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                                   appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                                   appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                                   batchSendIntervalMs="${sys:logging.batchSendInterval}"
                                   batchMaxRecords="${sys:logging.batchMaxRecords}"
                                   memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                                   journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                                   journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                                   clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                                   clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                                   clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                                   serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                                   serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                                   statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">

            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>

And add a reference to this appender along with your file appender:
<AppenderRef ref="CLOUDHUB"/>
Now you can deploy it on CloudHub and it should work fine.
You can also include additional log levels and categories in your CloudHub logs.
Just click on Manage Application -> Settings -> Logging.

There you can select the desired logging level from the drop-down and enter your package name, for example com.mulesoft, and then click on Apply Changes. Those changes will be reflected in your CloudHub logs.



Provide Comments and Suggestions !!!

Thursday, 8 June 2017


How to make Poller Component Efficient In MULE ESB

Poller in Mule ESB is a very important component when you want to keep polling an endpoint for changes and then process them. Most of the time we do not know how big the changes will be or how much time they will take to complete. So setting the polling interval becomes very crucial: you do not want to trigger a new process before the existing task completes, and you also do not want to lose too much time waiting for the next trigger if the process finishes earlier.

The following code snippet solves this problem. It achieves synchronous behaviour, where the poller waits if the process is taking some time; it also achieves asynchronous efficiency, where 10 results spawn 10 separate processes while the main poller flow still waits for all of them to complete.


The code is self-explanatory; I have used the simple request-reply and aggregator pattern. Hope it helps.
<?xml version="1.0" encoding="UTF-8"?>

<mule xmlns:vm="http://www.mulesoft.org/schema/mule/vm" xmlns:scripting="http://www.mulesoft.org/schema/mule/scripting" xmlns:http="http://www.mulesoft.org/schema/mule/http" xmlns:metadata="http://www.mulesoft.org/schema/mule/metadata" xmlns:dw="http://www.mulesoft.org/schema/mule/ee/dw" xmlns:db="http://www.mulesoft.org/schema/mule/db" xmlns:batch="http://www.mulesoft.org/schema/mule/batch" xmlns:sfdc="http://www.mulesoft.org/schema/mule/sfdc" xmlns:tracking="http://www.mulesoft.org/schema/mule/ee/tracking" xmlns="http://www.mulesoft.org/schema/mule/core" xmlns:doc="http://www.mulesoft.org/schema/mule/documentation"
 xmlns:spring="http://www.springframework.org/schema/beans" 
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 xsi:schemaLocation="http://www.mulesoft.org/schema/mule/vm http://www.mulesoft.org/schema/mule/vm/current/mule-vm.xsd
http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-current.xsd
http://www.mulesoft.org/schema/mule/core http://www.mulesoft.org/schema/mule/core/current/mule.xsd
http://www.mulesoft.org/schema/mule/ee/tracking http://www.mulesoft.org/schema/mule/ee/tracking/current/mule-tracking-ee.xsd
http://www.mulesoft.org/schema/mule/db http://www.mulesoft.org/schema/mule/db/current/mule-db.xsd
http://www.mulesoft.org/schema/mule/sfdc http://www.mulesoft.org/schema/mule/sfdc/current/mule-sfdc.xsd
http://www.mulesoft.org/schema/mule/batch http://www.mulesoft.org/schema/mule/batch/current/mule-batch.xsd
http://www.mulesoft.org/schema/mule/ee/dw http://www.mulesoft.org/schema/mule/ee/dw/current/dw.xsd
http://www.mulesoft.org/schema/mule/http http://www.mulesoft.org/schema/mule/http/current/mule-http.xsd
http://www.mulesoft.org/schema/mule/scripting http://www.mulesoft.org/schema/mule/scripting/current/mule-scripting.xsd">
    
    
    
    <flow name="exampleFlow" processingStrategy="synchronous" >
     
         <poll doc:name="Poll" >
          <fixed-frequency-scheduler frequency="10000" timeUnit="MILLISECONDS"></fixed-frequency-scheduler>
          <logger message="Upload tour poll started" level="DEBUG"></logger>
          
         </poll>
        
        <expression-component>
         payload = new java.util.ArrayList();
         payload.add("test");
         payload.add("test2");
        </expression-component>
        <choice >
         <when expression="#[payload.size()  &gt;0]">
          <request-reply doc:name="Request-Reply" >
               <vm:outbound-endpoint exchange-pattern="one-way" path="download_tour_vm" doc:name="VM">
                           <collection-splitter />
               </vm:outbound-endpoint>
               <vm:inbound-endpoint exchange-pattern="one-way" path="aggregated_download_tour_vm" doc:name="VM">
                           <collection-aggregator />
               </vm:inbound-endpoint>
    </request-reply>
           <logger message="COMPLETED EXAMPLE" level="INFO"/> 
         </when>
         <otherwise>
          <logger message="No Record To Process" level="INFO"/>
         </otherwise>
        
        </choice>   
        
    </flow>
    
    
    
    
    
    
    <flow name="downloadTour">
       <vm:inbound-endpoint  path="download_tour_vm" doc:name="VM"/>
       
       <scripting:component doc:name="Groovy">
            <scripting:script engine="Groovy"><![CDATA[sleep(20000);
return message.payload;]]></scripting:script>
        </scripting:component>
    </flow>

    
</mule>

Post comments and suggestions !!!