Merge pull request #11050 from izeye:polish-20171117

* pr/11050:
  Polish
pull/11054/merge
Stephane Nicoll 7 years ago
commit 347f63c77d

@@ -36,7 +36,7 @@ public class AtlasProperties extends StepRegistryProperties {
 private String uri;
 /**
- * Time to love for meters that do not have any activity. After this period the meter
+ * Time to live for meters that do not have any activity. After this period the meter
 * will be considered expired and will not get reported.
 */
 private Duration meterTimeToLive;
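For reference, the field whose javadoc is fixed above is bound from `spring.metrics.export.atlas.meter-time-to-live` (see the appendix hunk further down). Below is a minimal, illustrative sketch of how such a Duration-valued property is declared and bound; the class is a trimmed stand-in, not the full AtlasProperties.

[source,java,indent=0]
----
import java.time.Duration;

import org.springframework.boot.context.properties.ConfigurationProperties;

// Illustrative stand-in for AtlasProperties: Spring Boot binds the relaxed
// property name meter-time-to-live onto this Duration field.
@ConfigurationProperties(prefix = "spring.metrics.export.atlas")
public class AtlasPropertiesSketch {

    /**
     * Time to live for meters that do not have any activity. After this period the
     * meter will be considered expired and will not get reported.
     */
    private Duration meterTimeToLive;

    public Duration getMeterTimeToLive() {
        return this.meterTimeToLive;
    }

    public void setMeterTimeToLive(Duration meterTimeToLive) {
        this.meterTimeToLive = meterTimeToLive;
    }

}
----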

@@ -192,7 +192,7 @@ public class ReactiveCloudFoundryActuatorAutoConfigurationTests {
 }
 @Test
-public void allEndpointsAvailableUnderCloudFoundryWithoutEnablingWebInclues()
+public void allEndpointsAvailableUnderCloudFoundryWithoutEnablingWebIncludes()
 throws Exception {
 setupContextWithCloudEnabled();
 this.context.register(TestConfiguration.class);

@@ -133,7 +133,7 @@ public class ExposeExcludePropertyEndpointFilterTests {
 }
 @Test
-public void matchWhenDicovererDoesNotMatchShouldMatch() throws Exception {
+public void matchWhenDiscovererDoesNotMatchShouldMatch() throws Exception {
 this.environment.setProperty("foo.expose", "bar");
 this.environment.setProperty("foo.exclude", "");
 this.filter = new ExposeExcludePropertyEndpointFilter<>(

@@ -63,6 +63,8 @@ import org.springframework.util.StringUtils;
 public abstract class AnnotationEndpointDiscoverer<K, T extends Operation>
 implements EndpointDiscoverer<T> {
+private final Log logger = LogFactory.getLog(getClass());
+
 private final ApplicationContext applicationContext;
 private final Function<T, K> operationKeyFactory;
@@ -270,15 +272,12 @@ public abstract class AnnotationEndpointDiscoverer<K, T extends Operation>
 if (msg == null || msg.startsWith(endpointInfo.getClass().getName())) {
 // Possibly a lambda-defined listener which we could not resolve the
 // generic event type for
-Log logger = LogFactory.getLog(getClass());
-if (logger.isDebugEnabled()) {
-logger.debug("Non-matching info type for lister: " + filter, ex);
+if (this.logger.isDebugEnabled()) {
+this.logger.debug("Non-matching info type for filter: " + filter, ex);
 }
 return false;
 }
-else {
-throw ex;
-}
+throw ex;
 }
 }
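The two hunks above replace a `Log` created inside the method with the new class-level `logger` field and drop the redundant `else` around the rethrow. A simplified, illustrative sketch of the resulting shape (not the actual discoverer code):

[source,java,indent=0]
----
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Simplified illustration of the pattern after this change: a single logger
// field, guarded debug logging, and the exception rethrown without an else.
public class LambdaListenerFilterSketch {

    private final Log logger = LogFactory.getLog(getClass());

    boolean handle(Object filter, Object endpointInfo, RuntimeException ex) {
        String msg = ex.getMessage();
        if (msg == null || msg.startsWith(endpointInfo.getClass().getName())) {
            // Possibly a lambda-defined listener which we could not resolve the
            // generic event type for
            if (this.logger.isDebugEnabled()) {
                this.logger.debug("Non-matching info type for filter: " + filter, ex);
            }
            return false;
        }
        throw ex;
    }

}
----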

@@ -37,7 +37,7 @@ import static org.mockito.BDDMockito.given;
 import static org.mockito.Mockito.verify;
 /**
- * Tests fopr {@link CachingOperationInvokerAdvisor}.
+ * Tests for {@link CachingOperationInvokerAdvisor}.
 *
 * @author Phillip Webb
 */

@@ -48,7 +48,7 @@ public class MessageSourceProperties {
 /**
 * Set whether to fall back to the system Locale if no files for a specific Locale
- * have been found. if this is turned off, the only fallback will be the default file
+ * have been found. If this is turned off, the only fallback will be the default file
 * (e.g. "messages.properties" for basename "messages").
 */
 private boolean fallbackToSystemLocale = true;
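The javadoc above describes the behavior behind `spring.messages.fallback-to-system-locale`. As an illustrative sketch of the same switch configured by hand on a plain Spring `MessageSource` rather than through this properties class:

[source,java,indent=0]
----
import org.springframework.context.MessageSource;
import org.springframework.context.support.ResourceBundleMessageSource;

// Hand-rolled equivalent of spring.messages.fallback-to-system-locale=false:
// a missing locale-specific bundle falls back straight to messages.properties
// instead of the bundle for the system Locale.
public class MessageSourceSketch {

    static MessageSource messageSource() {
        ResourceBundleMessageSource messageSource = new ResourceBundleMessageSource();
        messageSource.setBasenames("messages");
        messageSource.setFallbackToSystemLocale(false);
        return messageSource;
    }

}
----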

@@ -1258,7 +1258,7 @@ content into your application; rather pick only the properties that you need.
 spring.metrics.export.atlas.enabled= # Enable publishing to the backend.
 spring.metrics.export.atlas.eval-uri= # URI for the Atlas LWC endpoint to evaluate the data for a subscription.
 spring.metrics.export.atlas.lwc-enabled= # Enable streaming to Atlas LWC.
-spring.metrics.export.atlas.meter-time-to-live= # Time to love for meters that do not have any activity. After this period the meter will be considered expired and will not get reported.
+spring.metrics.export.atlas.meter-time-to-live= # Time to live for meters that do not have any activity. After this period the meter will be considered expired and will not get reported.
 spring.metrics.export.atlas.num-threads= # Number of threads to use with the metrics publishing scheduler.
 spring.metrics.export.atlas.read-timeout= # Read timeout for requests to the backend.
 spring.metrics.export.atlas.step=1 # Step size (i.e. reporting frequency) to use.

@@ -231,7 +231,7 @@ Your application should now be up and running on Heroku.
 [[cloud-deployment-openshift]]
 === OpenShift
 https://www.openshift.com/[OpenShift] is the Red Hat public (and enterprise) extension of
-the Kubernetes container orchestration platform. Similarly to Kubernetes, OpenShift has
+the Kubernetes container orchestration platform. Similarly to Kubernetes, OpenShift has
 many options for installing Spring Boot based applications.
 OpenShift has many resources describing how to deploy Spring Boot applications, which

@@ -40,7 +40,7 @@ Phillip Webb; Dave Syer; Josh Long; Stéphane Nicoll; Rob Winch; Andy Wilkinson;
 :spring-reference: http://docs.spring.io/spring/docs/{spring-docs-version}/spring-framework-reference/
 :spring-security-reference: http://docs.spring.io/spring-security/site/docs/{spring-security-docs-version}/reference/htmlsingle
 :spring-security-oauth2-reference: http://projects.spring.io/spring-security-oauth/docs/oauth2.html
-:spring-webservices-reference: http://docs.spring.io/spring-ws/docs/{spring-webservices-docs-version}/reference/htmlsingle
+:spring-webservices-reference: http://docs.spring.io/spring-ws/docs/{spring-webservices-docs-version}/reference/
 :spring-javadoc: http://docs.spring.io/spring/docs/{spring-docs-version}/javadoc-api/org/springframework
 :spring-amqp-javadoc: http://docs.spring.io/spring-amqp/docs/current/api/org/springframework/amqp
 :spring-batch-javadoc: http://docs.spring.io/spring-batch/apidocs/org/springframework/batch

@@ -181,7 +181,7 @@ register an `EndpointFilter` bean.
 === Securing HTTP Endpoints
 You should take care to secure HTTP endpoints in the same way that you would any other
 sensitive URL. Spring Boot will not apply any security on your behalf, however, it does
-provide some convenient `ReqestMatchers` that can be used in combination with Spring
+provide some convenient `RequestMatcher`s that can be used in combination with Spring
 Security.
 A typical Spring Security configuration could look something like this:
@@ -203,7 +203,7 @@ A typical Spring Security configuration could look something like this:
 ----
 The above uses `EndpointRequest.toAnyEndpoint()` to match a request to any endpoint, then
-ensure that thet all have the `ENDPOINT_ADMIN` role. Several other matcher methods are
+ensure that all have the `ENDPOINT_ADMIN` role. Several other matcher methods are
 also available on `EndpointRequest` (see the API documentation for details).
 If you deploy applications behind a firewall, you may prefer that all your actuator
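The listing referred to by "could look something like this" is elided from this hunk; the sketch below shows the kind of configuration the surrounding prose describes. Treat the `EndpointRequest` import path (its Spring Boot 2.0 location) and the class name as assumptions.

[source,java,indent=0]
----
import org.springframework.boot.actuate.autoconfigure.security.servlet.EndpointRequest;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;

// Sketch: match any actuator endpoint and require the ENDPOINT_ADMIN role,
// as described in the surrounding prose.
@Configuration
public class ActuatorSecurityConfiguration extends WebSecurityConfigurerAdapter {

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        http.requestMatcher(EndpointRequest.toAnyEndpoint())
            .authorizeRequests()
                .anyRequest().hasRole("ENDPOINT_ADMIN")
            .and()
            .httpBasic();
    }

}
----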
@@ -221,7 +221,7 @@ endpoints can be accessed without requiring authentication. You can do so by cha
 [[production-ready-customizing-endpoints]]
 === Customizing Endpoints
 Endpoints can be customized by using Spring properties. You can change whether an
-endpoint is `enabled` and the amount of time it will cache reponses.
+endpoint is `enabled` and the amount of time it will cache responses.
 For example, the following `application.properties` changes the time-to-live of the
 `beans` endpoint and also enables `shutdown`:
@@ -242,7 +242,7 @@ disable _all_ endpoints except for `info`:
 [source,properties,indent=0]
 ----
-management.endpoints.enabled-by-default=flase
+management.endpoints.enabled-by-default=false
 management.endpoint.info.enabled=true
 ----
@@ -308,7 +308,7 @@ TIP: See {sc-spring-boot-actuator-autoconfigure}/endpoint/web/servlet/CorsEndpoi
 [[production-ready-customizing-endpoints-programmatically]]
 === Adding Custom Endpoints
 If you add a `@Bean` annotated with `@Endpoint`, any methods annotated with
-`@ReadOperation`, `@WriteOperation` or `@DeleteOperaion` are automatically exposed over
+`@ReadOperation`, `@WriteOperation` or `@DeleteOperation` are automatically exposed over
 JMX and, in a web application, over HTTP as well.
 You can also write technology specific endpoints by using `@JmxEndpoint` or
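As a hedged illustration of the `@Endpoint` mechanism the hunk above documents, here is a sketch of a custom endpoint; the endpoint id, operations and payload are invented for the example.

[source,java,indent=0]
----
import java.util.LinkedHashMap;
import java.util.Map;

import org.springframework.boot.actuate.endpoint.annotation.Endpoint;
import org.springframework.boot.actuate.endpoint.annotation.ReadOperation;
import org.springframework.boot.actuate.endpoint.annotation.WriteOperation;
import org.springframework.stereotype.Component;

// Hypothetical custom endpoint: the @ReadOperation backs reads over JMX and
// GET over HTTP, the @WriteOperation backs writes / POST requests.
@Component
@Endpoint(id = "features")
public class FeaturesEndpoint {

    private final Map<String, Boolean> features = new LinkedHashMap<>();

    @ReadOperation
    public Map<String, Boolean> features() {
        return this.features;
    }

    @WriteOperation
    public void configureFeature(String name, Boolean enabled) {
        this.features.put(name, enabled);
    }

}
----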

@@ -574,7 +574,7 @@ following:
 Alternatively, when custom config locations are configured by using
 `spring.config.addition-location`, they are used in addition to the default locations.
-Additional locations are search before the default locations. For example, if
+Additional locations are searched before the default locations. For example, if
 additional locations of `classpath:/custom-config/,file:./custom-config/` are configured,
 the search order becomes the following:

@@ -174,7 +174,7 @@ public class EntityManagerFactoryBuilder {
 * <p>
 * Note that mapping resources must be relative to the classpath root, e.g.
 * "META-INF/mappings.xml" or "com/mycompany/repository/mappings.xml", so that
-* they can be loaded through {@code ClassLoader.getResource}.
+* they can be loaded through {@code ClassLoader.getResource()}.
 * @param mappingResources the mapping resources to use
 * @return the builder for fluent usage
 */
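The javadoc above belongs to the builder's `mappingResources(...)` customizer. A hedged sketch of typical usage when defining the `EntityManagerFactory`; the package and resource names are illustrative, not taken from the commit.

[source,java,indent=0]
----
import javax.sql.DataSource;

import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;

@Configuration
public class JpaConfiguration {

    // The mapping resource is resolved against the classpath root, as the
    // javadoc notes, so it can be loaded through ClassLoader.getResource().
    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(
            EntityManagerFactoryBuilder builder, DataSource dataSource) {
        return builder.dataSource(dataSource)
                .packages("com.example.domain")
                .mappingResources("META-INF/mappings.xml")
                .build();
    }

}
----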

@@ -72,7 +72,7 @@ TIP: Run sample cache application using EhCache with
 === Hazelcast
 Both `com.hazelcast:hazelcast` and `com.hazelcast:hazelcast-spring` should be added
-to the project to enable support for Hazelcast. Since there is a default
+to the project to enable support for Hazelcast. Since there is a default
 `hazelcast.xml` configuration file at the root of the classpath, it is used to
 automatically configure the underlying `HazelcastInstance`.
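As an illustrative sketch of what that auto-configuration provides, the resulting `HazelcastInstance` can simply be injected; the map name and consuming class below are invented for the example.

[source,java,indent=0]
----
import java.util.concurrent.ConcurrentMap;

import com.hazelcast.core.HazelcastInstance;
import org.springframework.stereotype.Component;

// Hypothetical consumer: hazelcast.xml on the classpath is enough for Spring
// Boot to auto-configure the HazelcastInstance injected here.
@Component
public class CountryRepository {

    private final HazelcastInstance hazelcastInstance;

    public CountryRepository(HazelcastInstance hazelcastInstance) {
        this.hazelcastInstance = hazelcastInstance;
    }

    public Object findByKey(String key) {
        ConcurrentMap<String, Object> countries = this.hazelcastInstance.getMap("countries");
        return countries.get(key);
    }

}
----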
@@ -118,4 +118,4 @@ for Caffeine. You can customize how caches are created in different ways, see
 `application.properties` for an example and the documentation for more details.
 TIP: Run sample cache application using Caffeine with
-`$mvn spring-boot:run -Pcaffeine`.
+`$mvn spring-boot:run -Pcaffeine`.
