Introduction
We include the following line:
import org.springframework.core.annotation.AliasFor;
A short explanation follows:
import net.javacrumbs.shedlock.support.KeepAliveLockProvider;
KeepAliveLockProvider extends the lock in the middle of the lockAtMostFor interval. For example, if lockAtMostFor is 10 minutes, the lock is extended every 5 minutes for another 10 minutes until it is released. Note that the minimum lockAtMostFor supported by this provider is 30s. The scheduler is used only for lock extension, so a single thread should be enough.
@Configuration
public class ShedlockConfiguration {

    @Bean
    public LockProvider lockProvider(DataSource dataSource) {
        return new KeepAliveLockProvider(
            getJdbcTemplateLockProvider(dataSource),
            Executors.newSingleThreadScheduledExecutor()
        );
    }

    private JdbcTemplateLockProvider getJdbcTemplateLockProvider(DataSource dataSource) {
        return new JdbcTemplateLockProvider(
            JdbcTemplateLockProvider.Configuration.builder()
                .withJdbcTemplate(new JdbcTemplate(dataSource))
                .withDbUpperCase(true)
                .usingDbTime()
                .build()
        );
    }
}
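As a usage sketch (the scheduler name, cron expression, and lock durations below are illustrative, not from the original configuration), a scheduled task guarded by ShedLock might look like this:

import net.javacrumbs.shedlock.spring.annotation.SchedulerLock;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class ReportJob {

    // Illustrative values: the lock is held for at most 10 minutes,
    // so KeepAliveLockProvider extends it roughly every 5 minutes.
    // @EnableSchedulerLock is assumed to be present on a configuration class.
    @Scheduled(cron = "0 */15 * * * *")
    @SchedulerLock(name = "reportJob", lockAtMostFor = "10m", lockAtLeastFor = "1m")
    public void generateReport() {
        // Job body runs on a single node at a time.
    }
}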
import org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient;
@Configuration
public class ReactiveRestClientConfig extends AbstractReactiveElasticsearchConfiguration {

    @Override
    public ReactiveElasticsearchClient reactiveElasticsearchClient() {
        ClientConfiguration clientConfiguration = ClientConfiguration.builder()
            .connectedTo("localhost:9200")
            .build();
        return ReactiveRestClients.create(clientConfiguration);
    }
}
@Override
public Mono<String> deleteStudent(String id) {
    return studentRepository.deleteById(id)
        .thenReturn("Student deleted successfully!");
}
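The service method above assumes a reactive repository roughly like the following sketch; the Student document, index name, and the findByName method are illustrative assumptions, not taken from the original code:

import org.springframework.data.elasticsearch.repository.ReactiveElasticsearchRepository;
import reactor.core.publisher.Mono;

// Hypothetical repository for a Student document mapped with @Document(indexName = "students").
// deleteById(id) returns Mono<Void>, which thenReturn(...) converts into the success message.
public interface StudentRepository extends ReactiveElasticsearchRepository<Student, String> {

    // Derived reactive query methods can be added here, for example:
    Mono<Student> findByName(String name);
}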
@Configuration
public class RabbitMQConfig {

    public static final String QUEUE_NAME = "queue-name";
    public static final String DLQ_NAME = "dlq-name";
    public static final String DLX_NAME = "dlx-name";
    // Exchange name matches the one used in the listener example below
    public static final String DIRECT_EXCHANGE = "direct.exchange";

    @Bean
    public DirectExchange directExchange() {
        return new DirectExchange(DIRECT_EXCHANGE);
    }

    @Bean
    public Queue queue() {
        return new Queue(QUEUE_NAME, true); // Declare the queue as durable
    }

    @Bean
    public Binding binding() {
        return BindingBuilder.bind(queue()).to(directExchange()).with(QUEUE_NAME);
    }

    // DLQ creation
    @Bean
    public Queue deadLetterQueue() {
        return new Queue(DLQ_NAME, true); // Declare the DLQ as durable
    }

    @Bean
    public DirectExchange deadLetterExchange() {
        return new DirectExchange(DLX_NAME);
    }

    @Bean
    public Binding deadLetterBinding() {
        return BindingBuilder.bind(deadLetterQueue())
            .to(deadLetterExchange()).with(DLQ_NAME);
    }

    @Bean
    public Binding queueToDeadLetterExchangeBinding() {
        return BindingBuilder.bind(queue())
            .to(deadLetterExchange()).with(QUEUE_NAME);
    }
}
@RabbitListener(queues = "queue-name")
public void handleMessage(String message) {
    try {
        // Process the incoming message
        if (someCondition) {
            throw new Exception("Simulated exception");
        }
        // Message processing succeeded
    } catch (Exception e) {
        // Handle the exception or log it
        System.err.println("Error processing message: " + e.getMessage());
        // Send the message to the DLQ
        rabbitTemplate.send("direct.exchange", "dlq-name",
            new Message(message.getBytes(), new MessageProperties()));
    }
}
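For completeness, a minimal producer might publish through the direct exchange like this; the ProducerService class name is hypothetical, and it simply calls RabbitTemplate.convertAndSend with the exchange and routing key declared in RabbitMQConfig:

import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.stereotype.Service;

@Service
public class ProducerService {

    private final RabbitTemplate rabbitTemplate;

    public ProducerService(RabbitTemplate rabbitTemplate) {
        this.rabbitTemplate = rabbitTemplate;
    }

    public void send(String payload) {
        // Routes via the direct exchange using the queue name as routing key,
        // matching the binding declared in RabbitMQConfig.
        rabbitTemplate.convertAndSend(RabbitMQConfig.DIRECT_EXCHANGE, RabbitMQConfig.QUEUE_NAME, payload);
    }
}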
Offset scrolling works like pagination: it returns the expected results by skipping a given number of records in a larger result set. Although we only see a portion of the requested results, the server still has to build the full result, which causes additional load.
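Both scrolling examples that follow assume a Spring Data repository method along these lines; this is a sketch, and the entity mapping and return type are assumptions inferred from the calls in the snippets:

import org.springframework.data.domain.ScrollPosition;
import org.springframework.data.domain.Window;
import org.springframework.data.repository.Repository;

public interface BookRepository extends Repository<BookReview, Long> {

    // Returns a window of at most 5 reviews starting at the given scroll position.
    Window<BookReview> findFirst5ByBookRating(String rating, ScrollPosition position);
}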
public List<BookReview> getBooksUsingOffset(String rating) {
    // Keeps track of the position in the result set.
    OffsetScrollPosition offset = ScrollPosition.offset();

    // Retrieves the first 5 books with the specified rating
    Window<BookReview> bookReviews = bookRepository.findFirst5ByBookRating(rating, offset);

    List<BookReview> bookReviewsResult = new ArrayList<>();
    while (!bookReviews.isEmpty()) {
        // Adds each BookReview in the current window to the result
        bookReviews.forEach(bookReviewsResult::add);

        // Stop once the window reports there is nothing further to scroll
        if (!bookReviews.hasNext()) {
            break;
        }

        // Retrieves the next batch of 5 books with the specified rating,
        // continuing from the position of the last element in the current window
        bookReviews = bookRepository
            .findFirst5ByBookRating(
                rating,
                (OffsetScrollPosition) bookReviews.positionAt(bookReviews.size() - 1));
    }

    return bookReviewsResult;
}
public List<BookReview> getBooksUsingOffsetFilteringAndWindowIterator(String rating) {
    WindowIterator<BookReview> bookReviews = WindowIterator.of(position ->
            bookRepository.findFirst5ByBookRating(rating, (OffsetScrollPosition) position))
        .startingAt(ScrollPosition.offset());

    List<BookReview> bookReviewsResult = new ArrayList<>();
    bookReviews.forEachRemaining(bookReviewsResult::add);
    return bookReviewsResult;
}
Keyset filtering retrieves a subset of results using the database's built-in capabilities, aiming to reduce the computation and I/O required for individual queries. The database only needs to construct a smaller result from the given keyset position, without materializing a large full result.
public List<BookReview> getBooksUsingKeySetFiltering(String rating) {
    WindowIterator<BookReview> bookReviews = WindowIterator.of(position ->
            bookRepository.findFirst5ByBookRating(rating, (KeysetScrollPosition) position))
        .startingAt(ScrollPosition.keyset());

    List<BookReview> bookReviewsResult = new ArrayList<>();
    bookReviews.forEachRemaining(bookReviewsResult::add);
    return bookReviewsResult;
}
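For reference, the examples above might operate on an entity like the following; this is a sketch under the assumption that the data store is a relational database accessed via JPA, and the field names are illustrative. Keyset scrolling needs a stable sort order, which Spring Data typically completes by appending the identifier:

import jakarta.persistence.Entity;
import jakarta.persistence.GeneratedValue;
import jakarta.persistence.Id;

// Hypothetical entity behind the scrolling examples.
@Entity
public class BookReview {

    @Id
    @GeneratedValue
    private Long id;           // stable identifier used to complete the keyset sort
    private String bookName;
    private String bookRating; // matched by findFirst5ByBookRating

    // getters and setters omitted
}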
History and Evolution
NATS was originally created by Derek Collison, who, inspired by his experience building messaging systems, aimed to craft a messaging solution that embodies simplicity and performance. Over time, as cloud architectures grew in popularity, NATS emerged as a popular choice, primarily because its inherent characteristics matched the requirements of cloud-native systems.
<dependency>
    <groupId>io.nats</groupId>
    <artifactId>jnats</artifactId>
    <version>latest_version</version>
</dependency>
docker run -d -p 4222:4222 -p 8222:8222 --name nats-main nats:latest
Then go to http://localhost:8222 to reach the server's HTTP monitoring endpoint.
@Configuration
public class NatsConfig {

    @Value("${nats.url}")
    private String natsUrl;

    @Bean
    public Connection natsConnection() throws IOException, InterruptedException {
        Options options = new Options.Builder().server(natsUrl).build();
        return Nats.connect(options);
    }
}
@Autowired
private Connection natsConnection;

public void sendMessage(String subject, String message) {
    natsConnection.publish(subject, message.getBytes(StandardCharsets.UTF_8));
}
@Autowired
private Connection natsConnection;

public void subscribeToSubject(String subject) {
    // Asynchronous consumption goes through a Dispatcher in the NATS Java client
    Dispatcher dispatcher = natsConnection.createDispatcher(msg -> {
        String receivedMessage = new String(msg.getData(), StandardCharsets.UTF_8);
        // Handle and process the received message
    });
    dispatcher.subscribe(subject);
}
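As a rough end-to-end sketch (the MessagingService class and the "orders" subject are illustrative, not from the original article), the publish and subscribe snippets above might be combined like this:

import io.nats.client.Connection;
import io.nats.client.Dispatcher;
import java.nio.charset.StandardCharsets;
import org.springframework.stereotype.Service;

@Service
public class MessagingService {

    private final Connection natsConnection;

    public MessagingService(Connection natsConnection) {
        this.natsConnection = natsConnection;
    }

    public void demo() {
        // Subscribe first, then publish to the same subject.
        Dispatcher dispatcher = natsConnection.createDispatcher(msg ->
            System.out.println("Received: " + new String(msg.getData(), StandardCharsets.UTF_8)));
        dispatcher.subscribe("orders");

        natsConnection.publish("orders", "New order created".getBytes(StandardCharsets.UTF_8));
    }
}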
import org.springframework.boot.web.embedded.netty.NettyServerCustomizer;
Reactor-Netty doesn’t have a default idle connection timeout.
@Configuration
public class NettyServerCustomizerConfig {

    @Bean
    public NettyServerCustomizer nettyServerCustomizer() {
        return httpServer -> httpServer.idleTimeout(Duration.ofMillis(1));
    }
}
server:
  netty:
    idle-timeout: 1000 # 1 second idle timeout