diff --git a/.yardopts b/.yardopts
index 77e7d3bb4..a778ea15d 100644
--- a/.yardopts
+++ b/.yardopts
@@ -5,10 +5,12 @@
 --title=Concurrent Ruby
 --template default
 --template-path ./yard-template
+--default-return undocumented
 
 ./lib/**/*.rb
 ./ext/concurrent_ruby_ext/**/*.c
 -
 doc/thread_pools.md
+doc/promises.out.md
 README.md
 LICENSE.txt
diff --git a/Gemfile b/Gemfile
index 3ac928d82..20032a19d 100644
--- a/Gemfile
+++ b/Gemfile
@@ -4,18 +4,20 @@ gemspec name: 'concurrent-ruby'
 gemspec name: 'concurrent-ruby-edge'
 
 group :development do
-  gem 'rake', '~> 10.4.2'
+  gem 'rake', '~> 10.0'
   gem 'rake-compiler', '~> 0.9.5'
   gem 'rake-compiler-dock', '~> 0.4.3'
   gem 'gem-compiler', '~> 0.3.0'
-  gem 'benchmark-ips', '~> 2.2.0'
+  gem 'benchmark-ips', '~> 2.7'
 
   # documentation
   gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false
-  gem 'yard', '~> 0.8.7.6', :require => false
+  gem 'yard', '~> 0.8.0', :require => false # TODO (pitr-ch 15-Oct-2016): does not work on 1.9.3 anymore
 
-  gem 'inch', '~> 0.6.3', :platforms => :mri, :require => false
-  gem 'redcarpet', '~> 3.3.2', platforms: :mri # understands github markdown
+  # TODO remove, reports private classes as undocumented
+  gem 'inch', '~> 0.7.0', :platforms => :mri, :require => false
+  gem 'redcarpet', '~> 3.3', platforms: :mri # understands github markdown
+  gem 'md-ruby-eval'
 end
 
 group :testing do
diff --git a/README.md b/README.md
index a74795513..d74a84b1a 100644
--- a/README.md
+++ b/README.md
@@ -9,39 +9,28 @@
 [![License](https://img.shields.io/badge/license-MIT-green.svg)](http://opensource.org/licenses/MIT)
 [![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-devs%20%26%20users-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby)
 
-<table>
-  <tr>
-    <td align="left" valign="top">
-      <p>
-        Modern concurrency tools for Ruby. Inspired by
-        <a href="http://www.erlang.org/doc/reference_manual/processes.html">Erlang</a>,
-        <a href="http://clojure.org/concurrent_programming">Clojure</a>,
-        <a href="http://akka.io/">Scala</a>,
-        <a href="http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell">Haskell</a>,
-        <a href="http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx">F#</a>,
-        <a href="http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx">C#</a>,
-        <a href="http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html">Java</a>,
-        and classic concurrency patterns.
-      </p>
-      <p>
-        The design goals of this gem are:
-      </p>
-      <ul>
-        <li>Be an 'unopinionated' toolbox that provides useful utilities without debating which is better or why</li>
-        <li>Remain free of external gem dependencies</li>
-        <li>Stay true to the spirit of the languages providing inspiration</li>
-        <li>But implement in a way that makes sense for Ruby</li>
-        <li>Keep the semantics as idiomatic Ruby as possible</li>
-        <li>Support features that make sense in Ruby</li>
-        <li>Exclude features that don't make sense in Ruby</li>
-        <li>Be small, lean, and loosely coupled</li>
-      </ul>
-    </td>
-  </tr>
-</table>
+Modern concurrency tools for Ruby. Inspired by
+[Erlang](http://www.erlang.org/doc/reference_manual/processes.html),
+[Clojure](http://clojure.org/concurrent_programming),
+[Scala](http://akka.io/),
+[Haskell](http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell),
+[F#](http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx),
+[C#](http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx),
+[Java](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html),
+and classic concurrency patterns.
+
+
+
+The design goals of this gem are:
+
+* Be an 'unopinionated' toolbox that provides useful utilities without debating which is better or why
+* Remain free of external gem dependencies
+* Stay true to the spirit of the languages providing inspiration
+* But implement in a way that makes sense for Ruby
+* Keep the semantics as idiomatic Ruby as possible
+* Support features that make sense in Ruby
+* Exclude features that don't make sense in Ruby
+* Be small, lean, and loosely coupled
 
 ### Supported Ruby versions
 
@@ -127,13 +116,13 @@ These features are under active development and may change frequently. They are
 keep backward compatibility (there may also lack tests and documentation). Semantic versions will
 be obeyed though. Features developed in `concurrent-ruby-edge` are expected to move to `concurrent-ruby` when final.
 
-* [Actor](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Actor.html):
-  Implements the Actor Model, where concurrent actors exchange messages.
-* [New Future Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/FutureShortcuts.html):
+* [Promises Framework](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Promises.html):
   Unified implementation of futures and promises which combines features of previous `Future`,
   `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and `TimerTask` into a single framework. It extensively uses the
   new synchronization layer to make all the features **non-blocking** and **lock-free**, with the exception of obviously blocking
   operations like `#wait`, `#value`. It also offers better performance.
+* [Actor](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Actor.html):
+  Implements the Actor Model, where concurrent actors exchange messages.
 * [Channel](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/Channel.html):
   Communicating Sequential Processes ([CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes)).
   Functionally equivalent to Go [channels](https://tour.golang.org/concurrency/2) with additional
@@ -141,15 +130,16 @@ be obeyed though. Features developed in `concurrent-ruby-edge` are expected to m
 * [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LazyRegister.html)
 * [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/AtomicMarkableReference.html)
 * [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/LockFreeLinkedSet.html)
-* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge/LockFreeStack.html)
+* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/LockFreeStack.html)
 
 #### Statuses:
 
 *Why are these not in core?*
 
+- **Promises Framework** - It is being finalized so that it can be moved to core, where it will deprecate the
+  old implementation.
 - **Actor** - Partial documentation and tests; depends on new future/promise framework; stability is good.
 - **Channel** - Brand new implementation; partial documentation and tests; stability is good.
-- **Future/Promise Framework** - API changes; partial documentation and tests; stability is good.
 - **LazyRegister** - Missing documentation and tests.
 - **AtomicMarkableReference, LockFreeLinkedSet, LockFreeStack** - Need real world battle testing.
diff --git a/doc/future-promise.md b/doc/future-promise.md
deleted file mode 100644
index 5be5a6983..000000000
--- a/doc/future-promise.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Futures and Promises
-
-New implementation added in version 0.8 differs from previous versions and has little in common.
-{Future} represents a value which will become {#completed?} in future, it'll contain {#value} if {#success?} or a {#reason} if {#failed?}. It cannot be directly completed, there are implementations of abstract {Promise} class for that, so {Promise}'s only purpose is to complete a given {Future} object. They are always constructed as a Pair even in chaining methods like {#then}, {#rescue}, {#then_delay}, etc.
-
-There is few {Promise} implementations:
-
-- OuterPromise - only Promise used by users, can be completed by outer code. Constructed with {Concurrent::Next.promise} helper method.
-- Immediate - internal implementation of Promise used to represent immediate evaluation of a block. Constructed with {Concurrent::Next.future} helper method.
-- Delay - internal implementation of Promise used to represent delayed evaluation of a block. Constructed with {Concurrent::Next.delay} helper method.
-- ConnectedPromise - used internally to support {Future#with_default_executor}
-
diff --git a/doc/promises-main.md b/doc/promises-main.md
new file mode 100644
index 000000000..526b2405c
--- /dev/null
+++ b/doc/promises-main.md
@@ -0,0 +1,61 @@
+Promises is a new framework unifying the former concurrent-ruby tools
+`Concurrent::Future`, `Concurrent::Promise`, `Concurrent::IVar`,
+`Concurrent::Event`, `Concurrent.dataflow`, `Delay`, and `TimerTask`. It
+extensively uses the new synchronization layer to make all the methods
+*lock-free* (with the exception of obviously blocking operations like `#wait`,
+`#value`, etc.). As a result it lowers the danger of deadlocking and offers
+better performance.
+
+It provides tools similar to those of other promise libraries, so users coming
+from other languages and other promise libraries will find the same tools here
+(though probably under different names). The naming conventions were borrowed
+heavily from JS promises.
+
+This framework, however, is not just a re-implementation of another promise
+library; it draws inspiration from many promise libraries, adds new ideas, and
+is integrated with other abstractions like actors and channels.
+
+Therefore it is likely that users will find a suitable solution for their
+problem in this framework. If the problem is simple, they can pick one suitable
+abstraction, e.g. just promises or actors. If the problem is complex, they can
+combine the parts (promises, channels, actors), which were designed to work
+well together, into a solution, rather than having to fragilely combine
+independent tools. A small sketch of such a combination follows.
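+
+As a minimal, illustrative sketch (the `AdHoc` actor utility and the
+`then_ask` chaining used here are both introduced later in this guide; the
+squaring actor is only a made-up example):
+
+```ruby
+require 'concurrent-edge'
+
+# An actor keeps the isolated state; this one just squares incoming messages.
+squarer = Concurrent::Actor::Utils::AdHoc.spawn(:squarer) { -> v { v ** 2 } }
+
+# A promise chain computes asynchronously, asks the actor,
+# and chains on the actor's reply.
+Concurrent::Promises.
+  future { 1 + 1 }.   # runs on a thread pool
+  then_ask(squarer).  # sends the value to the actor, returns a future of the reply
+  then { |v| v + 2 }. # continues when the reply arrives
+  value!              # => 6
+```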
+
+This framework allows its users to:
+
+- Process tasks asynchronously
+- Chain, branch, and zip the asynchronous tasks together
+  - Therefore, to create a directed acyclic graph (hereafter DAG) of tasks
+- Create delayed tasks (or a delayed DAG of tasks)
+- Create scheduled tasks (or a scheduled DAG of tasks)
+- Deal with errors through rejections
+- Reduce the danger of deadlocking
+- Control the concurrency level of tasks
+- Simulate thread-like processing without occupying threads
+  - It allows creating tens of thousands of simulations on one thread
+    pool
+  - It works well on all Ruby implementations
+- Use actors to maintain isolated states and to seamlessly combine
+  them with promises
+- Build a parallel-processing stream system with back
+  pressure (parts which are not keeping up signal to the other parts of the
+  system to slow down).
+
+**The guide is the best place to start with promises; see**
+**{file:doc/promises.out.md}.**
+
+# Main classes
+
+The main public user-facing classes are {Concurrent::Promises::Event} and
+{Concurrent::Promises::Future}, which share the common ancestor
+{Concurrent::Promises::AbstractEventFuture}.
+
+**{Concurrent::Promises::AbstractEventFuture}:**
+> {include:Concurrent::Promises::AbstractEventFuture}
+
+**{Concurrent::Promises::Event}:**
+> {include:Concurrent::Promises::Event}
+
+**{Concurrent::Promises::Future}:**
+> {include:Concurrent::Promises::Future}
+
diff --git a/doc/promises.in.md b/doc/promises.in.md
new file mode 100644
index 000000000..0d1d06614
--- /dev/null
+++ b/doc/promises.in.md
@@ -0,0 +1,963 @@
+# Basics
+
+## Factory methods
+
+Future and Event are created indirectly with constructor methods in
+FactoryMethods. They are not designed for inheritance but rather for
+composition.
+
+```ruby
+Concurrent::Promises::FactoryMethods.instance_methods
+```
+
+The module can be included or extended where needed.
+
+```ruby
+Class.new do
+  include Concurrent::Promises::FactoryMethods
+
+  def a_method
+    resolvable_event
+  end
+end.new.a_method
+
+Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event
+```
+
+The module is already extended into {Concurrent::Promises} for convenience.
+
+```ruby
+Concurrent::Promises.resolvable_event
+```
+
+For this guide we introduce a shortcut in `main` so we can call the factory
+methods in the following examples by using `Promises` directly.
+
+```ruby
+Promises = Concurrent::Promises #
+Promises.resolvable_event
+```
+
+## Asynchronous task
+
+The most basic use-case of the framework is asynchronous processing. A task can
+be processed asynchronously by using the `future` factory method. The block will
+be executed on an internal thread pool.
+
+Arguments of `future` are passed to the block and evaluation starts immediately.
+
+```ruby
+future = Promises.future(0.1) do |duration|
+  sleep duration
+  :result
+end
+```
+
+Ask if the future is resolved; here it will still be in the middle of the
+sleep call.
+
+```ruby
+future.resolved?
+```
+
+Retrieving the value will block until the future is resolved.
+
+```ruby
+future.value
+future.resolved?
+```
+
+If the task fails, we talk about the future being rejected.
+
+```ruby
+future = Promises.future { raise 'Boom' }
+```
+
+There is no result; the future was rejected with a reason.
+
+```ruby
+future.value
+future.reason
+```
+
+It can be forced to raise the reason for rejection when retrieving the value.
+
+```ruby
+begin
+  future.value!
+rescue => e
+  e
+end
+```
+
+This is the same as `future.value! rescue $!`, which will be used hereafter.
+
+Or it can be used directly as an argument for `raise`, since it implements the
+`exception` method.
+
+```ruby
+raise future rescue $!
+```
+
+## States
+
+Let's define an inspection helper for methods.
+
+```ruby
+def inspect_methods(*methods, of:)
+  methods.reduce({}) { |h, m| h.update m => of.send(m) }
+end #
+```
+
+Event has `pending` and `resolved` states.
+
+```ruby
+event = Promises.resolvable_event #
+inspect_methods(:state, :pending?, :resolved?, of: event)
+
+event.resolve #
+inspect_methods(:state, :pending?, :resolved?, of: event)
+```
+
+Future's `resolved` state is further specified to be `fulfilled` or `rejected`.
+
+```ruby
+future = Promises.resolvable_future #
+inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?,
+                of: future)
+
+future.fulfill :value #
+inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?,
+                :result, :value, :reason, of: future)
+
+future = Promises.rejected_future StandardError.new #
+inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?,
+                :result, :value, :reason, of: future)
+```
+
+## Direct creation of resolved futures
+
+When an existing value has to be wrapped in a future, it does not have to go
+through evaluation as follows.
+
+```ruby
+Promises.future { :value }
+```
+
+Instead it can be created directly.
+
+```ruby
+Promises.fulfilled_future(:value)
+Promises.rejected_future(StandardError.new('Ups'))
+Promises.resolved_future(true, :value, nil)
+Promises.resolved_future(false, nil, StandardError.new('Ups'))
+```
+
+## Chaining
+
+A big advantage of promises is the ability to chain tasks together without
+blocking the current thread.
+
+```ruby
+Promises.
+  future(2) { |v| v.succ }.
+  then(&:succ).
+  value!
+```
+
+As the `future` factory method takes arguments, so does the `then` method. Any
+supplied arguments are passed to the block, and the library ensures that they
+are visible to the block.
+
+```ruby
+Promises.
+  future('3') { |s| s.to_i }.
+  then(2) { |v, arg| v + arg }.
+  value
+Promises.
+  fulfilled_future('3').
+  then(&:to_i).
+  then(2, &:+).
+  value
+Promises.
+  fulfilled_future(1).
+  chain(2) { |fulfilled, value, reason, arg| value + arg }.
+  value
+```
+
+Passing the arguments in (similarly as for a thread, `Thread.new(arg) { |arg|
+do_stuff arg }`) is **required**; both of the following examples may break,
+since they rely on the captured variable being visible to the other thread.
+
+```ruby
+arg = 1
+Thread.new { do_stuff arg }
+Promises.future { do_stuff arg }
+```
+
+## Branching, and zipping
+
+Besides chaining, computation can also be branched.
+
+```ruby
+head = Promises.fulfilled_future -1 #
+branch1 = head.then(&:abs) #
+branch2 = head.then(&:succ).then(&:succ) #
+
+branch1.value!
+branch2.value!
+```
+
+Branches can be combined back into one future by zipping (`zip`, `&`).
+
+```ruby
+branch1.zip(branch2).value!
+(branch1 & branch2).
+  then { |a, b| a + b }.
+  value!
+(branch1 & branch2).
+  then(&:+).
+  value!
+Promises.
+  zip(branch1, branch2, branch1).
+  then { |*values| values.reduce(&:+) }.
+  value!
+```
+
+Instead of zipping, only the first resolved one can be taken if needed.
+
+```ruby
+Promises.any(branch1, branch2).value!
+(branch1 | branch2).value!
+```
+
+## Blocking methods
+
+In these examples we have used blocking methods like `value` extensively for
+their convenience; however, in practice it is better to avoid them and continue
+chaining.
+
+If they need to be used (e.g. when integrating with threads), `value!` is a
+better option than `value` when rejections are not dealt with differently.
+Otherwise the rejections are not handled and are probably silently forgotten.
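+
+To make the contrast concrete, here is a small, illustrative sketch (the
+values are made up for the example): instead of parking a thread on `value`,
+attach the follow-up work as another chained task and let the thread pool
+resume it once the result is ready.
+
+```ruby
+# Blocking style: the current thread sleeps until the future resolves.
+sum = Promises.future(1, 2) { |a, b| a + b }.value!
+puts sum
+
+# Chaining style: nothing blocks; printing is just another task in the DAG.
+Promises.
+  future(1, 2) { |a, b| a + b }.
+  then { |sum| puts sum }
+```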
+
+## Error handling
+
+When one of the tasks in the chain fails, the rejection propagates down the
+chain without executing the tasks created with `then`.
+
+```ruby
+Promises.
+  fulfilled_future(Object.new).
+  then(&:succ).
+  then(&:succ).
+  result
+```
+
+As tasks chained with `then` execute only on fulfilled futures, there is a
+`rescue` method which chains a task that is executed only when the future is
+rejected. It can be used to recover from rejection.
+
+Using `rescue` to fulfill to 0 instead of the error.
+
+```ruby
+Promises.
+  fulfilled_future(Object.new).
+  then(&:succ).
+  then(&:succ).
+  rescue { |err| 0 }.
+  result
+```
+
+`rescue` is not executed when there is no rejection.
+
+```ruby
+Promises.
+  fulfilled_future(1).
+  then(&:succ).
+  then(&:succ).
+  rescue { |e| 0 }.
+  result
+```
+
+Tasks added with `chain` are always evaluated.
+
+```ruby
+Promises.
+  fulfilled_future(1).
+  chain { |fulfilled, value, reason| fulfilled ? value : reason }.
+  value!
+Promises.
+  rejected_future(StandardError.new('Ups')).
+  chain { |fulfilled, value, reason| fulfilled ? value : reason }.
+  value!
+```
+
+Zip is rejected if any of the zipped futures is.
+
+```ruby
+rejected_zip = Promises.zip(
+  Promises.fulfilled_future(1),
+  Promises.rejected_future(StandardError.new('Ups')))
+rejected_zip.result
+rejected_zip.
+  rescue { |reason1, reason2| (reason1 || reason2).message }.
+  value
+```
+
+## Delayed futures
+
+Delayed futures will not evaluate until asked to by `touch` or by another
+method requiring resolution.
+
+```ruby
+future = Promises.delay { sleep 0.1; 'lazy' }
+sleep 0.1 #
+future.resolved?
+future.touch
+sleep 0.2 #
+future.resolved?
+```
+
+All blocking methods like `wait` and `value` call `touch` and trigger evaluation.
+
+```ruby
+Promises.delay { :value }.value
+```
+
+Laziness propagates up through the chain, allowing whole or partial lazy chains.
+
+```ruby
+head = Promises.delay { 1 } #
+branch1 = head.then(&:succ) #
+branch2 = head.delay.then(&:succ) #
+join = branch1 & branch2 #
+
+sleep 0.1 #
+```
+
+Nothing resolves.
+
+```ruby
+[head, branch1, branch2, join].map(&:resolved?)
+```
+
+Force `branch1` evaluation.
+
+```ruby
+branch1.value
+sleep 0.1 #
+[head, branch1, branch2, join].map(&:resolved?)
+```
+
+Force evaluation of both by calling `value` on `join`.
+
+```ruby
+join.value
+[head, branch1, branch2, join].map(&:resolved?)
+```
+
+## Flatting
+
+Sometimes it is necessary to wait for an inner future. An apparent solution is
+to wait inside the future, `Promises.future { Promises.future { 1+1 }.value }.value`;
+however, as mentioned before, `value` calls should be **avoided** to avoid
+blocking threads. Therefore there is a `flat` method, which is a correct
+solution in this situation and does not block any thread.
+
+```ruby
+Promises.future { Promises.future { 1+1 } }.flat.value!
+```
+
+A more complicated example.
+
+```ruby
+Promises.
+  future { Promises.future { Promises.future { 1 + 1 } } }.
+  flat(1).
+  then { |future| future.then(&:succ) }.
+  flat(1).
+  value!
+```
+
+## Scheduling
+
+Tasks can be planned to be executed with a time delay.
+
+Schedule a task to be executed in 0.1 seconds.
+
+```ruby
+scheduled = Promises.schedule(0.1) { 1 }
+scheduled.resolved?
+```
+
+The value will become available after 0.1 seconds.
+
+```ruby
+scheduled.value
+```
+
+It can be used in the chain as well, where the delay is counted from the moment
+its parent resolves. Therefore the following future will be resolved in 0.2 seconds.
+
+```ruby
+future = Promises.
+  future { sleep 0.1; :result }.
+  schedule(0.1).
+  then(&:to_s).
+  value!
+```
+
+A `Time` can be used as well.
+
+```ruby
+Promises.schedule(Time.now + 10) { :val }
+```
+
+## Resolvable Future and Event
+
+Sometimes it is required to resolve a future externally; in these cases the
+`resolvable_future` and `resolvable_event` factory methods can be used. See
+{Concurrent::Promises::ResolvableFuture} and
+{Concurrent::Promises::ResolvableEvent}.
+
+```ruby
+future = Promises.resolvable_future
+```
+
+The thread will be blocked until the future is resolved.
+
+```ruby
+thread = Thread.new { future.value } #
+future.fulfill 1
+thread.value
+```
+
+A future can be resolved only once.
+
+```ruby
+future.fulfill 1 rescue $!
+future.fulfill 2, false
+```
+
+## How are promises executed?
+
+Promises use global pools to execute the tasks. Therefore each task may run on
+a different thread, which implies that users have to be careful not to depend
+on thread-local variables (or they have to be set at the beginning of the task
+and cleaned up at the end of the task).
+
+Since the tasks are running on many different threads of the thread pool, it's
+better to follow these rules:
+
+- Use only data passed in through arguments or values of parent futures, to
+  have better control over what the futures are accessing.
+- The data passed in and out of futures are easier to deal with if they are
+  immutable or at least treated as such.
+- Any mutable and mutated object accessed by more than one thread or future
+  must be thread-safe, see {Concurrent::Array}, {Concurrent::Hash}, and
+  {Concurrent::Map}. (The value of a future may be consumed by many futures.)
+- Futures can access outside objects, but they have to be thread-safe.
+
+> *TODO: This part to be extended*
+
+# Advanced
+
+## Callbacks
+
+```ruby
+queue = Queue.new
+future = Promises.delay { 1 + 1 }
+
+future.on_fulfillment { queue << 1 } # evaluated asynchronously
+future.on_fulfillment! { queue << 2 } # evaluated on resolving thread
+
+queue.empty?
+future.value
+queue.pop
+queue.pop
+```
+
+## Using executors
+
+Factory methods, chaining methods, and callback methods all have versions
+which take an executor argument.
+
+They take an instance of an executor, or a symbol which is a shortcut for one
+of the two global pools in concurrent-ruby: `:fast` for short and non-blocking
+tasks and `:io` for blocking and long tasks.
+
+```ruby
+Promises.future_on(:fast) { 2 }.
+  then_on(:io) { File.read __FILE__ }.
+  value.size
+```
+
+## Run (simulated process)
+
+Similar to flatting is running. When `run` is called on a future, it will
+flatten indefinitely as long as the future fulfills into a `Future` value. It
+can be used to simulate thread-like processing without actually occupying the
+thread.
+
+```ruby
+count = lambda do |v|
+  v += 1
+  v < 5 ? Promises.future_on(:fast, v, &count) : v
+end
+400.times.
+  map { Promises.future_on(:fast, 0, &count).run.value! }.
+  all? { |v| v == 5 }
+```
+
+Therefore the above example finishes fine on the `:fast` thread pool even
+though it has far fewer threads than there are simulated processes.
+
+# Interoperability
+
+## Actors
+
+Create an actor which takes received numbers and returns the number squared.
+
+```ruby
+actor = Concurrent::Actor::Utils::AdHoc.spawn :square do
+  -> v { v ** 2 }
+end
+```
+
+Send the result of `1+1` to the actor, and add 2 to the result sent back from
+the actor.
+
+```ruby
+Promises.
+  future { 1 + 1 }.
+  then_ask(actor).
+  then { |v| v + 2 }.
+  value!
+```
+
+So `(1 + 1)**2 + 2 = 6`.
+
+The `ask` method returns a future.
+
+```ruby
+actor.ask(2).then(&:succ).value!
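+
+# `then_ask` (shown above) composes the same way from the future side; an
+# illustrative variation: square 3 via the actor, then increment the reply.
+Promises.fulfilled_future(3).then_ask(actor).then(&:succ).value!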
+```
+
+## ProcessingActor
+
+> *TODO: Documentation to be added in a few days*
+
+## Channel
+
+There is an implementation of channel as well. Let's start by creating a
+channel with a capacity of 2 messages.
+
+```ruby
+ch1 = Concurrent::Promises::Channel.new 2
+```
+
+We push 3 messages; it can be observed that the last future representing the
+push is not fulfilled, since the capacity prevents it. When the work which
+fills the channel depends on the futures created by push, this can be used to
+create back pressure: the filling work is delayed until the channel has space
+for more messages.
+
+```ruby
+pushes = 3.times.map { |i| ch1.push i }
+ch1.pop.value!
+pushes
+```
+
+A selection over channels can be created with the `select_channel` factory
+method. It will be fulfilled with the first message available in any of the
+channels. It returns a pair, to be able to find out which channel had the
+message available.
+
+```ruby
+ch2 = Concurrent::Promises::Channel.new 2
+result = Concurrent::Promises.select_channel(ch1, ch2)
+result.value!
+
+Promises.future { 1+1 }.then_push_channel(ch1)
+result = (
+  Concurrent::Promises.fulfilled_future('%02d') &
+  Concurrent::Promises.select_channel(ch1, ch2)).
+  then { |format, (channel, value)| format format, value }
+result.value!
+```
+
+# Use-cases
+
+## Simple background processing
+
+```ruby
+Promises.future { do_stuff }
+```
+
+## Parallel background processing
+
+```ruby
+tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } }
+Promises.zip(*tasks).value!
+```
+
+## Actor background processing
+
+Actors mainly keep and isolate state; they should stay responsive and not be
+blocked by longer-running computations. It is desirable to offload the work to
+stateless promises.
+
+Let's define an actor which will process jobs while staying responsive,
+tracking the number of tasks being processed.
+
+```ruby
+class Computer < Concurrent::Actor::RestartingContext
+  def initialize
+    super()
+    @jobs = {}
+  end
+
+  def on_message(msg)
+    command, *args = msg
+    case command
+    # new job to process
+    when :run
+      job = args[0]
+      @jobs[job] = envelope.future
+      # Process asynchronously and send message back when done.
+      Concurrent::Promises.future(&job).chain(job) do |fulfilled, value, reason, job|
+        self.tell [:done, job, fulfilled, value, reason]
+      end
+      # Do not make return value of this method to be answer of this message.
+      # We are answering later in :done by resolving the future kept in @jobs.
+      Concurrent::Actor::Behaviour::MESSAGE_PROCESSED
+    when :done
+      job, fulfilled, value, reason = *args
+      future = @jobs.delete job
+      # Answer the job's result.
+      future.resolve fulfilled, value, reason
+    when :status
+      { running_jobs: @jobs.size }
+    else
+      # Continue to fail with unknown message.
+      pass
+    end
+  end
+end
+```
+
+Create the computer actor and send it 3 jobs.
+
+```ruby
+computer = Concurrent::Actor.spawn Computer, :computer
+results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] }
+computer.ask(:status).value!
+results.map(&:value!)
+```
+
+## Solving the Thread count limit by thread simulation
+
+Sometimes an application requires processing a lot of tasks concurrently. If
+the number of concurrent tasks is high enough, then it is not possible to
+create a Thread for each of them. A partially satisfactory solution could be to
+use Fibers, but that solution locks the application to MRI, since other Ruby
+implementations use a thread for each Fiber.
+
+This library provides a {Concurrent::Promises::Future#run} method on a future
+to simulate threads without actually occupying one all the time. The `run`
+method is similar to {Concurrent::Promises::Future#flat}, but it will keep
+flattening until the future is fulfilled with a non-future value; that value is
+then taken as the result of the process simulated by `run`.
+
+```ruby
+body = lambda do |v|
+  # Some computation step of the process
+  new_v = v + 1
+  # Is the process finished?
+  if new_v < 5
+    # Continue computing with new value, does not have to be recursive.
+    # It just has to return a future.
+    Promises.future(new_v, &body)
+  else
+    # The process is finished, fulfill the final value with `new_v`.
+    new_v
+  end
+end
+Promises.future(0, &body).run.value! # => 5
+```
+
+This solution works well on any Ruby implementation.
+
+> *TODO: More examples to be added.*
+
+## Cancellation
+
+### Simple
+
+Let's have two processes which will count until cancelled.
+
+```ruby
+source, token = Concurrent::Cancellation.create
+
+count_until_cancelled = -> token, count do
+  if token.canceled?
+    count
+  else
+    Promises.future token, count+1, &count_until_cancelled
+  end
+end #
+
+futures = Array.new(2) do
+  Promises.future(token, 0, &count_until_cancelled).run
+end
+
+sleep 0.01 #
+source.cancel
+futures.map(&:value!)
+```
+
+The cancellation can also be used as an event or future to log or plan
+re-execution.
+
+```ruby
+token.to_event.chain do
+  # log cancellation
+  # plan re-execution
+end
+```
+
+### Parallel background processing with cancellation
+
+Each task tries to count to 1000, but there is a randomly failing step. The
+tasks share a cancellation; when one of them fails, it cancels the others.
+
+```ruby
+source, token = Concurrent::Cancellation.create
+tasks = 4.times.map do |i|
+  Promises.future(source, token, i) do |source, token, i|
+    count = 0
+    1000.times do
+      break count = :cancelled if token.canceled?
+      count += 1
+      sleep 0.01
+      if rand > 0.95
+        source.cancel
+        raise 'random error'
+      end
+      count
+    end
+  end
+end
+Promises.zip(*tasks).result
+```
+
+Without the randomly failing part it produces the following.
+
+```ruby
+source, token = Concurrent::Cancellation.create
+tasks = 4.times.map do |i|
+  Promises.future(source, token, i) do |source, token, i|
+    count = 0
+    1000.times do
+      break count = :cancelled if token.canceled?
+      count += 1
+      # sleep 0.01
+      # if rand > 0.95
+      #   source.cancel
+      #   raise 'random error'
+      # end
+    end
+    count
+  end
+end
+Promises.zip(*tasks).result
+```
+
+## Throttling concurrency
+
+By creating an actor managing the resource, we can control how many threads
+are accessing the resource, in this case one at a time.
+
+```ruby
+data = Array.new(10) { |i| '*' * i }
+DB = Concurrent::Actor::Utils::AdHoc.spawn :db, data do |data|
+  lambda do |message|
+    # pretending that this queries a DB
+    data[message]
+  end
+end
+
+concurrent_jobs = 11.times.map do |v|
+  DB.
+    # ask the DB with the `v`, only one at the time, rest is parallel
+    ask(v).
+    # get size of the string, rejects for 11
+    then(&:size).
+    # translate error to a value (message of the exception)
+    rescue { |reason| reason.message }
+end #
+
+Promises.zip(*concurrent_jobs).value!
+```
+
+Often there is more than one DB connection; then a pool can be used.
+
+```ruby
+pool_size = 5
+
+DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index|
+  # DB connection constructor
+  Concurrent::Actor::Utils::AdHoc.spawn(
+    name: "connection-#{index}",
+    args: [data]) do |data|
+    lambda do |message|
+      # pretending that this queries a DB
+      data[message]
+    end
+  end
+end
+
+concurrent_jobs = 11.times.map do |v|
+  DB_POOL.
+    # ask the DB with the `v`, only one at the time, rest is parallel
+    ask(v).
+    # get size of the string, rejects for 11
+    then(&:size).
+    # translate error to a value (message of the exception)
+    rescue { |reason| reason.message }
+end #
+
+Promises.zip(*concurrent_jobs).value!
+```
+
+In other cases the DB adapter maintains its own internal connection pool, and
+we just need to limit concurrent access to the DB's API to avoid the calls
+being blocked.
+
+Let's pretend that the `#[]` method on `DB_INTERNAL_POOL` is using an internal
+pool of size 3. We create a throttle with the same size.
+
+```ruby
+DB_INTERNAL_POOL = Concurrent::Array.new data
+
+max_tree = Concurrent::Throttle.new 3
+
+futures = 11.times.map do |i|
+  max_tree.
+    # throttled tasks, at most 3 simultaneous calls of [] on the database
+    throttled_future { DB_INTERNAL_POOL[i] }.
+    # un-throttled tasks, unlimited concurrency
+    then { |starts| starts.size }.
+    rescue { |reason| reason.message }
+end #
+
+futures.map(&:value!)
+```
+
+## Long stream of tasks, applying back pressure
+
+Let's assume that we are querying an API for data and that the queries can be
+faster than we are able to process them. This example shows how to use a
+channel as a buffer and how to apply back pressure to slow down the queries.
+
+```ruby
+require 'json' #
+
+channel = Promises::Channel.new 6
+source, token = Concurrent::Cancellation.create
+
+def query_random_text(token, channel)
+  Promises.future do
+    # for simplicity the query is omitted
+    # url = 'some api'
+    # Net::HTTP.get(URI(url))
+    sleep 0.1
+    { 'message' =>
+        'Lorem ipsum rhoncus scelerisque vulputate diam inceptos'
+    }.to_json
+  end.then(token) do |value, token|
+    # The push to channel is fulfilled only after the message is successfully
+    # published to the channel, therefore it will not continue querying until
+    # current message is pushed.
+    channel.push(value) |
+      # It could wait on the push indefinitely if the token is not checked
+      # here with `or` (the pipe).
+      token.to_future
+  end.flat_future.then(token) do |_, token|
+    # query again after the message is pushed to buffer
+    query_random_text(token, channel) unless token.canceled?
+  end
+end
+
+words = []
+words_throttle = Concurrent::Throttle.new 1
+
+def count_words_in_random_text(token, channel, words, words_throttle)
+  channel.pop.then do |response|
+    string = JSON.load(response)['message']
+    # processing is slower than querying
+    sleep 0.2
+    words_count = string.scan(/\w+/).size
+  end.then_throttled_by(words_throttle, words) do |words_count, words|
+    # safe since throttled to only 1 task at a time
+    words << words_count
+  end.then(token) do |_, token|
+    # count words in next message
+    unless token.canceled?
+      count_words_in_random_text(token, channel, words, words_throttle)
+    end
+  end
+end
+
+query_processes = 3.times.map do
+  Promises.future(token, channel, &method(:query_random_text)).run
+end
+
+word_counter_processes = 2.times.map do
+  Promises.future(token, channel, words, words_throttle,
+                  &method(:count_words_in_random_text)).run
+end
+
+sleep 0.5
+```
+
+Let it run for a while, then cancel it and ensure that the runs all fulfill
+(and therefore end) after the cancellation. Finally, print the result.
+
+```ruby
+source.cancel
+query_processes.map(&:wait!)
+word_counter_processes.map(&:wait!)
+words
+```
+
+Compared to using threads directly, this is a highly configurable and
+composable solution.
+
+
+## Periodic task
+
+By combining `schedule`, `run` and `Cancellation`, a periodically executed
+task can easily be created.
+
+```ruby
+repeating_scheduled_task = -> interval, token, task do
+  Promises.
+    # Schedule the task.
+    schedule(interval, token, &task).
+    # If successful, schedule again.
+    # Alternatively use chain to schedule always.
+    then { repeating_scheduled_task.call(interval, token, task) }
+end
+
+cancellation, token = Concurrent::Cancellation.create
+
+task = -> token do
+  5.times do
+    token.raise_if_canceled
+    # do stuff
+    print '.'
+    sleep 0.01
+  end
+end
+
+result = Promises.future(0.1, token, task, &repeating_scheduled_task).run
+sleep 0.2
+cancellation.cancel
+result.result
+```
+
diff --git a/doc/promises.init.rb b/doc/promises.init.rb
new file mode 100644
index 000000000..c3ed8aafb
--- /dev/null
+++ b/doc/promises.init.rb
@@ -0,0 +1,5 @@
+require 'concurrent-edge'
+
+def do_stuff
+  :stuff
+end
diff --git a/doc/promises.out.md b/doc/promises.out.md
new file mode 100644
index 000000000..0155468ad
--- /dev/null
+++ b/doc/promises.out.md
@@ -0,0 +1,1121 @@
+# Basics
+
+## Factory methods
+
+Future and Event are created indirectly with constructor methods in
+FactoryMethods. They are not designed for inheritance but rather for
+composition.
+
+```ruby
+Concurrent::Promises::FactoryMethods.instance_methods
+# => [:zip,
+#     :create,
+#     :delay,
+#     :future,
+#     :resolvable_future,
+#     :resolvable_event,
+#     :resolvable_event_on,
+#     :resolvable_future_on,
+#     :future_on,
+#     :resolved_future,
+#     :fulfilled_future,
+#     :rejected_future,
+#     :resolved_event,
+#     :delay_on,
+#     :schedule,
+#     :schedule_on,
+#     :zip_futures,
+#     :zip_futures_on,
+#     :zip_events,
+#     :zip_events_on,
+#     :any_resolved_future,
+#     :any_resolved_future_on,
+#     :any,
+#     :any_fulfilled_future,
+#     :any_fulfilled_future_on,
+#     :any_event,
+#     :any_event_on,
+#     :select_channel]
+```
+
+The module can be included or extended where needed.
+
+```ruby
+Class.new do
+  include Concurrent::Promises::FactoryMethods
+
+  def a_method
+    resolvable_event
+  end
+end.new.a_method
+# => <#Concurrent::Promises::ResolvableEvent:0x7fece40e07d0 pending>
+
+Module.new { extend Concurrent::Promises::FactoryMethods }.resolvable_event
+# => <#Concurrent::Promises::ResolvableEvent:0x7fece4178990 pending>
+```
+
+The module is already extended into {Concurrent::Promises} for convenience.
+
+```ruby
+Concurrent::Promises.resolvable_event
+# => <#Concurrent::Promises::ResolvableEvent:0x7fece40dd198 pending>
+```
+
+For this guide we introduce a shortcut in `main` so we can call the factory
+methods in the following examples by using `Promises` directly.
+ +```ruby +Promises = Concurrent::Promises +Promises.resolvable_event +# => <#Concurrent::Promises::ResolvableEvent:0x7fece40da740 pending> +``` + +## Asynchronous task + +The most basic use-case of the framework is asynchronous processing. A task can +be processed asynchronously by using a `future` factory method. The block will +be executed on an internal thread pool. + +Arguments of `future` are passed to the block and evaluation starts immediately. + +```ruby +future = Promises.future(0.1) do |duration| + sleep duration + :result +end +# => <#Concurrent::Promises::Future:0x7fece40d2108 pending> +``` + +Asks if the future is resolved, here it will be still in the middle of the +sleep call. + +```ruby +future.resolved? # => false +``` + +Retrieving the value will block until the future is resolved. + +```ruby +future.value # => :result +future.resolved? # => true +``` + +If the task fails we talk about the future being rejected. + +```ruby +future = Promises.future { raise 'Boom' } +# => <#Concurrent::Promises::Future:0x7fece4170808 pending> +``` + +There is no result, the future was rejected with a reason. + +```ruby +future.value # => nil +future.reason # => # +``` + +It can be forced to raise the reason for rejection when retrieving the value. + +```ruby +begin + future.value! +rescue => e + e +end # => # +``` + +Which is the same as `future.value! rescue $!` which will be used hereafter. + +Or it can be used directly as argument for raise, since it implements exception +method. + +```ruby +raise future rescue $! # => # +``` + +## States + +Lets define a inspection helper for methods. + +```ruby +def inspect_methods(*methods, of:) + methods.reduce({}) { |h, m| h.update m => of.send(m) } +end +``` + +Event has `pending` and `resolved` state. + +```ruby +event = Promises.resolvable_event +inspect_methods(:state, :pending?, :resolved?, of: event) +# => {:state=>:pending, :pending?=>true, :resolved?=>false} + +event.resolve +inspect_methods(:state, :pending?, :resolved?, of: event) +# => {:state=>:resolved, :pending?=>false, :resolved?=>true} +``` + +Future's `resolved` state is further specified to be `fulfilled` or `rejected`. + +```ruby +future = Promises.resolvable_future +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + of: future) +# => {:state=>:pending, +# :pending?=>true, +# :resolved?=>false, +# :fulfilled?=>false, +# :rejected?=>false} + +future.fulfill :value +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + :result, :value, :reason, of: future) +# => {:state=>:fulfilled, +# :pending?=>false, +# :resolved?=>true, +# :fulfilled?=>true, +# :rejected?=>false, +# :result=>[true, :value, nil], +# :value=>:value, +# :reason=>nil} + +future = Promises.rejected_future StandardError.new +inspect_methods(:state, :pending?, :resolved?, :fulfilled?, :rejected?, + :result, :value, :reason, of: future) +# => {:state=>:rejected, +# :pending?=>false, +# :resolved?=>true, +# :fulfilled?=>false, +# :rejected?=>true, +# :result=>[false, nil, #], +# :value=>nil, +# :reason=>#} +``` + +## Direct creation of resolved futures + +When an existing value has to wrapped in a future it does not have to go +through evaluation as follows. + +```ruby +Promises.future { :value } +# => <#Concurrent::Promises::Future:0x7fece092fed0 pending> +``` + +Instead it can be created directly. 
+ +```ruby +Promises.fulfilled_future(:value) +# => <#Concurrent::Promises::Future:0x7fece10db4e0 fulfilled> +Promises.rejected_future(StandardError.new('Ups')) +# => <#Concurrent::Promises::Future:0x7fece10d3768 rejected> +Promises.resolved_future(true, :value, nil) +# => <#Concurrent::Promises::Future:0x7fece10cba90 fulfilled> +Promises.resolved_future(false, nil, StandardError.new('Ups')) +# => <#Concurrent::Promises::Future:0x7fece10c97e0 rejected> +``` + +## Chaining + +Big advantage of promises is ability to chain tasks together without blocking +current thread. + +```ruby +Promises. + future(2) { |v| v.succ }. + then(&:succ). + value! # => 4 +``` + +As `future` factory method takes argument, `then` method takes as well. Any +supplied arguments are passed to the block, and the library ensures that they +are visible to the block. + +```ruby +Promises. + future('3') { |s| s.to_i }. + then(2) { |v, arg| v + arg }. + value # => 5 +Promises. + fulfilled_future('3'). + then(&:to_i). + then(2, &:+). + value # => 5 +Promises. + fulfilled_future(1). + chain(2) { |fulfilled, value, reason, arg| value + arg }. + value # => 3 +``` + +Passing the arguments in (similarly as for a thread `Thread.new(arg) { |arg| +do_stuff arg }`) is **required**, both following examples may break. + +```ruby +arg = 1 # => 1 +Thread.new { do_stuff arg } +# => # +Promises.future { do_stuff arg } +# => <#Concurrent::Promises::Future:0x7fece4052a48 pending> +``` + +## Branching, and zipping + +Besides chaining it can also be branched. + +```ruby +head = Promises.fulfilled_future -1 +branch1 = head.then(&:abs) +branch2 = head.then(&:succ).then(&:succ) + +branch1.value! # => 1 +branch2.value! # => 1 +``` + +It can be combined back to one future by zipping (`zip`, `&`). + +```ruby +branch1.zip(branch2).value! # => [1, 1] +(branch1 & branch2). + then { |a, b| a + b }. + value! # => 2 +(branch1 & branch2). + then(&:+). + value! # => 2 +Promises. + zip(branch1, branch2, branch1). + then { |*values| values.reduce(&:+) }. + value! # => 3 +``` + +Instead of zipping only the first one can be taken if needed. + +```ruby +Promises.any(branch1, branch2).value! # => 1 +(branch1 | branch2).value! # => 1 +``` + +## Blocking methods + +In these examples we have used blocking methods like `value` extensively for +their convenience, however in practice is better to avoid them and continue +chaining. + +If they need to be used (e.g. when integrating with threads), `value!` is a +better option over `value` when rejections are not dealt with differently. +Otherwise the rejection are not handled and probably silently forgotten. + +## Error handling + +When one of the tasks in the chain fails, the rejection propagates down the +chain without executing the tasks created with `then`. + +```ruby +Promises. + fulfilled_future(Object.new). + then(&:succ). + then(&:succ). + result +# => [false, +# nil, +# #>] +``` + +As `then` chained tasks execute only on fulfilled futures, there is a `rescue` +method which chains a task which is executed only when the future is rejected. +It can be used to recover from rejection. + +Using rescue to fulfill to 0 instead of the error. + +```ruby +Promises. + fulfilled_future(Object.new). + then(&:succ). + then(&:succ). + rescue { |err| 0 }. + result # => [true, 0, nil] +``` + +Rescue not executed when there is no rejection. + +```ruby +Promises. + fulfilled_future(1). + then(&:succ). + then(&:succ). + rescue { |e| 0 }. + result # => [true, 3, nil] +``` + +Tasks added with `chain` are evaluated always. 
+ +```ruby +Promises. + fulfilled_future(1). + chain { |fulfilled, value, reason| fulfilled ? value : reason }. + value! # => 1 +Promises. + rejected_future(StandardError.new('Ups')). + chain { |fulfilled, value, reason| fulfilled ? value : reason }. + value! # => # +``` + +Zip is rejected if any of the zipped futures is. + +```ruby +rejected_zip = Promises.zip( + Promises.fulfilled_future(1), + Promises.rejected_future(StandardError.new('Ups'))) +# => <#Concurrent::Promises::Future:0x7fece2b54538 rejected> +rejected_zip.result +# => [false, [1, nil], [nil, #]] +rejected_zip. + rescue { |reason1, reason2| (reason1 || reason2).message }. + value # => "Ups" +``` + +## Delayed futures + +Delayed futures will not evaluate until asked by `touch` or other method +requiring resolution. + +```ruby +future = Promises.delay { sleep 0.1; 'lazy' } +# => <#Concurrent::Promises::Future:0x7fece2b3d018 pending> +sleep 0.1 +future.resolved? # => false +future.touch +# => <#Concurrent::Promises::Future:0x7fece2b3d018 pending> +sleep 0.2 +future.resolved? # => true +``` + +All blocking methods like `wait`, `value` call `touch` and trigger evaluation. + +```ruby +Promises.delay { :value }.value # => :value +``` + +It propagates trough chain up allowing whole or partial lazy chains. + +```ruby +head = Promises.delay { 1 } +branch1 = head.then(&:succ) +branch2 = head.delay.then(&:succ) +join = branch1 & branch2 + +sleep 0.1 +``` + +Nothing resolves. + +```ruby +[head, branch1, branch2, join].map(&:resolved?) +# => [false, false, false, false] +``` + +Force `branch1` evaluation. + +```ruby +branch1.value # => 2 +sleep 0.1 +[head, branch1, branch2, join].map(&:resolved?) +# => [true, true, false, false] +``` + +Force evaluation of both by calling `value` on `join`. + +```ruby +join.value # => [2, 2] +[head, branch1, branch2, join].map(&:resolved?) +# => [true, true, true, true] +``` + +## Flatting + +Sometimes it is needed to wait for a inner future. Apparent solution is to wait +inside the future `Promises.future { Promises.future { 1+1 }.value }.value` +however as mentioned before, `value` calls should be **avoided** to avoid +blocking threads. Therefore there is a flat method which is a correct solution +in this situation and does not block any thread. + +```ruby +Promises.future { Promises.future { 1+1 } }.flat.value! +# => 2 +``` + +A more complicated example. +```ruby +Promises. + future { Promises.future { Promises.future { 1 + 1 } } }. + flat(1). + then { |future| future.then(&:succ) }. + flat(1). + value! # => 3 +``` + +## Scheduling + +Tasks can be planned to be executed with a time delay. + +Schedule task to be executed in 0.1 seconds. + +```ruby +scheduled = Promises.schedule(0.1) { 1 } +# => <#Concurrent::Promises::Future:0x7fece2acca98 pending> +scheduled.resolved? # => false +``` + +Value will become available after 0.1 seconds. + +```ruby +scheduled.value # => 1 +``` + +It can be used in the chain as well, where the delay is counted form a moment +its parent resolves. Therefore following future will be resolved in 0.2 seconds. + +```ruby +future = Promises. + future { sleep 0.1; :result }. + schedule(0.1). + then(&:to_s). + value! # => "result" +``` + +Time can be used as well. + +```ruby +Promises.schedule(Time.now + 10) { :val } +# => <#Concurrent::Promises::Future:0x7fece2aa7f40 pending> +``` + +## Resolvable Future and Event: + +Sometimes it is required to resolve a future externally, in these cases +`resolvable_future` and `resolvable_event` factory methods can be uses. 
See +{Concurrent::Promises::ResolvableFuture} and +{Concurrent::Promises::ResolvableEvent}. + +```ruby +future = Promises.resolvable_future +# => <#Concurrent::Promises::ResolvableFuture:0x7fece2aa5948 pending> +``` + +The thread will be blocked until the future is resolved + +```ruby +thread = Thread.new { future.value } +future.fulfill 1 +# => <#Concurrent::Promises::ResolvableFuture:0x7fece2aa5948 fulfilled> +thread.value # => 1 +``` + +Future can be resolved only once. + +```ruby +future.fulfill 1 rescue $! +# => #[true, 1, nil], :new_result=>[true, 1, nil]}> +future.fulfill 2, false # => false +``` + +## How are promises executed? + +Promises use global pools to execute the tasks. Therefore each task may run on +different thread which implies that users have to be careful not to depend on +Thread local variables (or they have to set at the begging of the task and +cleaned up at the end of the task). + +Since the tasks are running on may different threads of the thread pool, it's +better to follow following rules: + +- Use only data passed in through arguments or values of parent futures, to + have better control over what are futures accessing. +- The data passed in and out of futures are easier to deal with if they are + immutable or at least treated as such. +- Any mutable and mutated object accessed by more than one threads or futures + must be thread safe, see {Concurrent::Array}, {Concurrent::Hash}, and + {Concurrent::Map}. (Value of a future may be consumed by many futures.) +- Futures can access outside objects, but they has to be thread-safe. + +> *TODO: This part to be extended* + +# Advanced + +## Callbacks + +```ruby +queue = Queue.new # => # +future = Promises.delay { 1 + 1 } +# => <#Concurrent::Promises::Future:0x7fece2a85670 pending> + +future.on_fulfillment { queue << 1 } # evaluated asynchronously +future.on_fulfillment! { queue << 2 } # evaluated on resolving thread + +queue.empty? # => true +future.value # => 2 +queue.pop # => 2 +queue.pop # => 1 +``` + +## Using executors + +Factory methods, chain, and callback methods have all other version of them +which takes executor argument. + +It takes an instance of an executor or a symbol which is a shortcuts for the +two global pools in concurrent-ruby. `fast` for short and non-blocking tasks +and `:io` for blocking and long tasks. + +```ruby +Promises.future_on(:fast) { 2 }. + then_on(:io) { File.read __FILE__ }. + value.size # => 23539 +``` + +## Run (simulated process) + +Similar to flatting is running. When `run` is called on a future it will flat +indefinitely as long the future fulfils into a `Future` value. It can be used +to simulate a thread like processing without actually occupying the thread. + +```ruby +count = lambda do |v| + v += 1 + v < 5 ? Promises.future_on(:fast, v, &count) : v +end +# => # +400.times. + map { Promises.future_on(:fast, 0, &count).run.value! }. + all? { |v| v == 5 } # => true +``` + +Therefore the above example finished fine on the the `:fast` thread pool even +though it has much less threads than there is the simulated process. + +# Interoperability + +## Actors + +Create an actor which takes received numbers and returns the number squared. + +```ruby +actor = Concurrent::Actor::Utils::AdHoc.spawn :square do + -> v { v ** 2 } +end +# => # +``` + +Send result of `1+1` to the actor, and add 2 to the result send back from the +actor. + +```ruby +Promises. + future { 1 + 1 }. + then_ask(actor). + then { |v| v + 2 }. + value! # => 6 +``` + +So `(1 + 1)**2 + 2 = 6`. 
+ +The `ask` method returns future. + +```ruby +actor.ask(2).then(&:succ).value! # => 5 +``` +## ProcessActor + +> *TODO: Documentation to be added in few days* + +## Channel + +There is an implementation of channel as well. Lets start by creating a +channel with capacity 2 messages. + +```ruby +ch1 = Concurrent::Promises::Channel.new 2 +# => <#Concurrent::Promises::Channel:0x7fece2a17e18 size:2> +``` + +We push 3 messages, it can be observed that the last future representing the +push is not fulfilled since the capacity prevents it. When the work which fills +the channel depends on the futures created by push it can be used to create +back pressure – the filling work is delayed until the channel has space for +more messages. + +```ruby +pushes = 3.times.map { |i| ch1.push i } +# => [<#Concurrent::Promises::Future:0x7fece29fe030 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece29fd590 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece29fc1e0 pending>] +ch1.pop.value! # => 0 +pushes +# => [<#Concurrent::Promises::Future:0x7fece29fe030 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece29fd590 fulfilled>, +# <#Concurrent::Promises::Future:0x7fece29fc1e0 fulfilled>] +``` + +A selection over channels can be created with select_channel factory method. It +will be fulfilled with a first message available in any of the channels. It +returns a pair to be able to find out which channel had the message available. + +```ruby +ch2 = Concurrent::Promises::Channel.new 2 +# => <#Concurrent::Promises::Channel:0x7fece29d7d40 size:2> +result = Concurrent::Promises.select_channel(ch1, ch2) +# => <#Concurrent::Promises::ResolvableFuture:0x7fece29d6f30 fulfilled> +result.value! +# => [<#Concurrent::Promises::Channel:0x7fece2a17e18 size:2>, 1] + +Promises.future { 1+1 }.then_push_channel(ch1) +# => <#Concurrent::Promises::Future:0x7fece29cdc00 pending> +result = ( + Concurrent::Promises.fulfilled_future('%02d') & + Concurrent::Promises.select_channel(ch1, ch2)). + then { |format, (channel, value)| format format, value } +# => <#Concurrent::Promises::Future:0x7fece29c49c0 pending> +result.value! # => "02" +``` + +# Use-cases + +## Simple background processing + +```ruby +Promises.future { do_stuff } +# => <#Concurrent::Promises::Future:0x7fece29b44f8 pending> +``` + +## Parallel background processing + +```ruby +tasks = 4.times.map { |i| Promises.future(i) { |i| i*2 } } +# => [<#Concurrent::Promises::Future:0x7fece29a47d8 pending>, +# <#Concurrent::Promises::Future:0x7fece4192fc0 pending>, +# <#Concurrent::Promises::Future:0x7fece4191e68 pending>, +# <#Concurrent::Promises::Future:0x7fece4191148 pending>] +Promises.zip(*tasks).value! # => [0, 2, 4, 6] +``` + +## Actor background processing + +Actors are mainly keep and isolate state, they should stay responsive not being +blocked by a longer running computations. It desirable to offload the work to +stateless promises. + +Lets define an actor which will process jobs, while staying responsive, and +tracking the number of tasks being processed. + +```ruby +class Computer < Concurrent::Actor::RestartingContext + def initialize + super() + @jobs = {} + end + + def on_message(msg) + command, *args = msg + case command + # new job to process + when :run + job = args[0] + @jobs[job] = envelope.future + # Process asynchronously and send message back when done. 
+ Concurrent::Promises.future(&job).chain(job) do |fulfilled, value, reason, job| + self.tell [:done, job, fulfilled, value, reason] + end + # Do not make return value of this method to be answer of this message. + # We are answering later in :done by resolving the future kept in @jobs. + Concurrent::Actor::Behaviour::MESSAGE_PROCESSED + when :done + job, fulfilled, value, reason = *args + future = @jobs.delete job + # Answer the job's result. + future.resolve fulfilled, value, reason + when :status + { running_jobs: @jobs.size } + else + # Continue to fail with unknown message. + pass + end + end +end +``` + +Create the computer actor and send it 3 jobs. + +```ruby +computer = Concurrent::Actor.spawn Computer, :computer +# => # +results = 3.times.map { computer.ask [:run, -> { sleep 0.1; :result }] } +# => [<#Concurrent::Promises::Future:0x7fece50178c0 pending>, +# <#Concurrent::Promises::Future:0x7fece5015c50 pending>, +# <#Concurrent::Promises::Future:0x7fece5015020 pending>] +computer.ask(:status).value! # => {:running_jobs=>3} +results.map(&:value!) # => [:result, :result, :result] +``` +## Solving the Thread count limit by thread simulation + +Sometimes an application requires to process a lot of tasks concurrently. If +the number of concurrent tasks is high enough than it is not possible to create +a Thread for each of them. A partially satisfactory solution could be to use +Fibers, but that solution locks the application on MRI since other Ruby +implementations are using threads for each Fiber. + +This library provides a {Concurrent::Promises::Future#run} method on a future +to simulate threads without actually accepting one all the time. The run method +is similar to {Concurrent::Promises::Future#flat} but it will keep flattening +until it's fulfilled with non future value, then the value is taken as a result +of the process simulated by `run`. + +```ruby +body = lambda do |v| + # Some computation step of the process + new_v = v + 1 + # Is the process finished? + if new_v < 5 + # Continue computing with new value, does not have to be recursive. + # It just has to return a future. + Promises.future(new_v, &body) + else + # The process is finished, fulfill the final value with `new_v`. + new_v + end +end +Promises.future(0, &body).run.value! # => 5 +``` + +This solution works well an any Ruby implementation. + +> *TODO: More examples to be added.* + +## Cancellation + +### Simple + +Lets have two processes which will count until cancelled. + +```ruby +source, token = Concurrent::Cancellation.create +# => [<#Concurrent::Cancellation:0x7fece4071678 canceled:false>, +# <#Concurrent::Cancellation::Token:0x7fece406a508 canceled:false>] + +count_until_cancelled = -> token, count do + if token.canceled? + count + else + Promises.future token, count+1, &count_until_cancelled + end +end + +futures = Array.new(2) do + Promises.future(token, 0, &count_until_cancelled).run +end +# => [<#Concurrent::Promises::Future:0x7fece4050478 pending>, +# <#Concurrent::Promises::Future:0x7fece403b0a0 pending>] + +sleep 0.01 +source.cancel # => true +futures.map(&:value!) # => [63, 63] +``` + +Cancellation can also be used as event or future to log or plan re-execution. + +```ruby +token.to_event.chain do + # log cancellation + # plane re-execution +end +``` + +### Parallel background processing with cancellation + +Each task tries to count to 1000 but there is a randomly failing test. The +tasks share a cancellation, when one of them fails it cancels the others. 
+
+### Parallel background processing with cancellation
+
+Each task tries to count to 1000, but part of the work fails randomly. The
+tasks share a cancellation; when one of them fails, it cancels the others.
+
+```ruby
+source, token = Concurrent::Cancellation.create
+# => [<#Concurrent::Cancellation:0x7fece2a7c638 canceled:false>,
+#     <#Concurrent::Cancellation::Token:0x7fece2a77a70 canceled:false>]
+tasks = 4.times.map do |i|
+  Promises.future(source, token, i) do |source, token, i|
+    count = 0
+    1000.times do
+      break count = :cancelled if token.canceled?
+      count += 1
+      sleep 0.01
+      if rand > 0.95
+        source.cancel
+        raise 'random error'
+      end
+      count
+    end
+  end
+end
+# => [<#Concurrent::Promises::Future:0x7fece2a741b8 pending>,
+#     <#Concurrent::Promises::Future:0x7fece2a6eee8 pending>,
+#     <#Concurrent::Promises::Future:0x7fece2a6d520 pending>,
+#     <#Concurrent::Promises::Future:0x7fece2a6c8a0 pending>]
+Promises.zip(*tasks).result
+# => [false,
+#     [:cancelled, :cancelled, nil, :cancelled],
+#     [nil, nil, #<RuntimeError: random error>, nil]]
+```
+
+Without the randomly failing part it produces the following.
+
+```ruby
+source, token = Concurrent::Cancellation.create
+# => [<#Concurrent::Cancellation:0x7fece2a26670 canceled:false>,
+#     <#Concurrent::Cancellation::Token:0x7fece2a26198 canceled:false>]
+tasks = 4.times.map do |i|
+  Promises.future(source, token, i) do |source, token, i|
+    count = 0
+    1000.times do
+      break count = :cancelled if token.canceled?
+      count += 1
+      # sleep 0.01
+      # if rand > 0.95
+      #   source.cancel
+      #   raise 'random error'
+      # end
+    end
+    count
+  end
+end
+Promises.zip(*tasks).result # => [true, [1000, 1000, 1000, 1000], nil]
+```
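+
+A cancellation can also serve as a timeout when it is cancelled by a scheduled
+future; a minimal sketch:
+
+```ruby
+source, token = Concurrent::Cancellation.create
+# cancel the computation automatically after 0.1 seconds
+Promises.schedule(0.1) { source.cancel }
+work = Promises.future(token) do |token|
+  count = 0
+  count += 1 until token.canceled?
+  count
+end
+work.value! # some number of iterations finished before the timeout
+```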
+
+## Throttling concurrency
+
+By creating an actor managing the resource, we can control how many threads
+are accessing it – in this case, one at a time.
+
+```ruby
+data = Array.new(10) { |i| '*' * i }
+# => ["",
+#     "*",
+#     "**",
+#     "***",
+#     "****",
+#     "*****",
+#     "******",
+#     "*******",
+#     "********",
+#     "*********"]
+DB = Concurrent::Actor::Utils::AdHoc.spawn :db, data do |data|
+  lambda do |message|
+    # pretending that this queries a DB
+    data[message]
+  end
+end
+
+concurrent_jobs = 11.times.map do |v|
+  DB.
+      # ask the DB with the `v`, only one at a time, the rest in parallel
+      ask(v).
+      # get the size of the string, rejects for 11
+      then(&:size).
+      # translate the error to a value (message of the exception)
+      rescue { |reason| reason.message }
+end
+
+Promises.zip(*concurrent_jobs).value!
+# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"]
+```
+
+Often there is more than one DB connection; then a pool can be used.
+
+```ruby
+pool_size = 5 # => 5
+
+DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index|
+  # DB connection constructor
+  Concurrent::Actor::Utils::AdHoc.spawn(
+      name: "connection-#{index}",
+      args: [data]) do |data|
+    lambda do |message|
+      # pretending that this queries a DB
+      data[message]
+    end
+  end
+end
+
+concurrent_jobs = 11.times.map do |v|
+  DB_POOL.
+      # ask the DB with the `v`, at most 5 at a time, the rest in parallel
+      ask(v).
+      # get the size of the string, rejects for 11
+      then(&:size).
+      # translate the error to a value (message of the exception)
+      rescue { |reason| reason.message }
+end
+
+Promises.zip(*concurrent_jobs).value!
+# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"]
+```
+
+In other cases the DB adapter maintains its own internal connection pool and
+we just need to limit concurrent access to the DB's API to avoid the calls
+being blocked.
+
+Let's pretend that the `#[]` method on `DB_INTERNAL_POOL` is using an internal
+pool of size 3. We create a throttle with the same size.
+
+```ruby
+DB_INTERNAL_POOL = Concurrent::Array.new data
+# => ["",
+#     "*",
+#     "**",
+#     "***",
+#     "****",
+#     "*****",
+#     "******",
+#     "*******",
+#     "********",
+#     "*********"]
+
+max_three = Concurrent::Throttle.new 3
+# => <#Concurrent::Throttle:0x7fece0843be8 limit:3 can_run:3>
+
+futures = 11.times.map do |i|
+  max_three.
+      # throttled tasks, at most 3 simultaneous calls of [] on the database
+      throttled_future { DB_INTERNAL_POOL[i] }.
+      # un-throttled tasks, unlimited concurrency
+      then { |stars| stars.size }.
+      rescue { |reason| reason.message }
+end
+
+futures.map(&:value!)
+# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"]
+```
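+
+A throttle can also limit only one step of an existing chain with
+`then_throttled_by`, which the back-pressure example below relies on as well;
+a minimal sketch:
+
+```ruby
+throttle = Concurrent::Throttle.new 3
+futures = 10.times.map do |i|
+  Promises.
+      fulfilled_future(i).
+      # only this step is limited to at most 3 simultaneous executions
+      then_throttled_by(throttle) { |i| DB_INTERNAL_POOL[i] }.
+      # following steps run with unlimited concurrency again
+      then(&:size)
+end
+futures.map(&:value!) # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+```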
+
+## Long stream of tasks, applying back pressure
+
+> *TODO: To be added, parallel IO query > buffer > parallel processing, include a bad example as well*
+
+Let's assume that we are querying an API for data and the queries can be
+faster than we are able to process them. This example shows how to use a
+channel as a buffer and how to apply back pressure to slow down the queries.
+
+```ruby
+require 'json'
+
+channel = Promises::Channel.new 6
+# => <#Concurrent::Promises::Channel:0x7fece41918f0 size:6>
+source, token = Concurrent::Cancellation.create
+# => [<#Concurrent::Cancellation:0x7fece4190428 canceled:false>,
+#     <#Concurrent::Cancellation::Token:0x7fece418bec8 canceled:false>]
+
+def query_random_text(token, channel)
+  Promises.future do
+    # for simplicity the query is omitted
+    # url = 'some api'
+    # Net::HTTP.get(URI(url))
+    sleep 0.1
+    { 'message' =>
+        'Lorem ipsum rhoncus scelerisque vulputate diam inceptos'
+    }.to_json
+  end.then(token) do |value, token|
+    # It could wait on the push indefinitely if the token is not checked
+    # here with `|` (the or combinator of futures)
+    channel.push(value) | token.to_future
+  end.flat_future.then(token) do |_, token|
+    # query again after the message is pushed to the buffer
+    query_random_text(token, channel) unless token.canceled?
+  end
+end
+
+words = [] # => []
+words_throttle = Concurrent::Throttle.new 1
+# => <#Concurrent::Throttle:0x7fece29973a8 limit:1 can_run:1>
+
+def count_words_in_random_text(token, channel, words, words_throttle)
+  channel.pop.then do |response|
+    string = JSON.load(response)['message']
+    # processing is slower than querying
+    sleep 0.2
+    words_count = string.scan(/\w+/).size
+  end.then_throttled_by(words_throttle, words) do |words_count, words|
+    # safe since throttled to only 1 task at a time
+    words << words_count
+  end.then(token) do |_, token|
+    # count words in the next message
+    unless token.canceled?
+      count_words_in_random_text(token, channel, words, words_throttle)
+    end
+  end
+end
+
+query_processes = 3.times.map do
+  Promises.future(token, channel, &method(:query_random_text)).run
+end
+# => [<#Concurrent::Promises::Future:0x7fece41818d8 pending>,
+#     <#Concurrent::Promises::Future:0x7fece40e20f8 pending>,
+#     <#Concurrent::Promises::Future:0x7fece40e0668 pending>]
+
+word_counter_processes = 2.times.map do
+  Promises.future(token, channel, words, words_throttle,
+                  &method(:count_words_in_random_text)).run
+end
+# => [<#Concurrent::Promises::Future:0x7fece40dc860 pending>,
+#     <#Concurrent::Promises::Future:0x7fece40db230 pending>]
+
+sleep 0.5 # => 1
+```
+
+Let it run for a while, then cancel it and ensure that all the runs fulfill
+(and therefore end) after the cancellation. Finally, print the result.
+
+```ruby
+source.cancel # => true
+query_processes.map(&:wait!)
+# => [<#Concurrent::Promises::Future:0x7fece41818d8 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece40e20f8 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece40e0668 fulfilled>]
+word_counter_processes.map(&:wait!)
+# => [<#Concurrent::Promises::Future:0x7fece40dc860 fulfilled>,
+#     <#Concurrent::Promises::Future:0x7fece40db230 fulfilled>]
+words # => [7, 7, 7, 7]
+```
+
+## Periodic task
+
+By combining `schedule`, `run` and `Cancellation`, a periodically executed
+task can be easily created.
+
+```ruby
+repeating_scheduled_task = -> interval, token, task do
+  Promises.
+      # Schedule the task.
+      schedule(interval, token, &task).
+      # If successful, schedule again.
+      # Alternatively, use chain to schedule regardless of the result.
+      then { repeating_scheduled_task.call(interval, token, task) }
+end
+
+cancellation, token = Concurrent::Cancellation.create
+# => [<#Concurrent::Cancellation:0x7fece294db90 canceled:false>,
+#     <#Concurrent::Cancellation::Token:0x7fece2947ba0 canceled:false>]
+
+task = -> token do
+  5.times do
+    token.raise_if_canceled
+    # do stuff
+    print '.'
+    sleep 0.01
+  end
+end
+
+result = Promises.future(0.1, token, task, &repeating_scheduled_task).run
+# => <#Concurrent::Promises::Future:0x7fece28ce750 pending>
+sleep 0.2 # => 0
+cancellation.cancel # => true
+result.result
+# => [false,
+#     nil,
+#     #<Concurrent::CancelledOperationError: Concurrent::CancelledOperationError>]
+```
+
diff --git a/examples/benchmark_new_futures.rb b/examples/benchmark_new_futures.rb
index f8b04934c..76f36571c 100755
--- a/examples/benchmark_new_futures.rb
+++ b/examples/benchmark_new_futures.rb
@@ -2,7 +2,6 @@
 
 require 'benchmark/ips'
 require 'concurrent'
-require 'concurrent-edge'
 
 scale = 1
 
@@ -12,19 +11,19 @@
 
 Benchmark.ips(time, warmup) do |x|
   x.report('flat-old') { Concurrent::Promise.execute { 1 }.flat_map { |v| Concurrent::Promise.execute { v + 2 } }.value! }
-  x.report('flat-new') { Concurrent.future(:fast) { 1 }.then { |v| Concurrent.future(:fast) { v + 2 } }.flat.value! }
+  x.report('flat-new') { Concurrent::Promises.future(:fast) { 1 }.then { |v| Concurrent::Promises.future(:fast) { v + 2 } }.flat.value! }
   x.compare!
 end
 
 Benchmark.ips(time, warmup) do |x|
   x.report('status-old') { f = Concurrent::Promise.execute { nil }; 100.times { f.complete? } }
-  x.report('status-new') { f = Concurrent.future(:fast) { nil }; 100.times { f.completed? } }
+  x.report('status-new') { f = Concurrent::Promises.future(:fast) { nil }; 100.times { f.completed? } }
   x.compare!
 end
 
 Benchmark.ips(time, warmup) do |x|
   of = Concurrent::Promise.execute { 1 }
-  nf = Concurrent.succeeded_future(1, :fast)
+  nf = Concurrent::Promises.succeeded_future(1, :fast)
   x.report('value-old') { of.value! }
   x.report('value-new') { nf.value! }
   x.compare!
@@ -41,7 +40,7 @@
     head.value!
   end
   x.report('graph-new') do
-    head = Concurrent.succeeded_future(1, :fast)
+    head = Concurrent::Promises.succeeded_future(1, :fast)
     10.times do
       branch1 = head.then(&:succ)
       branch2 = head.then(&:succ).then(&:succ)
@@ -54,14 +53,14 @@
 
 Benchmark.ips(time, warmup) do |x|
   x.report('immediate-old') { Concurrent::Promise.execute { nil }.value! }
-  x.report('immediate-new') { Concurrent.succeeded_future(nil, :fast).value! }
+  x.report('immediate-new') { Concurrent::Promises.succeeded_future(nil, :fast).value! }
   x.compare!
 end
 
 Benchmark.ips(time, warmup) do |x|
   of = Concurrent::Promise.execute { 1 }
-  nf = Concurrent.succeeded_future(1, :fast)
-  x.report('then-old') { 100.times.reduce(nf) { |nf, _| nf.then(&:succ) }.value!
} + nf = Concurrent::Promises.succeeded_future(1, :fast) + x.report('then-old') { 100.times.reduce(of) { |nf, _| nf.then(&:succ) }.value! } x.report('then-new') { 100.times.reduce(nf) { |nf, _| nf.then(&:succ) }.value! } x.compare! end diff --git a/examples/edge_futures.in.rb b/examples/edge_futures.in.rb deleted file mode 100644 index bbca7b2fa..000000000 --- a/examples/edge_futures.in.rb +++ /dev/null @@ -1,234 +0,0 @@ -### Simple asynchronous task - -future = Concurrent.future { sleep 0.1; 1 + 1 } # evaluation starts immediately -future.completed? -# block until evaluated -future.value -future.completed? - - -### Failing asynchronous task - -future = Concurrent.future { raise 'Boom' } -future.value -future.value! rescue $! -future.reason -# re-raising -raise future rescue $! - - -### Chaining - -head = Concurrent.succeeded_future 1 # -branch1 = head.then(&:succ) # -branch2 = head.then(&:succ).then(&:succ) # -branch1.zip(branch2).value! -(branch1 & branch2).then { |a, b| a + b }.value! -(branch1 & branch2).then(&:+).value! -Concurrent.zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! -# pick only first completed -(branch1 | branch2).value! - -### Error handling - -Concurrent.future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates -Concurrent.future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 -Concurrent.future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied - -failing_zip = Concurrent.succeeded_future(1) & Concurrent.failed_future(StandardError.new('boom')) -failing_zip.result -failing_zip.then { |v| 'never happens' }.result -failing_zip.rescue { |a, b| (a || b).message }.value -failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value - -### Delay - -# will not evaluate until asked by #value or other method requiring completion -future = Concurrent.delay { 'lazy' } -sleep 0.1 # -future.completed? -future.value - -# propagates trough chain allowing whole or partial lazy chains - -head = Concurrent.delay { 1 } -branch1 = head.then(&:succ) -branch2 = head.delay.then(&:succ) -join = branch1 & branch2 - -sleep 0.1 # nothing will complete -[head, branch1, branch2, join].map(&:completed?) - -branch1.value -sleep 0.1 # forces only head to complete, branch 2 stays incomplete -[head, branch1, branch2, join].map(&:completed?) - -join.value - - -### Flatting - -Concurrent.future { Concurrent.future { 1+1 } }.flat.value # waits for inner future - -# more complicated example -Concurrent.future { Concurrent.future { Concurrent.future { 1 + 1 } } }. - flat(1). - then { |f| f.then(&:succ) }. - flat(1).value - - -### Schedule - -scheduled = Concurrent.schedule(0.1) { 1 } - -scheduled.completed? -scheduled.value # available after 0.1sec - -# and in chain -scheduled = Concurrent.delay { 1 }.schedule(0.1).then(&:succ) -# will not be scheduled until value is requested -sleep 0.1 # -scheduled.value # returns after another 0.1sec - - -### Completable Future and Event - -future = Concurrent.future -event = Concurrent.event -# Don't forget to keep the reference, `Concurrent.future.then { |v| v }` is incompletable - -# will be blocked until completed -t1 = Thread.new { future.value } # -t2 = Thread.new { event.wait } # - -future.success 1 -future.success 1 rescue $! 
-future.try_success 2 -event.complete - -[t1, t2].each &:join # - - -### Callbacks - -queue = Queue.new -future = Concurrent.delay { 1 + 1 } - -future.on_success { queue << 1 } # evaluated asynchronously -future.on_success! { queue << 2 } # evaluated on completing thread - -queue.empty? -future.value -queue.pop -queue.pop - - -### Thread-pools - -Concurrent.future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait - - -### Interoperability with actors - -actor = Concurrent::Actor::Utils::AdHoc.spawn :square do - -> v { v ** 2 } -end - -Concurrent. - future { 2 }. - then_ask(actor). - then { |v| v + 2 }. - value - -actor.ask(2).then(&:succ).value - - -### Interoperability with channels - -ch1 = Concurrent::Channel.new -ch2 = Concurrent::Channel.new - -result = Concurrent.select(ch1, ch2) -ch1.put 1 -result.value! - -Concurrent. - future { 1+1 }. - then_push(ch1) -result = Concurrent. - future { '%02d' }. - then_select(ch1, ch2). - then { |format, (value, channel)| format format, value } -result.value! - - -### Common use-cases Examples - -# simple background processing -Concurrent.future { do_stuff } - -# parallel background processing -jobs = 10.times.map { |i| Concurrent.future { i } } # -Concurrent.zip(*jobs).value - - -# periodic task -@end = false - -def schedule_job - Concurrent.schedule(1) { do_stuff }. - rescue { |e| StandardError === e ? report_error(e) : raise(e) }. - then { schedule_job unless @end } -end - -schedule_job -@end = true - - -# How to limit processing where there are limited resources? -# By creating an actor managing the resource -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do - data = Array.new(10) { |i| '*' * i } - lambda do |message| - # pretending that this queries a DB - data[message] - end -end - -concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. - # ask the DB with the `v`, only one at the time, rest is parallel - then_ask(DB). - # get size of the string, fails for 11 - then(&:size). - rescue { |reason| reason.message } # translate error to value (exception, message) -end # - -Concurrent.zip(*concurrent_jobs).value! - - -# In reality there is often a pool though: -data = Array.new(10) { |i| '*' * i } -pool_size = 5 - -DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index| - # DB connection constructor - Concurrent::Actor::Utils::AdHoc.spawn(name: "worker-#{index}", args: [data]) do |data| - lambda do |message| - # pretending that this queries a DB - data[message] - end - end -end - -concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. - # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel - then_ask(DB_POOL). - then(&:size). - rescue { |reason| reason.message } -end # - -Concurrent.zip(*concurrent_jobs).value! diff --git a/examples/edge_futures.out.rb b/examples/edge_futures.out.rb deleted file mode 100644 index e92e548e4..000000000 --- a/examples/edge_futures.out.rb +++ /dev/null @@ -1,273 +0,0 @@ -### Simple asynchronous task - -future = Concurrent.future { sleep 0.1; 1 + 1 } # evaluation starts immediately - # => <#Concurrent::Edge::Future:0x7fcc73208180 pending blocks:[]> -future.completed? # => false -# block until evaluated -future.value # => 2 -future.completed? # => true - - -### Failing asynchronous task - -future = Concurrent.future { raise 'Boom' } - # => <#Concurrent::Edge::Future:0x7fcc731fa0a8 pending blocks:[]> -future.value # => nil -future.value! rescue $! # => # -future.reason # => # -# re-raising -raise future rescue $! 
# => # - - -### Chaining - -head = Concurrent.succeeded_future 1 -branch1 = head.then(&:succ) -branch2 = head.then(&:succ).then(&:succ) -branch1.zip(branch2).value! # => [2, 3] -(branch1 & branch2).then { |a, b| a + b }.value! # => 5 -(branch1 & branch2).then(&:+).value! # => 5 -Concurrent.zip(branch1, branch2, branch1).then { |*values| values.reduce &:+ }.value! - # => 7 -# pick only first completed -(branch1 | branch2).value! # => 2 - -### Error handling - -Concurrent.future { Object.new }.then(&:succ).then(&:succ).rescue { |e| e.class }.value # error propagates - # => NoMethodError -Concurrent.future { Object.new }.then(&:succ).rescue { 1 }.then(&:succ).value # rescued and replaced with 1 - # => 2 -Concurrent.future { 1 }.then(&:succ).rescue { |e| e.message }.then(&:succ).value # no error, rescue not applied - # => 3 - -failing_zip = Concurrent.succeeded_future(1) & Concurrent.failed_future(StandardError.new('boom')) - # => <#Concurrent::Edge::Future:0x7ffcc19ac2a0 failed blocks:[]> -failing_zip.result # => [false, [1, nil], [nil, #]] -failing_zip.then { |v| 'never happens' }.result # => [false, [1, nil], [nil, #]] -failing_zip.rescue { |a, b| (a || b).message }.value - # => "boom" -failing_zip.chain { |success, values, reasons| [success, values.compact, reasons.compactß] }.value - # => nil - -### Delay - -# will not evaluate until asked by #value or other method requiring completion -future = Concurrent.delay { 'lazy' } - # => <#Concurrent::Edge::Future:0x7fcc731a1840 pending blocks:[]> -sleep 0.1 -future.completed? # => false -future.value # => "lazy" - -# propagates trough chain allowing whole or partial lazy chains - -head = Concurrent.delay { 1 } - # => <#Concurrent::Edge::Future:0x7fcc73193b28 pending blocks:[]> -branch1 = head.then(&:succ) - # => <#Concurrent::Edge::Future:0x7fcc73190900 pending blocks:[]> -branch2 = head.delay.then(&:succ) - # => <#Concurrent::Edge::Future:0x7fcc7318b400 pending blocks:[]> -join = branch1 & branch2 - # => <#Concurrent::Edge::Future:0x7fcc73180af0 pending blocks:[]> - -sleep 0.1 # nothing will complete # => 0 -[head, branch1, branch2, join].map(&:completed?) # => [false, false, false, false] - -branch1.value # => 2 -sleep 0.1 # forces only head to complete, branch 2 stays incomplete - # => 0 -[head, branch1, branch2, join].map(&:completed?) # => [true, true, false, false] - -join.value # => [2, 2] - - -### Flatting - -Concurrent.future { Concurrent.future { 1+1 } }.flat.value # waits for inner future - # => 2 - -# more complicated example -Concurrent.future { Concurrent.future { Concurrent.future { 1 + 1 } } }. - flat(1). - then { |f| f.then(&:succ) }. - flat(1).value # => 3 - - -### Schedule - -scheduled = Concurrent.schedule(0.1) { 1 } - # => <#Concurrent::Edge::Future:0x7fcc73143e48 pending blocks:[]> - -scheduled.completed? 
# => false -scheduled.value # available after 0.1sec # => 1 - -# and in chain -scheduled = Concurrent.delay { 1 }.schedule(0.1).then(&:succ) - # => <#Concurrent::Edge::Future:0x7fcc7313a758 pending blocks:[]> -# will not be scheduled until value is requested -sleep 0.1 -scheduled.value # returns after another 0.1sec # => 2 - - -### Completable Future and Event - -future = Concurrent.future - # => <#Concurrent::Edge::CompletableFuture:0x7fcc731286e8 pending blocks:[]> -event = Concurrent.event - # => <#Concurrent::Edge::CompletableEvent:0x7fcc73123058 pending blocks:[]> -# Don't forget to keep the reference, `Concurrent.future.then { |v| v }` is incompletable - -# will be blocked until completed -t1 = Thread.new { future.value } -t2 = Thread.new { event.wait } - -future.success 1 - # => <#Concurrent::Edge::CompletableFuture:0x7fcc731286e8 success blocks:[]> -future.success 1 rescue $! - # => # -future.try_success 2 # => false -event.complete - # => <#Concurrent::Edge::CompletableEvent:0x7fcc73123058 completed blocks:[]> - -[t1, t2].each &:join - - -### Callbacks - -queue = Queue.new # => # -future = Concurrent.delay { 1 + 1 } - # => <#Concurrent::Edge::Future:0x7fcc7310ab98 pending blocks:[]> - -future.on_success { queue << 1 } # evaluated asynchronously - # => <#Concurrent::Edge::Future:0x7fcc7310ab98 pending blocks:[]> -future.on_success! { queue << 2 } # evaluated on completing thread - # => <#Concurrent::Edge::Future:0x7fcc7310ab98 pending blocks:[]> - -queue.empty? # => true -future.value # => 2 -queue.pop # => 2 -queue.pop # => 1 - - -### Thread-pools - -Concurrent.future(:fast) { 2 }.then(:io) { File.read __FILE__ }.wait - # => <#Concurrent::Edge::Future:0x7fcc730f98e8 success blocks:[]> - - -### Interoperability with actors - -actor = Concurrent::Actor::Utils::AdHoc.spawn :square do - -> v { v ** 2 } -end - # => # - -Concurrent. - future { 2 }. - then_ask(actor). - then { |v| v + 2 }. - value # => 6 - -actor.ask(2).then(&:succ).value # => 5 - - -### Interoperability with channels - -ch1 = Concurrent::Channel.new # => # -ch2 = Concurrent::Channel.new # => # - -result = Concurrent.select(ch1, ch2) - # => <#Concurrent::Edge::CompletableFuture:0x7fcc730411a8 pending blocks:[]> -ch1.push 1 # => nil -result.value! - # => [1, #] - -Concurrent. - future { 1+1 }. - then_push(ch1) - # => <#Concurrent::Edge::Future:0x7fcc73032c98 pending blocks:[]> -result = Concurrent. - future { '%02d' }. - then_select(ch1, ch2). - then { |format, (value, channel)| format format, value } - # => <#Concurrent::Edge::Future:0x7fcc7302a4f8 pending blocks:[]> -result.value! # => "02" - - -### Common use-cases Examples - -# simple background processing -Concurrent.future { do_stuff } - # => <#Concurrent::Edge::Future:0x7fcc72123c48 pending blocks:[]> - -# parallel background processing -jobs = 10.times.map { |i| Concurrent.future { i } } -Concurrent.zip(*jobs).value # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - -# periodic task -@end = false # => false - -def schedule_job - Concurrent.schedule(1) { do_stuff }. - rescue { |e| StandardError === e ? report_error(e) : raise(e) }. - then { schedule_job unless @end } -end # => :schedule_job - -schedule_job - # => <#Concurrent::Edge::Future:0x7fcc75011370 pending blocks:[]> -@end = true # => true - - -# How to limit processing where there are limited resources? 
-# By creating an actor managing the resource -DB = Concurrent::Actor::Utils::AdHoc.spawn :db do - data = Array.new(10) { |i| '*' * i } - lambda do |message| - # pretending that this queries a DB - data[message] - end -end - # => # - -concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. - # ask the DB with the `v`, only one at the time, rest is parallel - then_ask(DB). - # get size of the string, fails for 11 - then(&:size). - rescue { |reason| reason.message } # translate error to value (exception, message) -end - -Concurrent.zip(*concurrent_jobs).value! - # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] - - -# In reality there is often a pool though: -data = Array.new(10) { |i| '*' * i } - # => ["", "*", "**", "***", "****", "*****", "******", "*******", "********", "*********"] -pool_size = 5 # => 5 - -DB_POOL = Concurrent::Actor::Utils::Pool.spawn!('DB-pool', pool_size) do |index| - # DB connection constructor - Concurrent::Actor::Utils::AdHoc.spawn(name: "worker-#{index}", args: [data]) do |data| - lambda do |message| - # pretending that this queries a DB - data[message] - end - end -end - # => # - -concurrent_jobs = 11.times.map do |v| - Concurrent. - future { v }. - # ask the DB_POOL with the `v`, only 5 at the time, rest is parallel - then_ask(DB_POOL). - then(&:size). - rescue { |reason| reason.message } -end - -Concurrent.zip(*concurrent_jobs).value! - # => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "undefined method `size' for nil:NilClass"] diff --git a/examples/init.rb b/examples/init.rb index c3ed8aafb..7166a4a76 100644 --- a/examples/init.rb +++ b/examples/init.rb @@ -3,3 +3,5 @@ def do_stuff :stuff end + +Concurrent.use_simple_logger Logger::DEBUG diff --git a/lib/concurrent-edge.rb b/lib/concurrent-edge.rb index f791f9164..59c5fa7d8 100644 --- a/lib/concurrent-edge.rb +++ b/lib/concurrent-edge.rb @@ -6,7 +6,11 @@ require 'concurrent/exchanger' require 'concurrent/lazy_register' -require 'concurrent/edge/future' -require 'concurrent/edge/lock_free_stack' require 'concurrent/edge/atomic_markable_reference' require 'concurrent/edge/lock_free_linked_set' +require 'concurrent/edge/lock_free_queue' +require 'concurrent/edge/lock_free_stack' + +require 'concurrent/edge/promises' +require 'concurrent/edge/cancellation' +require 'concurrent/edge/throttle' diff --git a/lib/concurrent/actor.rb b/lib/concurrent/actor.rb index 626a8762a..9d0d40694 100644 --- a/lib/concurrent/actor.rb +++ b/lib/concurrent/actor.rb @@ -1,7 +1,7 @@ require 'concurrent/configuration' require 'concurrent/executor/serialized_execution' require 'concurrent/synchronization' -require 'concurrent/edge/future' +require 'concurrent/edge/promises' module Concurrent # TODO https://github.com/celluloid/celluloid/wiki/Supervision-Groups ? @@ -34,8 +34,8 @@ def self.current Thread.current[:__current_actor__] end - @root = Concurrent.delay do - Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent.future).reference.tap do + @root = Concurrent::Promises.delay do + Core.new(parent: nil, name: '/', class: Root, initialized: future = Concurrent::Promises.resolvable_future).reference.tap do future.wait! 
end end @@ -65,16 +65,20 @@ def self.root # @param args see {.to_spawn_options} # @return [Reference] never the actual actor def self.spawn(*args, &block) + options = to_spawn_options(*args) + if options[:executor] && options[:executor].is_a?(ImmediateExecutor) + raise ArgumentError, 'ImmediateExecutor is not supported' + end if Actor.current - Core.new(to_spawn_options(*args).merge(parent: Actor.current), &block).reference + Core.new(options.merge(parent: Actor.current), &block).reference else - root.ask([:spawn, to_spawn_options(*args), block]).value! + root.ask([:spawn, options, block]).value! end end # as {.spawn} but it'll block until actor is initialized or it'll raise exception on error def self.spawn!(*args, &block) - spawn(to_spawn_options(*args).merge(initialized: future = Concurrent.future), &block).tap { future.wait! } + spawn(to_spawn_options(*args).merge(initialized: future = Concurrent::Promises.resolvable_future), &block).tap { future.wait! } end # @overload to_spawn_options(context_class, name, *args) diff --git a/lib/concurrent/actor/behaviour/sets_results.rb b/lib/concurrent/actor/behaviour/sets_results.rb index 8ec50e23c..97f0b47d7 100644 --- a/lib/concurrent/actor/behaviour/sets_results.rb +++ b/lib/concurrent/actor/behaviour/sets_results.rb @@ -1,7 +1,7 @@ module Concurrent module Actor module Behaviour - # Collects returning value and sets the CompletableFuture in the {Envelope} or error on failure. + # Collects returning value and sets the ResolvableFuture in the {Envelope} or error on failure. class SetResults < Abstract attr_reader :error_strategy @@ -13,7 +13,7 @@ def initialize(core, subsequent, core_options, error_strategy) def on_envelope(envelope) result = pass envelope if result != MESSAGE_PROCESSED && !envelope.future.nil? - envelope.future.success result + envelope.future.fulfill result log(DEBUG) { "finished processing of #{envelope.message.inspect}"} end nil @@ -29,7 +29,7 @@ def on_envelope(envelope) else raise end - envelope.future.fail error unless envelope.future.nil? + envelope.future.reject error unless envelope.future.nil? end end end diff --git a/lib/concurrent/actor/behaviour/termination.rb b/lib/concurrent/actor/behaviour/termination.rb index c27b65bf3..355c45901 100644 --- a/lib/concurrent/actor/behaviour/termination.rb +++ b/lib/concurrent/actor/behaviour/termination.rb @@ -14,8 +14,8 @@ class Termination < Abstract def initialize(core, subsequent, core_options, trapping = false, terminate_children = true) super core, subsequent, core_options - @terminated = Concurrent.future - @public_terminated = @terminated.hide_completable + @terminated = Concurrent::Promises.resolvable_future + @public_terminated = @terminated.with_hidden_resolvable @trapping = trapping @terminate_children = terminate_children end @@ -23,7 +23,7 @@ def initialize(core, subsequent, core_options, trapping = false, terminate_child # @note Actor rejects envelopes when terminated. # @return [true, false] if actor is terminated def terminated? - @terminated.completed? + @terminated.resolved? end def trapping? @@ -62,15 +62,15 @@ def on_envelope(envelope) def terminate!(reason = nil, envelope = nil) return true if terminated? - self_termination = Concurrent.completed_future(reason.nil?, reason.nil? || nil, reason) + self_termination = Concurrent::Promises.resolved_future(reason.nil?, reason.nil? || nil, reason) all_terminations = if @terminate_children - Concurrent.zip(*children.map { |ch| ch.ask(:terminate!) 
}, self_termination) + Concurrent::Promises.zip(*children.map { |ch| ch.ask(:terminate!) }, self_termination) else self_termination end - all_terminations.chain_completable(@terminated) - all_terminations.chain_completable(envelope.future) if envelope && envelope.future + all_terminations.chain_resolvable(@terminated) + all_terminations.chain_resolvable(envelope.future) if envelope && envelope.future broadcast(true, [:terminated, reason]) # TODO do not end up in Dead Letter Router parent << :remove_child if parent diff --git a/lib/concurrent/actor/core.rb b/lib/concurrent/actor/core.rb index c29173ac8..1546dfe0c 100644 --- a/lib/concurrent/actor/core.rb +++ b/lib/concurrent/actor/core.rb @@ -42,7 +42,7 @@ class Core < Synchronization::LockableObject # @option opts [Class] reference a custom descendant of {Reference} to use # @option opts [Array)>] behaviour_definition, array of pairs # where each pair is behaviour class and its args, see {Behaviour.basic_behaviour_definition} - # @option opts [CompletableFuture, nil] initialized, if present it'll be set or failed after {Context} initialization + # @option opts [ResolvableFuture, nil] initialized, if present it'll be set or failed after {Context} initialization # @option opts [Reference, nil] parent **private api** parent of the actor (the one spawning ) # @option opts [Proc, nil] logger a proc accepting (level, progname, message = nil, &block) params, # can be used to hook actor instance to any logging system, see {Concurrent::Concern::Logging} @@ -172,7 +172,6 @@ def ns_initialize(opts, &block) allocate_context @executor = Type! opts.fetch(:executor, @context.default_executor), Concurrent::AbstractExecutorService - raise ArgumentError, 'ImmediateExecutor is not supported' if @executor.is_a? ImmediateExecutor @reference = (Child! opts[:reference_class] || @context.default_reference_class, Reference).new self @name = (Type! opts.fetch(:name), String, Symbol).to_s @@ -192,17 +191,17 @@ def ns_initialize(opts, &block) @args = opts.fetch(:args, []) @block = block - initialized = Type! opts[:initialized], Edge::CompletableFuture, NilClass + initialized = Type! opts[:initialized], Promises::ResolvableFuture, NilClass schedule_execution do begin build_context - initialized.success reference if initialized + initialized.fulfill reference if initialized log DEBUG, 'spawned' rescue => ex log ERROR, ex @first_behaviour.terminate! - initialized.fail ex if initialized + initialized.reject ex if initialized end end end diff --git a/lib/concurrent/actor/envelope.rb b/lib/concurrent/actor/envelope.rb index fa80f111b..118234f57 100644 --- a/lib/concurrent/actor/envelope.rb +++ b/lib/concurrent/actor/envelope.rb @@ -16,7 +16,7 @@ class Envelope def initialize(message, future, sender, address) @message = message - @future = Type! future, Edge::CompletableFuture, NilClass + @future = Type! future, Promises::ResolvableFuture, NilClass @sender = Type! sender, Reference, Thread @address = Type! address, Reference end @@ -34,7 +34,7 @@ def address_path end def reject!(error) - future.fail error unless future.nil? + future.reject error unless future.nil? end end end diff --git a/lib/concurrent/actor/errors.rb b/lib/concurrent/actor/errors.rb index 9e736ba67..8ffb17f43 100644 --- a/lib/concurrent/actor/errors.rb +++ b/lib/concurrent/actor/errors.rb @@ -20,7 +20,7 @@ class UnknownMessage < Error def initialize(envelope) @envelope = Type! 
envelope, Envelope
-      super envelope.message.inspect
+      super "#{envelope.message.inspect} from #{envelope.sender_path}"
     end
   end
 end
diff --git a/lib/concurrent/actor/reference.rb b/lib/concurrent/actor/reference.rb
index 857c8ef04..64c86939c 100644
--- a/lib/concurrent/actor/reference.rb
+++ b/lib/concurrent/actor/reference.rb
@@ -45,13 +45,13 @@ def tell(message)
     # global_io_executor will block on while asking. It's fine to use it form outside of actors and
     # global_io_executor.
     # @param [Object] message
-    # @param [Edge::Future] future to be fulfilled be message's processing result
-    # @return [Edge::Future] supplied future
+    # @param [Promises::Future] future to be fulfilled by message's processing result
+    # @return [Promises::Future] supplied future
     # @example
     #   adder = AdHoc.spawn('adder') { -> message { message + 1 } }
     #   adder.ask(1).value # => 2
     #   adder.ask(nil).wait.reason # => #
-    def ask(message, future = Concurrent.future)
+    def ask(message, future = Concurrent::Promises.resolvable_future)
       message message, future
     end
 
@@ -63,13 +63,13 @@ def ask(message, future = Concurrent.future)
     # global_io_executor will block on while asking. It's fine to use it form outside of actors and
     # global_io_executor.
     # @param [Object] message
-    # @param [Edge::Future] future to be fulfilled be message's processing result
+    # @param [Promises::Future] future to be fulfilled by message's processing result
     # @return [Object] message's processing result
-    # @raise [Exception] future.reason if future is #failed?
+    # @raise [Exception] future.reason if future is #rejected?
     # @example
     #   adder = AdHoc.spawn('adder') { -> message { message + 1 } }
     #   adder.ask!(1) # => 2
-    def ask!(message, future = Concurrent.future)
+    def ask!(message, future = Concurrent::Promises.resolvable_future)
       ask(message, future).value!
     end
 
@@ -80,7 +80,7 @@ def map(messages)
     # behaves as {#tell} when no future and as {#ask} when future
     def message(message, future = nil)
       core.on_envelope Envelope.new(message, future, Actor.current || Thread.current, self)
-      return future ? future.hide_completable : self
+      return future ? future.with_hidden_resolvable : self
     end
 
     # @see AbstractContext#dead_letter_routing
diff --git a/lib/concurrent/actor/utils/pool.rb b/lib/concurrent/actor/utils/pool.rb
index a5ced2b57..b78e37223 100644
--- a/lib/concurrent/actor/utils/pool.rb
+++ b/lib/concurrent/actor/utils/pool.rb
@@ -43,9 +43,9 @@ def on_message(message)
         envelope_to_redirect = if envelope.future
                                  envelope
                                else
-                                 Envelope.new(envelope.message, Concurrent.future, envelope.sender, envelope.address)
+                                 Envelope.new(envelope.message, Concurrent::Promises.future, envelope.sender, envelope.address)
                                end
-        envelope_to_redirect.future.on_completion! { @balancer << :subscribe } # TODO check safety of @balancer reading
+        envelope_to_redirect.future.on_fulfillment! { @balancer << :subscribe } # TODO check safety of @balancer reading
         redirect @balancer, envelope_to_redirect
       end
     end
diff --git a/lib/concurrent/atomic/atomic_fixnum.rb b/lib/concurrent/atomic/atomic_fixnum.rb
index 1c2b0726a..cf93f7c06 100644
--- a/lib/concurrent/atomic/atomic_fixnum.rb
+++ b/lib/concurrent/atomic/atomic_fixnum.rb
@@ -129,5 +129,9 @@ module Concurrent
   #
   # @!macro atomic_fixnum_public_api
   class AtomicFixnum < AtomicFixnumImplementation
+    # @return [String] Short string representation.
+ def to_s + format '<#%s:0x%x value:%s>', self.class, object_id << 1, get + end end end diff --git a/lib/concurrent/atomic/atomic_reference.rb b/lib/concurrent/atomic/atomic_reference.rb index 46dbbdf1b..ba935a2d8 100644 --- a/lib/concurrent/atomic/atomic_reference.rb +++ b/lib/concurrent/atomic/atomic_reference.rb @@ -40,3 +40,10 @@ class Concurrent::AtomicReference < Concurrent::CAtomicReference class Concurrent::AtomicReference < Concurrent::MutexAtomicReference end end + +class Concurrent::AtomicReference + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x value:%s>', self.class, object_id << 1, get + end +end diff --git a/lib/concurrent/atomic/ruby_thread_local_var.rb b/lib/concurrent/atomic/ruby_thread_local_var.rb index 4ec041e27..06afae731 100644 --- a/lib/concurrent/atomic/ruby_thread_local_var.rb +++ b/lib/concurrent/atomic/ruby_thread_local_var.rb @@ -29,8 +29,8 @@ class RubyThreadLocalVar < AbstractThreadLocalVar # array, so we don't leak memory # @!visibility private - FREE = [] - LOCK = Mutex.new + FREE = [] + LOCK = Mutex.new ARRAYS = {} # used as a hash set @@next = 0 private_constant :FREE, :LOCK, :ARRAYS @@ -72,9 +72,9 @@ def value=(value) def allocate_storage @index = LOCK.synchronize do FREE.pop || begin - result = @@next - @@next += 1 - result + result = @@next + @@next += 1 + result end end ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index)) @@ -83,13 +83,15 @@ def allocate_storage # @!visibility private def self.threadlocal_finalizer(index) proc do - LOCK.synchronize do - FREE.push(index) - # The cost of GC'ing a TLV is linear in the number of threads using TLVs - # But that is natural! More threads means more storage is used per TLV - # So naturally more CPU time is required to free more storage - ARRAYS.each_value do |array| - array[index] = nil + Thread.new do # avoid error: can't be called from trap context + LOCK.synchronize do + FREE.push(index) + # The cost of GC'ing a TLV is linear in the number of threads using TLVs + # But that is natural! More threads means more storage is used per TLV + # So naturally more CPU time is required to free more storage + ARRAYS.each_value do |array| + array[index] = nil + end end end end @@ -98,10 +100,12 @@ def self.threadlocal_finalizer(index) # @!visibility private def self.thread_finalizer(array) proc do - LOCK.synchronize do - # The thread which used this thread-local array is now gone - # So don't hold onto a reference to the array (thus blocking GC) - ARRAYS.delete(array.object_id) + Thread.new do # avoid error: can't be called from trap context + LOCK.synchronize do + # The thread which used this thread-local array is now gone + # So don't hold onto a reference to the array (thus blocking GC) + ARRAYS.delete(array.object_id) + end end end end diff --git a/lib/concurrent/atomic/thread_local_var.rb b/lib/concurrent/atomic/thread_local_var.rb index e64a2d268..f86de15cc 100644 --- a/lib/concurrent/atomic/thread_local_var.rb +++ b/lib/concurrent/atomic/thread_local_var.rb @@ -11,7 +11,7 @@ module Concurrent # Creates a thread local variable. # # @param [Object] default the default value when otherwise unset - # @param [Proc] block Optional block that gets called to obtain the + # @param [Proc] default_block Optional block that gets called to obtain the # default value for each thread # @!macro [new] thread_local_var_method_get @@ -72,28 +72,28 @@ module Concurrent # the current thread will ever see that change. 
# # @!macro thread_safe_variable_comparison - # + # # @example # v = ThreadLocalVar.new(14) # v.value #=> 14 # v.value = 2 # v.value #=> 2 - # + # # @example # v = ThreadLocalVar.new(14) - # + # # t1 = Thread.new do # v.value #=> 14 # v.value = 1 # v.value #=> 1 # end - # + # # t2 = Thread.new do # v.value #=> 14 # v.value = 2 # v.value #=> 2 # end - # + # # v.value #=> 14 # # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal diff --git a/lib/concurrent/atomic_reference/jruby+truffle.rb b/lib/concurrent/atomic_reference/jruby+truffle.rb index 75c675078..ebb52dc38 100644 --- a/lib/concurrent/atomic_reference/jruby+truffle.rb +++ b/lib/concurrent/atomic_reference/jruby+truffle.rb @@ -1 +1,2 @@ -require 'concurrent/atomic_reference/mutex_atomic' +require 'atomic' +require 'concurrent/atomic_reference/rbx' diff --git a/lib/concurrent/collection/map/non_concurrent_map_backend.rb b/lib/concurrent/collection/map/non_concurrent_map_backend.rb index 1c9aa8984..ba86d7c0f 100644 --- a/lib/concurrent/collection/map/non_concurrent_map_backend.rb +++ b/lib/concurrent/collection/map/non_concurrent_map_backend.rb @@ -95,6 +95,7 @@ def clear end def each_pair + return enum_for :each_pair unless block_given? dupped_backend.each_pair do |k, v| yield k, v end diff --git a/lib/concurrent/configuration.rb b/lib/concurrent/configuration.rb index 9b3953757..b4774bfce 100644 --- a/lib/concurrent/configuration.rb +++ b/lib/concurrent/configuration.rb @@ -10,46 +10,46 @@ module Concurrent extend Concern::Logging - autoload :Options, 'concurrent/options' - autoload :TimerSet, 'concurrent/executor/timer_set' + autoload :Options, 'concurrent/options' + autoload :TimerSet, 'concurrent/executor/timer_set' autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' # @return [Logger] Logger with provided level and output. - def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) - logger = Logger.new(output) - logger.level = level - logger.formatter = lambda do |severity, datetime, progname, msg| - formatted_message = case msg + def self.create_simple_logger(level = Logger::FATAL, output = $stderr) + # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking + lambda do |severity, progname, message = nil, &block| + return false if severity < level + + message = block ? block.call : message + formatted_message = case message when String - msg + message when Exception format "%s (%s)\n%s", - msg.message, msg.class, (msg.backtrace || []).join("\n") + message.message, message.class, (message.backtrace || []).join("\n") else - msg.inspect + message.inspect end - format "[%s] %5s -- %s: %s\n", - datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), - severity, - progname, - formatted_message - end - lambda do |loglevel, progname, message = nil, &block| - logger.add loglevel, message, progname, &block + output.print format "[%s] %5s -- %s: %s\n", + Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), + Logger::SEV_LABEL[severity], + progname, + formatted_message + true end end - # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. - def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) - Concurrent.global_logger = create_stdlib_logger level, output + # Use logger created by #create_simple_logger to log concurrent-ruby messages. 
+  def self.use_simple_logger(level = Logger::FATAL, output = $stderr)
+    Concurrent.global_logger = create_simple_logger level, output
   end
 
   # Suppresses all output when used for logging.
   NULL_LOGGER = lambda { |level, progname, message = nil, &block| }
 
   # @!visibility private
-  GLOBAL_LOGGER = AtomicReference.new(create_stdlib_logger(Logger::WARN))
+  GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN))
   private_constant :GLOBAL_LOGGER
 
   def self.global_logger
@@ -131,23 +131,23 @@ def self.executor(executor_identifier)
 
   def self.new_fast_executor(opts = {})
     FixedThreadPool.new(
-      [2, Concurrent.processor_count].max,
-      auto_terminate: opts.fetch(:auto_terminate, true),
-      idletime: 60, # 1 minute
-      max_queue: 0, # unlimited
-      fallback_policy: :abort # shouldn't matter -- 0 max queue
+        [2, Concurrent.processor_count].max,
+        auto_terminate: opts.fetch(:auto_terminate, true),
+        idletime: 60, # 1 minute
+        max_queue: 0, # unlimited
+        fallback_policy: :abort # shouldn't matter -- 0 max queue
     )
   end
 
   def self.new_io_executor(opts = {})
     ThreadPoolExecutor.new(
-      min_threads: [2, Concurrent.processor_count].max,
-      max_threads: ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE,
-      # max_threads: 1000,
-      auto_terminate: opts.fetch(:auto_terminate, true),
-      idletime: 60, # 1 minute
-      max_queue: 0, # unlimited
-      fallback_policy: :abort # shouldn't matter -- 0 max queue
+        min_threads: [2, Concurrent.processor_count].max,
+        max_threads: ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE,
+        # max_threads: 1000,
+        auto_terminate: opts.fetch(:auto_terminate, true),
+        idletime: 60, # 1 minute
+        max_queue: 0, # unlimited
+        fallback_policy: :abort # shouldn't matter -- 0 max queue
     )
   end
 end
diff --git a/lib/concurrent/edge/cancellation.rb b/lib/concurrent/edge/cancellation.rb
new file mode 100644
index 000000000..30ebf9b33
--- /dev/null
+++ b/lib/concurrent/edge/cancellation.rb
@@ -0,0 +1,137 @@
+module Concurrent
+
+  # Provides tools for cooperative cancellation.
+  # Inspired by https://msdn.microsoft.com/en-us/library/dd537607(v=vs.110).aspx
+  # @example
+  #   # Create new cancellation. `cancellation` is used for cancelling, `token` is passed down to
+  #   # tasks for cooperative cancellation
+  #   cancellation, token = Concurrent::Cancellation.create
+  #   Thread.new(token) do |token|
+  #     # Count 1+1 (simulating some other meaningful work) repeatedly until the token is cancelled through
+  #     # cancellation.
+  #     token.loop_until_canceled { 1+1 }
+  #   end
+  #   sleep 0.1
+  #   cancellation.cancel # Stop the thread by cancelling
+  class Cancellation < Synchronization::Object
+    safe_initialization!
+
+    # Creates the cancellation object. Returns both the cancellation and the token for convenience.
+    # @param [Object] resolve_args Arguments which are used when the resolve method is called on
+    #   resolvable_future_or_event
+    # @param [Promises::Resolvable] resolvable_future_or_event resolvable used to track cancellation.
+    #   Can be retrieved by `token.to_future` or `token.to_event`.
+    # @example
+    #   cancellation, token = Concurrent::Cancellation.create
+    # @return [Array(Cancellation, Cancellation::Token)]
+    def self.create(resolvable_future_or_event = Promises.resolvable_event, *resolve_args)
+      cancellation = new(resolvable_future_or_event, *resolve_args)
+      [cancellation, cancellation.token]
+    end
+
+    private_class_method :new
+
+    # Returns the token associated with the cancellation.
+    # @return [Token]
+    def token
+      @Token
+    end
+
+    # Cancel this cancellation. All executions depending on the token will cooperatively stop.
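+    # @example Basic usage (an illustrative sketch)
+    #   source, token = Concurrent::Cancellation.create
+    #   token.canceled? # => false
+    #   source.cancel   # => true
+    #   token.canceled? # => true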
+    # @return [true, false]
+    # @raise when cancelling for the second time
+    def cancel(raise_on_repeated_call = true)
+      !!@Cancel.resolve(*@ResolveArgs, raise_on_repeated_call)
+    end
+
+    # Is the cancellation cancelled?
+    # @return [true, false]
+    def canceled?
+      @Cancel.resolved?
+    end
+
+    # Short string representation.
+    # @return [String]
+    def to_s
+      format '<#%s:0x%x canceled:%s>', self.class, object_id << 1, canceled?
+    end
+
+    alias_method :inspect, :to_s
+
+    private
+
+    def initialize(future, *resolve_args)
+      raise ArgumentError, 'future is not Resolvable' unless future.is_a?(Promises::Resolvable)
+      @Cancel      = future
+      @Token       = Token.new @Cancel.with_hidden_resolvable
+      @ResolveArgs = resolve_args
+    end
+
+    # Created through {Cancellation.create}, passed down to tasks to be able to check if canceled.
+    class Token < Synchronization::Object
+      safe_initialization!
+
+      # @return [Event] Event which will be resolved when the token is cancelled.
+      def to_event
+        @Cancel.to_event
+      end
+
+      # @return [Future] Future which will be resolved when the token is cancelled with arguments passed in
+      #   {Cancellation.create} .
+      def to_future
+        @Cancel.to_future
+      end
+
+      # Is the token cancelled?
+      # @return [true, false]
+      def canceled?
+        @Cancel.resolved?
+      end
+
+      # Repeatedly evaluates block until the token is {#canceled?}.
+      # @yield to the block repeatedly.
+      # @yieldreturn [Object]
+      # @return [Object] last result of the block
+      def loop_until_canceled(&block)
+        until canceled?
+          result = block.call
+        end
+        result
+      end
+
+      # Raise the error when cancelled
+      # @param [#exception] error to be raised
+      # @raise the error
+      # @return [self]
+      def raise_if_canceled(error = CancelledOperationError)
+        raise error if canceled?
+        self
+      end
+
+      # Creates a new token which is cancelled when any of the tokens is.
+      # @param [Token] tokens to combine
+      # @return [Token] new token
+      def join(*tokens, &block)
+        block ||= -> tokens { Promises.any_event(*tokens.map(&:to_event)) }
+        self.class.new block.call([@Cancel, *tokens])
+      end
+
+      # Short string representation.
+      # @return [String]
+      def to_s
+        format '<#%s:0x%x canceled:%s>', self.class, object_id << 1, canceled?
+      end
+
+      alias_method :inspect, :to_s
+
+      private
+
+      def initialize(cancel)
+        @Cancel = cancel
+      end
+    end
+
+    # FIXME (pitr-ch 27-Mar-2016): cooperation with mutex, condition, select etc?
+    # TODO (pitr-ch 27-Mar-2016): examples (scheduled to be cancelled in 10 sec)
+  end
+end
diff --git a/lib/concurrent/edge/future.rb b/lib/concurrent/edge/future.rb
deleted file mode 100644
index a63a881df..000000000
--- a/lib/concurrent/edge/future.rb
+++ /dev/null
@@ -1,1427 +0,0 @@
-require 'concurrent' # TODO do not require whole concurrent gem
-require 'concurrent/concern/deprecation'
-require 'concurrent/edge/lock_free_stack'
-
-
-# @note different name just not to collide for now
-module Concurrent
-  module Edge
-
-    # Provides edge features, which will be added to or replace features in main gem.
-    #
-    # Contains new unified implementation of Futures and Promises which combines Features of previous `Future`,
-    # `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into single framework. It uses extensively
-    # new synchronization layer to make all the paths lock-free with exception of blocking threads on `#wait`.
-    # It offers better performance and does not block threads (exception being #wait and similar methods where it's
-    # intended).
- # - # ## Examples - # {include:file:examples/edge_futures.out.rb} - # - # @!macro edge_warning - module FutureShortcuts - # User is responsible for completing the event once by {Edge::CompletableEvent#complete} - # @return [CompletableEvent] - def event(default_executor = :io) - CompletableEventPromise.new(default_executor).future - end - - # @overload future(default_executor = :io, &task) - # Constructs new Future which will be completed after block is evaluated on executor. Evaluation begins immediately. - # @return [Future] - # @overload future(default_executor = :io) - # User is responsible for completing the future once by {Edge::CompletableFuture#success} or {Edge::CompletableFuture#fail} - # @return [CompletableFuture] - def future(default_executor = :io, &task) - if task - ImmediateEventPromise.new(default_executor).future.then(&task) - else - CompletableFuturePromise.new(default_executor).future - end - end - - # @return [Future] which is already completed - def completed_future(success, value, reason, default_executor = :io) - ImmediateFuturePromise.new(default_executor, success, value, reason).future - end - - # @return [Future] which is already completed in success state with value - def succeeded_future(value, default_executor = :io) - completed_future true, value, nil, default_executor - end - - # @return [Future] which is already completed in failed state with reason - def failed_future(reason, default_executor = :io) - completed_future false, nil, reason, default_executor - end - - # @return [Event] which is already completed - def completed_event(default_executor = :io) - ImmediateEventPromise.new(default_executor).event - end - - alias_method :async, :future - - # Constructs new Future which will evaluate to the block after - # requested by calling `#wait`, `#value`, `#value!`, etc. on it or on any of the chained futures. - # @return [Future] - def delay(default_executor = :io, &task) - DelayPromise.new(default_executor).future.then(&task) - end - - # Schedules the block to be executed on executor in given intended_time. - # @param [Numeric, Time] intended_time Numeric => run in `intended_time` seconds. Time => eun on time. - # @return [Future] - def schedule(intended_time, default_executor = :io, &task) - ScheduledPromise.new(default_executor, intended_time).future.then(&task) - end - - # Constructs new {Future} which is completed after all futures_and_or_events are complete. Its value is array - # of dependent future values. If there is an error it fails with the first one. Event does not - # have a value so it's represented by nil in the array of values. - # @param [Event] futures_and_or_events - # @return [Future] - def zip_futures(*futures_and_or_events) - ZipFuturesPromise.new(futures_and_or_events, :io).future - end - - alias_method :zip, :zip_futures - - # Constructs new {Event} which is completed after all futures_and_or_events are complete - # (Future is completed when Success or Failed). - # @param [Event] futures_and_or_events - # @return [Event] - def zip_events(*futures_and_or_events) - ZipEventsPromise.new(futures_and_or_events, :io).future - end - - # Constructs new {Future} which is completed after first of the futures is complete. - # @param [Event] futures - # @return [Future] - def any_complete(*futures) - AnyCompletePromise.new(futures, :io).future - end - - alias_method :any, :any_complete - - # Constructs new {Future} which becomes succeeded after first of the futures succeedes or - # failed if all futures fail (reason is last error). 
- # @param [Event] futures - # @return [Future] - def any_successful(*futures) - AnySuccessfulPromise.new(futures, :io).future - end - - # only proof of concept - # @return [Future] - def select(*channels) - future do - # noinspection RubyArgCount - Channel.select do |s| - channels.each do |ch| - s.take(ch) { |value| [value, ch] } - end - end - end - end - - # post job on :fast executor - # @return [true, false] - def post!(*args, &job) - post_on(:fast, *args, &job) - end - - # post job on :io executor - # @return [true, false] - def post(*args, &job) - post_on(:io, *args, &job) - end - - # post job on executor - # @return [true, false] - def post_on(executor, *args, &job) - Concurrent.executor(executor).post(*args, &job) - end - - # TODO add first(futures, count=count) - # TODO allow to to have a zip point for many futures and process them in batches by 10 - end - - # Represents an event which will happen in future (will be completed). It has to always happen. - class Event < Synchronization::Object - safe_initialization! - private(*attr_atomic(:internal_state)) - # @!visibility private - public :internal_state - include Concern::Deprecation - include Concern::Logging - - # @!visibility private - class State - def completed? - raise NotImplementedError - end - - def to_sym - raise NotImplementedError - end - end - - # @!visibility private - class Pending < State - def completed? - false - end - - def to_sym - :pending - end - end - - # @!visibility private - class Completed < State - def completed? - true - end - - def to_sym - :completed - end - end - - # @!visibility private - PENDING = Pending.new - # @!visibility private - COMPLETED = Completed.new - - def initialize(promise, default_executor) - super() - @Lock = Mutex.new - @Condition = ConditionVariable.new - @Promise = promise - @DefaultExecutor = default_executor - @Touched = AtomicBoolean.new false - @Callbacks = LockFreeStack.new - @Waiters = AtomicFixnum.new 0 - self.internal_state = PENDING - end - - # @return [:pending, :completed] - def state - internal_state.to_sym - end - - # Is Event/Future pending? - # @return [Boolean] - def pending?(state = internal_state) - !state.completed? - end - - def unscheduled? - raise 'unsupported' - end - - alias_method :incomplete?, :pending? - - # Has the Event been completed? - # @return [Boolean] - def completed?(state = internal_state) - state.completed? - end - - alias_method :complete?, :completed? - - # Wait until Event is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @return [Event, true, false] self or true/false if timeout is used - # @!macro [attach] edge.periodical_wait - # @note a thread should wait only once! For repeated checking use faster `completed?` check. - # If thread waits periodically it will dangerously grow the waiters stack. - def wait(timeout = nil) - touch - result = wait_until_complete(timeout) - timeout ? result : self - end - - # @!visibility private - def touch - # distribute touch to promise only once - @Promise.touch if @Touched.make_true - self - end - - # @return [Executor] current default executor - # @see #with_default_executor - def default_executor - @DefaultExecutor - end - - # @yield [success, value, reason] of the parent - def chain(executor = nil, &callback) - ChainPromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future - end - - alias_method :then, :chain - - def chain_completable(completable_event) - on_completion! 
{ completable_event.complete_with COMPLETED } - end - - alias_method :tangle, :chain_completable - - # Zip with future producing new Future - # @return [Event] - def zip(other) - if other.is?(Future) - ZipFutureEventPromise.new(other, self, @DefaultExecutor).future - else - ZipEventEventPromise.new(self, other, @DefaultExecutor).future - end - end - - alias_method :&, :zip - - # Inserts delay into the chain of Futures making rest of it lazy evaluated. - # @return [Event] - def delay - ZipEventEventPromise.new(self, DelayPromise.new(@DefaultExecutor).event, @DefaultExecutor).event - end - - # # Schedules rest of the chain for execution with specified time or on specified time - # # @return [Event] - # def schedule(intended_time) - # chain do - # ZipEventEventPromise.new(self, - # ScheduledPromise.new(@DefaultExecutor, intended_time).event, - # @DefaultExecutor).event - # end.flat - # end - - # Zips with selected value form the suplied channels - # @return [Future] - def then_select(*channels) - ZipFutureEventPromise(Concurrent.select(*channels), self, @DefaultExecutor).future - end - - # @yield [success, value, reason] executed async on `executor` when completed - # @return self - def on_completion(executor = nil, &callback) - add_callback :async_callback_on_completion, executor || @DefaultExecutor, callback - end - - # @yield [success, value, reason] executed sync when completed - # @return self - def on_completion!(&callback) - add_callback :callback_on_completion, callback - end - - # Changes default executor for rest of the chain - # @return [Event] - def with_default_executor(executor) - EventWrapperPromise.new(self, executor).future - end - - def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state.to_sym}>" - end - - def inspect - "#{to_s[0..-2]} blocks:[#{blocks.map(&:to_s).join(', ')}]>" - end - - def set(*args, &block) - raise 'Use CompletableEvent#complete or CompletableFuture#complete instead, ' + - 'constructed by Concurrent.event or Concurrent.future respectively.' - end - - # @!visibility private - def complete_with(state, raise_on_reassign = true) - if compare_and_set_internal_state(PENDING, state) - # go to synchronized block only if there were waiting threads - @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks - else - Concurrent::MultipleAssignmentError.new('Event can be completed only once') if raise_on_reassign - return false - end - self - end - - # @!visibility private - # just for inspection - # @return [Array] - def blocks - @Callbacks.each_with_object([]) do |callback, promises| - promises.push(*(callback.select { |v| v.is_a? AbstractPromise })) - end - end - - # @!visibility private - # just for inspection - def callbacks - @Callbacks.each.to_a - end - - # @!visibility private - def add_callback(method, *args) - if completed? - call_callback method, *args - else - @Callbacks.push [method, *args] - call_callbacks if completed? - end - self - end - - # @!visibility private - # only for inspection - def promise - @Promise - end - - # @!visibility private - # only for inspection - def touched - @Touched.value - end - - # @!visibility private - # only for debugging inspection - def waiting_threads - @Waiters.each.to_a - end - - private - - # @return [true, false] - def wait_until_complete(timeout) - return true if completed? - - @Lock.synchronize do - @Waiters.increment - begin - unless completed? 
- @Condition.wait @Lock, timeout - end - ensure - # JRuby may raise ConcurrencyError - @Waiters.decrement - end - end - completed? - end - - def with_async(executor, *args, &block) - Concurrent.post_on(executor, *args, &block) - end - - def async_callback_on_completion(executor, callback) - with_async(executor) { callback_on_completion callback } - end - - def callback_on_completion(callback) - callback.call - end - - def callback_notify_blocked(promise) - promise.on_done self - end - - def call_callback(method, *args) - self.send method, *args - end - - def call_callbacks - method, *args = @Callbacks.pop - while method - call_callback method, *args - method, *args = @Callbacks.pop - end - end - end - - # Represents a value which will become available in future. May fail with a reason instead. - class Future < Event - # @!visibility private - class CompletedWithResult < Completed - def result - [success?, value, reason] - end - - def success? - raise NotImplementedError - end - - def value - raise NotImplementedError - end - - def reason - raise NotImplementedError - end - end - - # @!visibility private - class Success < CompletedWithResult - def initialize(value) - @Value = value - end - - def success? - true - end - - def apply(block) - block.call value - end - - def value - @Value - end - - def reason - nil - end - - def to_sym - :success - end - end - - # @!visibility private - class SuccessArray < Success - def apply(block) - block.call(*value) - end - end - - # @!visibility private - class Failed < CompletedWithResult - def initialize(reason) - @Reason = reason - end - - def success? - false - end - - def value - nil - end - - def reason - @Reason - end - - def to_sym - :failed - end - - def apply(block) - block.call reason - end - end - - # @!visibility private - class PartiallyFailed < CompletedWithResult - def initialize(value, reason) - super() - @Value = value - @Reason = reason - end - - def success? - false - end - - def to_sym - :failed - end - - def value - @Value - end - - def reason - @Reason - end - - def apply(block) - block.call(*reason) - end - end - - # @!method state - # @return [:pending, :success, :failed] - - # Has Future been success? - # @return [Boolean] - def success?(state = internal_state) - state.completed? && state.success? - end - - def fulfilled? - deprecated_method 'fulfilled?', 'success?' - success? - end - - # Has Future been failed? - # @return [Boolean] - def failed?(state = internal_state) - state.completed? && !state.success? - end - - def rejected? - deprecated_method 'rejected?', 'failed?' - failed? - end - - # @return [Object, nil] the value of the Future when success, nil on timeout - # @!macro [attach] edge.timeout_nil - # @note If the Future can have value `nil` then it cannot be distinquished from `nil` returned on timeout. - # In this case is better to use first `wait` then `value` (or similar). - # @!macro edge.periodical_wait - def value(timeout = nil) - touch - internal_state.value if wait_until_complete timeout - end - - # @return [Exception, nil] the reason of the Future's failure - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait - def reason(timeout = nil) - touch - internal_state.reason if wait_until_complete timeout - end - - # @return [Array(Boolean, Object, Exception), nil] triplet of success, value, reason - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait - def result(timeout = nil) - touch - internal_state.result if wait_until_complete timeout - end - - # Wait until Future is #complete? 
- # @param [Numeric] timeout the maximum time in second to wait. - # @raise reason on failure - # @return [Event, true, false] self or true/false if timeout is used - # @!macro edge.periodical_wait - def wait!(timeout = nil) - touch - result = wait_until_complete!(timeout) - timeout ? result : self - end - - # Wait until Future is #complete? - # @param [Numeric] timeout the maximum time in second to wait. - # @raise reason on failure - # @return [Object, nil] - # @!macro edge.timeout_nil - # @!macro edge.periodical_wait - def value!(timeout = nil) - touch - internal_state.value if wait_until_complete! timeout - end - - # @example allows failed Future to be risen - # raise Concurrent.future.fail - def exception(*args) - raise 'obligation is not failed' unless failed? - reason = internal_state.reason - if reason.is_a?(::Array) - reason.each { |e| log ERROR, 'Edge::Future', e } - Concurrent::Error.new 'multiple exceptions, inspect log' - else - reason.exception(*args) - end - end - - # @yield [value] executed only on parent success - # @return [Future] - def then(executor = nil, &callback) - ThenPromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future - end - - # Asks the actor with its value. - # @return [Future] new future with the response form the actor - def then_ask(actor) - self.then { |v| actor.ask(v) }.flat - end - - def chain_completable(completable_future) - on_completion! { completable_future.complete_with internal_state } - end - - alias_method :tangle, :chain_completable - - # @yield [reason] executed only on parent failure - # @return [Future] - def rescue(executor = nil, &callback) - RescuePromise.new(self, @DefaultExecutor, executor || @DefaultExecutor, &callback).future - end - - # zips with the Future in the value - # @example - # Concurrent.future { Concurrent.future { 1 } }.flat.value # => 1 - def flat(level = 1) - FlatPromise.new(self, level, @DefaultExecutor).future - end - - # @return [Future] which has first completed value from futures - def any(*futures) - AnyCompletePromise.new([self, *futures], @DefaultExecutor).future - end - - # Inserts delay into the chain of Futures making rest of it lazy evaluated. 
- # @return [Future] - def delay - ZipFutureEventPromise.new(self, DelayPromise.new(@DefaultExecutor).future, @DefaultExecutor).future - end - - # Schedules rest of the chain for execution with specified time or on specified time - # @return [Future] - def schedule(intended_time) - chain do - ZipFutureEventPromise.new(self, - ScheduledPromise.new(@DefaultExecutor, intended_time).event, - @DefaultExecutor).future - end.flat - end - - # Zips with selected value form the suplied channels - # @return [Future] - def then_select(*channels) - ZipFuturesPromise.new([self, Concurrent.select(*channels)], @DefaultExecutor).future - end - - # Changes default executor for rest of the chain - # @return [Future] - def with_default_executor(executor) - FutureWrapperPromise.new(self, executor).future - end - - # Zip with future producing new Future - # @return [Future] - def zip(other) - if other.is_a?(Future) - ZipFutureFuturePromise.new(self, other, @DefaultExecutor).future - else - ZipFutureEventPromise.new(self, other, @DefaultExecutor).future - end - end - - alias_method :&, :zip - - alias_method :|, :any - - # @note may block - # @note only proof of concept - def then_put(channel) - on_success(:io) { |value| channel.put value } - end - - # @yield [value] executed async on `executor` when success - # @return self - def on_success(executor = nil, &callback) - add_callback :async_callback_on_success, executor || @DefaultExecutor, callback - end - - # @yield [reason] executed async on `executor` when failed? - # @return self - def on_failure(executor = nil, &callback) - add_callback :async_callback_on_failure, executor || @DefaultExecutor, callback - end - - # @yield [value] executed sync when success - # @return self - def on_success!(&callback) - add_callback :callback_on_success, callback - end - - # @yield [reason] executed sync when failed? - # @return self - def on_failure!(&callback) - add_callback :callback_on_failure, callback - end - - # @!visibility private - def complete_with(state, raise_on_reassign = true) - if compare_and_set_internal_state(PENDING, state) - # go to synchronized block only if there were waiting threads - @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks state - else - if raise_on_reassign - log ERROR, 'Edge::Future', reason if reason # print otherwise hidden error - raise(Concurrent::MultipleAssignmentError.new( - "Future can be completed only once. Current result is #{result}, " + - "trying to set #{state.result}")) - end - return false - end - self - end - - # @!visibility private - def add_callback(method, *args) - state = internal_state - if completed?(state) - call_callback method, state, *args - else - @Callbacks.push [method, *args] - state = internal_state - # take back if it was completed in the meanwhile - call_callbacks state if completed?(state) - end - self - end - - # @!visibility private - def apply(block) - internal_state.apply block - end - - private - - def wait_until_complete!(timeout = nil) - result = wait_until_complete(timeout) - raise self if failed? 
- result - end - - def call_callbacks(state) - method, *args = @Callbacks.pop - while method - call_callback method, state, *args - method, *args = @Callbacks.pop - end - end - - def call_callback(method, state, *args) - self.send method, state, *args - end - - def async_callback_on_success(state, executor, callback) - with_async(executor, state, callback) do |st, cb| - callback_on_success st, cb - end - end - - def async_callback_on_failure(state, executor, callback) - with_async(executor, state, callback) do |st, cb| - callback_on_failure st, cb - end - end - - def callback_on_success(state, callback) - state.apply callback if state.success? - end - - def callback_on_failure(state, callback) - state.apply callback unless state.success? - end - - def callback_on_completion(state, callback) - callback.call state.result - end - - def callback_notify_blocked(state, promise) - super(promise) - end - - def async_callback_on_completion(state, executor, callback) - with_async(executor, state, callback) do |st, cb| - callback_on_completion st, cb - end - end - - end - - # A Event which can be completed by user. - class CompletableEvent < Event - # Complete the Event, `raise` if already completed - def complete(raise_on_reassign = true) - complete_with COMPLETED, raise_on_reassign - end - - def hide_completable - EventWrapperPromise.new(self, @DefaultExecutor).event - end - end - - # A Future which can be completed by user. - class CompletableFuture < Future - # Complete the future with triplet od `success`, `value`, `reason` - # `raise` if already completed - # return [self] - def complete(success, value, reason, raise_on_reassign = true) - complete_with(success ? Success.new(value) : Failed.new(reason), raise_on_reassign) - end - - # Complete the future with value - # return [self] - def success(value) - promise.success(value) - end - - # Try to complete the future with value - # return [self] - def try_success(value) - promise.try_success(value) - end - - # Fail the future with reason - # return [self] - def fail(reason = StandardError.new) - promise.fail(reason) - end - - # Try to fail the future with reason - # return [self] - def try_fail(reason = StandardError.new) - promise.try_fail(reason) - end - - # Evaluate the future to value if there is an exception the future fails with it - # return [self] - def evaluate_to(*args, &block) - promise.evaluate_to(*args, block) - end - - # Evaluate the future to value if there is an exception the future fails with it - # @raise the exception - # return [self] - def evaluate_to!(*args, &block) - promise.evaluate_to!(*args, block) - end - - def hide_completable - FutureWrapperPromise.new(self, @DefaultExecutor).future - end - end - - # @abstract - # @!visibility private - class AbstractPromise < Synchronization::Object - safe_initialization! 
- include Concern::Logging - - def initialize(future) - super() - @Future = future - end - - def future - @Future - end - - alias_method :event, :future - - def default_executor - future.default_executor - end - - def state - future.state - end - - def touch - end - - def to_s - "<##{self.class}:0x#{'%x' % (object_id << 1)} #{state}>" - end - - def inspect - to_s - end - - private - - def complete_with(new_state, raise_on_reassign = true) - @Future.complete_with(new_state, raise_on_reassign) - end - - # @return [Future] - def evaluate_to(*args, block) - complete_with Future::Success.new(block.call(*args)) - rescue StandardError => error - complete_with Future::Failed.new(error) - rescue Exception => error - log(ERROR, 'Edge::Future', error) - complete_with Future::Failed.new(error) - end - end - - # @!visibility private - class CompletableEventPromise < AbstractPromise - def initialize(default_executor) - super CompletableEvent.new(self, default_executor) - end - end - - # @!visibility private - class CompletableFuturePromise < AbstractPromise - def initialize(default_executor) - super CompletableFuture.new(self, default_executor) - end - - # Set the `Future` to a value and wake or notify all threads waiting on it. - # - # @param [Object] value the value to store in the `Future` - # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed - # @return [Future] - def success(value) - complete_with Future::Success.new(value) - end - - def try_success(value) - !!complete_with(Future::Success.new(value), false) - end - - # Set the `Future` to failed due to some error and wake or notify all threads waiting on it. - # - # @param [Object] reason for the failure - # @raise [Concurrent::MultipleAssignmentError] if the `Future` has already been set or otherwise completed - # @return [Future] - def fail(reason = StandardError.new) - complete_with Future::Failed.new(reason) - end - - def try_fail(reason = StandardError.new) - !!complete_with(Future::Failed.new(reason), false) - end - - public :evaluate_to - - # @return [Future] - def evaluate_to!(*args, block) - evaluate_to(*args, block).wait! - end - end - - # @abstract - # @!visibility private - class InnerPromise < AbstractPromise - end - - # @abstract - # @!visibility private - class BlockedPromise < InnerPromise - def self.new(*args, &block) - promise = super(*args, &block) - promise.blocked_by.each { |f| f.add_callback :callback_notify_blocked, promise } - promise - end - - def initialize(future, blocked_by_futures, countdown) - super(future) - initialize_blocked_by(blocked_by_futures) - @Countdown = AtomicFixnum.new countdown - end - - # @api private - def on_done(future) - countdown = process_on_done(future) - completable = completable?(countdown, future) - - if completable - on_completable(future) - # futures could be deleted from blocked_by one by one here, but that would be too expensive, - # it's done once when all are done to free the reference - clear_blocked_by! - end - end - - def touch - blocked_by.each(&:touch) - end - - # !visibility private - # for inspection only - def blocked_by - @BlockedBy - end - - def inspect - "#{to_s[0..-2]} blocked_by:[#{ blocked_by.map(&:to_s).join(', ')}]>" - end - - private - - def initialize_blocked_by(blocked_by_futures) - @BlockedBy = [blocked_by_futures].flatten - end - - def clear_blocked_by! 
- # not synchronized because we do not care when this change propagates - @BlockedBy = [] - nil - end - - # @return [true,false] if completable - def completable?(countdown, future) - countdown.zero? - end - - def process_on_done(future) - @Countdown.decrement - end - - def on_completable(done_future) - raise NotImplementedError - end - end - - # @abstract - # @!visibility private - class BlockedTaskPromise < BlockedPromise - def initialize(blocked_by_future, default_executor, executor, &task) - raise ArgumentError, 'no block given' unless block_given? - super Future.new(self, default_executor), blocked_by_future, 1 - @Executor = executor - @Task = task - end - - def executor - @Executor - end - end - - # @!visibility private - class ThenPromise < BlockedTaskPromise - private - - def initialize(blocked_by_future, default_executor, executor, &task) - raise ArgumentError, 'only Future can be appended with then' unless blocked_by_future.is_a? Future - super blocked_by_future, default_executor, executor, &task - end - - def on_completable(done_future) - if done_future.success? - Concurrent.post_on(@Executor, done_future, @Task) do |future, task| - evaluate_to lambda { future.apply task } - end - else - complete_with done_future.internal_state - end - end - end - - # @!visibility private - class RescuePromise < BlockedTaskPromise - private - - def initialize(blocked_by_future, default_executor, executor, &task) - super blocked_by_future, default_executor, executor, &task - end - - def on_completable(done_future) - if done_future.failed? - Concurrent.post_on(@Executor, done_future, @Task) do |future, task| - evaluate_to lambda { future.apply task } - end - else - complete_with done_future.internal_state - end - end - end - - # @!visibility private - class ChainPromise < BlockedTaskPromise - private - - def on_completable(done_future) - if Future === done_future - Concurrent.post_on(@Executor, done_future, @Task) { |future, task| evaluate_to(*future.result, task) } - else - Concurrent.post_on(@Executor, @Task) { |task| evaluate_to task } - end - end - end - - # will be immediately completed - # @!visibility private - class ImmediateEventPromise < InnerPromise - def initialize(default_executor) - super Event.new(self, default_executor).complete_with(Event::COMPLETED) - end - end - - # @!visibility private - class ImmediateFuturePromise < InnerPromise - def initialize(default_executor, success, value, reason) - super Future.new(self, default_executor). - complete_with(success ? Future::Success.new(value) : Future::Failed.new(reason)) - end - end - - # @!visibility private - class FlatPromise < BlockedPromise - - # !visibility private - def blocked_by - @BlockedBy.each.to_a - end - - private - - def process_on_done(future) - countdown = super(future) - if countdown.nonzero? - internal_state = future.internal_state - - unless internal_state.success? 
- complete_with internal_state - return countdown - end - - value = internal_state.value - case value - when Future - value.touch if self.future.touched - @BlockedBy.push value - value.add_callback :callback_notify_blocked, self - @Countdown.value - when Event - evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) - else - evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) - end - end - countdown - end - - def initialize(blocked_by_future, levels, default_executor) - raise ArgumentError, 'levels has to be higher than 0' if levels < 1 - super Future.new(self, default_executor), blocked_by_future, 1 + levels - end - - def initialize_blocked_by(blocked_by_future) - @BlockedBy = LockFreeStack.new.push(blocked_by_future) - end - - def on_completable(done_future) - complete_with done_future.internal_state - end - - def clear_blocked_by! - @BlockedBy.clear - nil - end - - def completable?(countdown, future) - !@Future.internal_state.completed? && super(countdown, future) - end - end - - # @!visibility private - class ZipEventEventPromise < BlockedPromise - def initialize(event1, event2, default_executor) - super Event.new(self, default_executor), [event1, event2], 2 - end - - def on_completable(done_future) - complete_with Event::COMPLETED - end - end - - # @!visibility private - class ZipFutureEventPromise < BlockedPromise - def initialize(future, event, default_executor) - super Future.new(self, default_executor), [future, event], 2 - @FutureResult = future - end - - def on_completable(done_future) - complete_with @FutureResult.internal_state - end - end - - # @!visibility private - class ZipFutureFuturePromise < BlockedPromise - def initialize(future1, future2, default_executor) - super Future.new(self, default_executor), [future1, future2], 2 - @Future1Result = future1 - @Future2Result = future2 - end - - def on_completable(done_future) - success1, value1, reason1 = @Future1Result.result - success2, value2, reason2 = @Future2Result.result - success = success1 && success2 - new_state = if success - Future::SuccessArray.new([value1, value2]) - else - Future::PartiallyFailed.new([value1, value2], [reason1, reason2]) - end - complete_with new_state - end - end - - # @!visibility private - class EventWrapperPromise < BlockedPromise - def initialize(event, default_executor) - super Event.new(self, default_executor), event, 1 - end - - def on_completable(done_future) - complete_with Event::COMPLETED - end - end - - # @!visibility private - class FutureWrapperPromise < BlockedPromise - def initialize(future, default_executor) - super Future.new(self, default_executor), future, 1 - end - - def on_completable(done_future) - complete_with done_future.internal_state - end - end - - # @!visibility private - class ZipFuturesPromise < BlockedPromise - - private - - def initialize(blocked_by_futures, default_executor) - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - - on_completable nil if blocked_by_futures.empty? 
- end - - def on_completable(done_future) - all_success = true - values = Array.new(blocked_by.size) - reasons = Array.new(blocked_by.size) - - blocked_by.each_with_index do |future, i| - if future.is_a?(Future) - success, values[i], reasons[i] = future.result - all_success &&= success - else - values[i] = reasons[i] = nil - end - end - - if all_success - complete_with Future::SuccessArray.new(values) - else - complete_with Future::PartiallyFailed.new(values, reasons) - end - end - end - - # @!visibility private - class ZipEventsPromise < BlockedPromise - - private - - def initialize(blocked_by_futures, default_executor) - super(Event.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - - on_completable nil if blocked_by_futures.empty? - end - - def on_completable(done_future) - complete_with Event::COMPLETED - end - end - - # @!visibility private - class AnyCompletePromise < BlockedPromise - - private - - def initialize(blocked_by_futures, default_executor) - blocked_by_futures.all? { |f| f.is_a? Future } or - raise ArgumentError, 'accepts only Futures not Events' - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - end - - def completable?(countdown, future) - true - end - - def on_completable(done_future) - complete_with done_future.internal_state, false - end - end - - # @!visibility private - class AnySuccessfulPromise < BlockedPromise - - private - - def initialize(blocked_by_futures, default_executor) - blocked_by_futures.all? { |f| f.is_a? Future } or - raise ArgumentError, 'accepts only Futures not Events' - super(Future.new(self, default_executor), blocked_by_futures, blocked_by_futures.size) - end - - def completable?(countdown, future) - future.success? || super(countdown, future) - end - - def on_completable(done_future) - complete_with done_future.internal_state, false - end - end - - # @!visibility private - class DelayPromise < InnerPromise - def touch - @Future.complete_with Event::COMPLETED - end - - private - - def initialize(default_executor) - super Event.new(self, default_executor) - end - end - - # will be evaluated to task in intended_time - # @!visibility private - class ScheduledPromise < InnerPromise - def intended_time - @IntendedTime - end - - def inspect - "#{to_s[0..-2]} intended_time:[#{@IntendedTime}}>" - end - - private - - def initialize(default_executor, intended_time) - super Event.new(self, default_executor) - - @IntendedTime = intended_time - - in_seconds = begin - now = Time.now - schedule_time = if @IntendedTime.is_a? Time - @IntendedTime - else - now + @IntendedTime - end - [0, schedule_time.to_f - now.to_f].max - end - - Concurrent.global_timer_set.post(in_seconds) do - @Future.complete_with Event::COMPLETED - end - end - end - end -end - -Concurrent::Edge.send :extend, Concurrent::Edge::FutureShortcuts -Concurrent::Edge.send :include, Concurrent::Edge::FutureShortcuts - -Concurrent.send :extend, Concurrent::Edge::FutureShortcuts -Concurrent.send :include, Concurrent::Edge::FutureShortcuts diff --git a/lib/concurrent/edge/lock_free_linked_set.rb b/lib/concurrent/edge/lock_free_linked_set.rb index 62964037d..42c3a9f8f 100644 --- a/lib/concurrent/edge/lock_free_linked_set.rb +++ b/lib/concurrent/edge/lock_free_linked_set.rb @@ -124,7 +124,7 @@ def remove(item) # # An iterator to loop through the set. 
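A brief usage sketch for the iterator documented in this hunk (assuming the set's `add` method defined elsewhere in this file; values are illustrative):

```ruby
require 'concurrent-edge'

set = Concurrent::Edge::LockFreeLinkedSet.new
set.add 1
set.add 2

set.each { |item| puts item } # yields 1 and 2; returns the set itself
```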
# - # @yield [Object] each item in the set + # @yield [item] each item in the set # @yieldparam [Object] item the item you to remove from the set # # @return [Object] self: the linked set on which each was called diff --git a/lib/concurrent/edge/lock_free_queue.rb b/lib/concurrent/edge/lock_free_queue.rb new file mode 100644 index 000000000..8ef216e54 --- /dev/null +++ b/lib/concurrent/edge/lock_free_queue.rb @@ -0,0 +1,116 @@ +module Concurrent + + class LockFreeQueue < Synchronization::Object + + class Node < Synchronization::Object + attr_atomic :successor + + def initialize(item, successor) + super() + # published through queue, no need to be volatile or final + @Item = item + self.successor = successor + end + + def item + @Item + end + end + + safe_initialization! + + attr_atomic :head, :tail + + def initialize + super() + dummy_node = Node.new(:dummy, nil) + + self.head = dummy_node + self.tail = dummy_node + end + + def push(item) + # allocate a new node with the item embedded + new_node = Node.new(item, nil) + + # keep trying until the operation succeeds + while true + current_tail_node = tail + current_tail_successor = current_tail_node.successor + + # if our stored tail is still the current tail + if current_tail_node == tail + # if that tail was really the last node + if current_tail_successor.nil? + # if we can update the previous successor of tail to point to this new node + if current_tail_node.compare_and_set_successor(nil, new_node) + # then update tail to point to this node as well + compare_and_set_tail(current_tail_node, new_node) + # and return + return true + # else, start the loop over + end + else + # in this case, the tail ref we had wasn't the real tail + # so we try to set its successor as the real tail, then start the loop again + compare_and_set_tail(current_tail_node, current_tail_successor) + end + end + end + end + + def pop + # retry until some value can be returned + while true + # the value in @head is just a dummy node that always sits in that position, + # the real 'head' is in its successor + current_dummy_node = head + current_tail_node = tail + + current_head_node = current_dummy_node.successor + + # if our local head is still consistent with the head node, continue + # otherwise, start over + if current_dummy_node == head + # if either the queue is empty, or falling behind + if current_dummy_node == current_tail_node + # if there's nothing after the 'dummy' head node + if current_head_node.nil? + # just return nil + return nil + else + # here the head element succeeding head is not nil, but the head and tail are equal + # so tail is falling behind, update it, then start over + compare_and_set_tail(current_tail_node, current_head_node) + end + + # the queue isn't empty + # if we can set the dummy head to the 'real' head, we're free to return the value in that real head, success + elsif compare_and_set_head(current_dummy_node, current_head_node) + # grab the item from the popped node + item = current_head_node.item + + # return it, success! + return item + end + end + end + end + + # approximate + def size + successor = head.successor + count = 0 + + while true + break if successor.nil? 
+ + current_node = successor + successor = current_node.successor + count += 1 + end + + count + end + end +end diff --git a/lib/concurrent/edge/lock_free_stack.rb b/lib/concurrent/edge/lock_free_stack.rb index 1749a0d16..211580a99 100644 --- a/lib/concurrent/edge/lock_free_stack.rb +++ b/lib/concurrent/edge/lock_free_stack.rb @@ -1,100 +1,122 @@ module Concurrent - module Edge - class LockFreeStack < Synchronization::Object + class LockFreeStack < Synchronization::Object - safe_initialization! + safe_initialization! - class Node - attr_reader :value, :next_node + class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? - def initialize(value, next_node) - @value = value - @next_node = next_node - end + attr_reader :value, :next_node + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value - singleton_class.send :alias_method, :[], :new + def initialize(value, next_node) + @value = value + @next_node = next_node end - class Empty < Node - def next_node - self - end + singleton_class.send :alias_method, :[], :new + end + + class Empty < Node + def next_node + self end + end - EMPTY = Empty[nil, nil] + EMPTY = Empty[nil, nil] - private(*attr_atomic(:head)) + private(*attr_atomic(:head)) - def initialize - super() - self.head = EMPTY - end + def self.of1(value) + new Node[value, EMPTY] + end - def empty? - head.equal? EMPTY - end + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end - def compare_and_push(head, value) - compare_and_set_head head, Node[value, head] - end + def initialize(head = EMPTY) + super() + self.head = head + end - def push(value) - while true - current_head = head - return self if compare_and_set_head current_head, Node[value, current_head] - end - end + def empty?(head = self.head) + head.equal? EMPTY + end - def peek - head - end + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end - def compare_and_pop(head) - compare_and_set_head head, head.next_node + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] end + end - def pop - while true - current_head = head - return current_head.value if compare_and_set_head current_head, current_head.next_node - end - end + def peek + head + end + + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end - def compare_and_clear(head) - compare_and_set_head head, EMPTY + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node end + end - include Enumerable + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end - def each(head = nil) - return to_enum(:each, head) unless block_given? - it = head || peek - until it.equal?(EMPTY) - yield it.value - it = it.next_node - end - self + include Enumerable + + def each(head = nil) + return to_enum(:each, head) unless block_given? 
+ it = head || peek + until it.equal?(EMPTY) + yield it.value + it = it.next_node end + self + end - def clear - while true - current_head = head - return false if current_head == EMPTY - return true if compare_and_set_head current_head, EMPTY - end + def clear + while true + current_head = head + return false if current_head == EMPTY + return true if compare_and_set_head current_head, EMPTY end + end - def clear_each(&block) - while true - current_head = head - return self if current_head == EMPTY - if compare_and_set_head current_head, EMPTY - each current_head, &block - return self - end + def clear_if(head) + compare_and_set_head head, EMPTY + end + + def replace_if(head, new_head) + compare_and_set_head head, new_head + end + + def clear_each(&block) + while true + current_head = head + return self if current_head == EMPTY + if compare_and_set_head current_head, EMPTY + each current_head, &block + return self end end + end + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x %s>', self.class, object_id << 1, to_a.to_s end end end diff --git a/lib/concurrent/edge/old_channel_integration.rb b/lib/concurrent/edge/old_channel_integration.rb new file mode 100644 index 000000000..fbdbba728 --- /dev/null +++ b/lib/concurrent/edge/old_channel_integration.rb @@ -0,0 +1,54 @@ +module Concurrent + module Promises + module FactoryMethods + + # @!visibility private + + module OldChannelIntegration + + # @!visibility private + + # only proof of concept + # @return [Future] + def select(*channels) + # TODO (pitr-ch 26-Mar-2016): re-do, has to be non-blocking + future do + # noinspection RubyArgCount + Channel.select do |s| + channels.each do |ch| + s.take(ch) { |value| [value, ch] } + end + end + end + end + end + + include OldChannelIntegration + end + + class Future < AbstractEventFuture + + # @!visibility private + + module OldChannelIntegration + + # @!visibility private + + # Zips with selected value form the suplied channels + # @return [Future] + def then_select(*channels) + future = Concurrent::Promises.select(*channels) + ZipFuturesPromise.new_blocked_by2(self, future, @DefaultExecutor).future + end + + # @note may block + # @note only proof of concept + def then_put(channel) + on_fulfillment_using(:io, channel) { |value, channel| channel.put value } + end + end + + include OldChannelIntegration + end + end +end diff --git a/lib/concurrent/edge/promises.rb b/lib/concurrent/edge/promises.rb new file mode 100644 index 000000000..36b5ba3a2 --- /dev/null +++ b/lib/concurrent/edge/promises.rb @@ -0,0 +1,2079 @@ +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/edge/lock_free_stack' +require 'concurrent/errors' + +module Concurrent + + + # {include:file:doc/promises-main.md} + module Promises + + # TODO (pitr-ch 23-Dec-2016): move out + # @!visibility private + module ReInclude + def included(base) + included_into << [:include, base] + super(base) + end + + def extended(base) + included_into << [:extend, base] + super(base) + end + + def include(*modules) + super(*modules) + modules.reverse.each do |module_being_included| + included_into.each do |method, mod| + mod.send method, module_being_included + end + end + end + + private + + def included_into + @included_into ||= [] + end + end + + # @!macro [new] promises.param.default_executor + # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the + # global executor. 
Default executor propagates to chained futures unless overridden with
+    #     executor parameter or changed with {AbstractEventFuture#with_default_executor}.
+    #
+    # @!macro [new] promises.param.executor
+    #   @param [Executor, :io, :fast] executor Instance of an executor or a name of the
+    #     global executor. The task is executed on it, default executor remains unchanged.
+    #
+    # @!macro [new] promises.param.args
+    #   @param [Object] args arguments which are passed to the task when it's executed.
+    #     (It might be prepended with other arguments, see the @yield section).
+    #
+    # @!macro [new] promises.shortcut.on
+    #   Shortcut of {#$0_on} with default `:io` executor supplied.
+    #   @see #$0_on
+    #
+    # @!macro [new] promises.shortcut.using
+    #   Shortcut of {#$0_using} with default `:io` executor supplied.
+    #   @see #$0_using
+    #
+    # @!macro [new] promise.param.task-future
+    #   @yieldreturn will become result of the returned Future.
+    #     Its returned value becomes {Future#value} fulfilling it,
+    #     raised exception becomes {Future#reason} rejecting it.
+    #
+    # @!macro [new] promise.param.callback
+    #   @yieldreturn is forgotten.

+    # Container of all {Future}, {Event} factory methods. They are never constructed directly with
+    # new.
+    module FactoryMethods
+      extend ReInclude

+      # @!macro promises.shortcut.on
+      # @return [ResolvableEvent]
+      def resolvable_event
+        resolvable_event_on :io
+      end

+      # Creates resolvable event, user is responsible for resolving the event once by
+      # {Promises::ResolvableEvent#resolve}.
+      #
+      # @!macro promises.param.default_executor
+      # @return [ResolvableEvent]
+      def resolvable_event_on(default_executor = :io)
+        ResolvableEventPromise.new(default_executor).future
+      end

+      # @!macro promises.shortcut.on
+      # @return [ResolvableFuture]
+      def resolvable_future
+        resolvable_future_on :io
+      end

+      # Creates resolvable future, user is responsible for resolving the future once by
+      # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill},
+      # or {Promises::ResolvableFuture#reject}.
+      #
+      # @!macro promises.param.default_executor
+      # @return [ResolvableFuture]
+      def resolvable_future_on(default_executor = :io)
+        ResolvableFuturePromise.new(default_executor).future
+      end

+      # @!macro promises.shortcut.on
+      # @return [Future]
+      def future(*args, &task)
+        future_on(:io, *args, &task)
+      end

+      # @!macro [new] promises.future-on1
+      #   Constructs new Future which will be resolved after the block is evaluated on the default executor.
+      #   Evaluation begins immediately.
+      #
+      # @!macro [new] promises.future-on2
+      #   @!macro promises.param.default_executor
+      #   @!macro promises.param.args
+      #   @yield [*args] to the task.
+      #   @!macro promise.param.task-future
+      #   @return [Future]
+      def future_on(default_executor, *args, &task)
+        ImmediateEventPromise.new(default_executor).future.then(*args, &task)
+      end

+      # Creates resolved future which will be either fulfilled with the given value or rejected with
+      # the given reason.
+      #
+      # @!macro promises.param.default_executor
+      # @return [Future]
+      def resolved_future(fulfilled, value, reason, default_executor = :io)
+        ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future
+      end

+      # Creates resolved future which will be fulfilled with the given value.
+      #
+      # @!macro promises.param.default_executor
+      # @return [Future]
+      def fulfilled_future(value, default_executor = :io)
+        resolved_future true, value, nil, default_executor
+      end

+      # Creates resolved future which will be rejected with the given reason.
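A minimal sketch of the factory methods defined so far, combining an immediately evaluated future with a manually resolved one (the extra thread and values are illustrative):

```ruby
require 'concurrent-edge'

# evaluated on the :io executor; extra arguments are passed to the task
doubled = Concurrent::Promises.future(21) { |v| v * 2 }
doubled.value! # => 42

# resolved manually by user code, possibly from another thread
answer = Concurrent::Promises.resolvable_future
Thread.new { answer.fulfill :done }
answer.value! # => :done
```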
+ # + # @!macro promises.param.default_executor + # @return [Future] + def rejected_future(reason, default_executor = :io) + resolved_future false, nil, reason, default_executor + end + + # Creates resolved event. + # + # @!macro promises.param.default_executor + # @return [Event] + def resolved_event(default_executor = :io) + ImmediateEventPromise.new(default_executor).event + end + + # General constructor. Behaves differently based on the argument's type. It's provided for convenience + # but it's better to be explicit. + # + # @see rejected_future, resolved_event, fulfilled_future + # @!macro promises.param.default_executor + # @return [Event, Future] + # + # @overload create(nil, default_executor = :io) + # @param [nil] nil + # @return [Event] resolved event. + # + # @overload create(a_future, default_executor = :io) + # @param [Future] a_future + # @return [Future] a future which will be resolved when a_future is. + # + # @overload create(an_event, default_executor = :io) + # @param [Event] an_event + # @return [Event] an event which will be resolved when an_event is. + # + # @overload create(exception, default_executor = :io) + # @param [Exception] exception + # @return [Future] a rejected future with the exception as its reason. + # + # @overload create(value, default_executor = :io) + # @param [Object] value when none of the above overloads fits + # @return [Future] a fulfilled future with the value. + def create(argument = nil, default_executor = :io) + case argument + when AbstractEventFuture + # returning wrapper would change nothing + argument + when Exception + rejected_future argument, default_executor + when nil + resolved_event default_executor + else + fulfilled_future argument, default_executor + end + end + + # @!macro promises.shortcut.on + # @return [Future] + def delay(*args, &task) + delay_on :io, *args, &task + end + + # @!macro promises.future-on1 + # The task will be evaluated only after the future is touched, see {AbstractEventFuture#touch} + # + # @!macro promises.future-on2 + def delay_on(default_executor, *args, &task) + DelayPromise.new(default_executor).event.chain(*args, &task) + end + + # @!macro promises.shortcut.on + # @return [Future] + def schedule(intended_time, *args, &task) + schedule_on :io, intended_time, *args, &task + end + + # @!macro promises.future-on1 + # The task is planned for execution in intended_time. + # + # @!macro promises.future-on2 + # @!macro [new] promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. + def schedule_on(default_executor, intended_time, *args, &task) + ScheduledPromise.new(default_executor, intended_time).event.chain(*args, &task) + end + + # @!macro promises.shortcut.on + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on :io, *futures_and_or_events + end + + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. + # @!macro [new] promises.event-conversion + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. 
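A short sketch of the combinators introduced above; `delay` stays unevaluated until touched, which `zip`'s `#value!` does transitively:

```ruby
require 'concurrent-edge'

lazy      = Concurrent::Promises.delay { :expensive }     # evaluated only when touched
scheduled = Concurrent::Promises.schedule(0.1) { :later } # runs ~0.1 s from now
both      = Concurrent::Promises.zip(lazy, scheduled)

both.value! # => [:expensive, :later] -- #value! touches, so lazy gets evaluated
```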
+ # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # @!macro promises.shortcut.on + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on :io, *futures_and_or_events + end + + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on :io, *futures_and_or_events + end + + alias_method :any, :any_resolved_future + + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. + # @!macro [new] promises.any-touch + # If resolved it does not propagate {AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on :io, *futures_and_or_events + end + + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_event(*futures_and_or_events) + any_event_on :io, *futures_and_or_events + end + + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + end + + module InternalStates + # @private + class State + def resolved? + raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + private_constant :State + + # @private + class Pending < State + def resolved? 
+ false + end + + def to_sym + :pending + end + end + + private_constant :Pending + + # @private + class ResolvedWithResult < State + def resolved? + true + end + + def to_sym + :resolved + end + + def result + [fulfilled?, value, reason] + end + + def fulfilled? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + private_constant :ResolvedWithResult + + # @private + class Fulfilled < ResolvedWithResult + + def initialize(value) + @Value = value + end + + def fulfilled? + true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :fulfilled + end + end + + private_constant :Fulfilled + + # @private + class FulfilledArray < Fulfilled + def apply(args, block) + block.call(*value, *args) + end + end + + private_constant :FulfilledArray + + # @private + class Rejected < ResolvedWithResult + def initialize(reason) + @Reason = reason + end + + def fulfilled? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :rejected + end + + def apply(args, block) + block.call reason, *args + end + end + + private_constant :Rejected + + # @private + class PartiallyRejected < ResolvedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def fulfilled? + false + end + + def to_sym + :rejected + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + private_constant :PartiallyRejected + + PENDING = Pending.new + RESOLVED = Fulfilled.new(nil) + + def RESOLVED.to_sym + :resolved + end + + private_constant :PENDING, :RESOLVED + end + + private_constant :InternalStates + + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. + class AbstractEventFuture < Synchronization::Object + safe_initialization! + private(*attr_atomic(:internal_state) - [:internal_state]) + + include InternalStates + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + @Callbacks = LockFreeStack.new + # noinspection RubyArgCount + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + private :initialize + + # @!macro [new] promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro [new] promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro [new] promises.warn.blocks + # @note This function potentially blocks current thread until the Future is resolved. + # Be careful it can deadlock. Try to chain instead. + + # Returns its state. + # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :resolved] + # @overload a_future.state + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] + def state + internal_state.to_sym + end + + # Is it in pending state? + # @return [Boolean] + def pending?(state = internal_state) + !state.resolved? + end + + # Is it in resolved state? + # @return [Boolean] + def resolved?(state = internal_state) + state.resolved? + end + + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring resolved state, like {#wait}. 
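To illustrate the state queries just described, a hypothetical resolvable future walked through its states:

```ruby
require 'concurrent-edge'

f = Concurrent::Promises.resolvable_future
f.state     # => :pending
f.pending?  # => true

f.fulfill 1
f.state     # => :fulfilled
f.resolved? # => true
```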
+ # @return [self] + def touch + @Promise.touch + self + end + + # @!macro [new] promises.touches + # Calls {AbstractEventFuture#touch}. + + # @!macro [new] promises.method.wait + # Wait (block the Thread) until receiver is {#resolved?}. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Future, true, false] self implies timeout was not used, true implies timeout was used + # and it was resolved, false implies it was not resolved within timeout. + def wait(timeout = nil) + result = wait_until_resolved(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor + # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on + # @see similar + def default_executor + @DefaultExecutor + end + + # @!macro promises.shortcut.on + # @return [Future] + def chain(*args, &task) + chain_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_on(executor, *args, &task) + # @yield [*args] to the task. + # @overload a_future.chain_on(executor, *args, &task) + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason + def chain_on(executor, *args, &task) + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @return [String] Short string representation. + def to_s + format '<#%s:0x%x %s>', self.class, object_id << 1, state + end + + alias_method :inspect, :to_s + + # Resolves the resolvable when receiver is resolved. + # + # @param [Resolvable] resolvable + # @return [self] + def chain_resolvable(resolvable) + on_resolution! { resolvable.resolve_with internal_state } + end + + alias_method :tangle, :chain_resolvable + + # @!macro promises.shortcut.using + # @return [self] + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution_using(executor, *args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. 
+      #   @yieldparam [true, false] fulfilled
+      #   @yieldparam [Object] value
+      #   @yieldparam [Exception] reason
+      def on_resolution_using(executor, *args, &callback)
+        add_callback :async_callback_on_resolution, executor, args, callback
+      end

+      # @!macro [new] promises.method.with_default_executor
+      #   Creates new object of the same class with the executor set as its new default executor.
+      #   Any futures depending on it will use the new default executor.
+      # @!macro promises.shortcut.event-future
+      # @abstract
+      # @return [AbstractEventFuture]
+      def with_default_executor(executor)
+        raise NotImplementedError
+      end

+      # @!visibility private
+      def resolve_with(state, raise_on_reassign = true)
+        if compare_and_set_internal_state(PENDING, state)
+          # go to synchronized block only if there were waiting threads
+          @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0
+          call_callbacks state
+        else
+          return rejected_resolution(raise_on_reassign, state)
+        end
+        self
+      end

+      # For inspection.
+      # @!visibility private
+      # @return [Array]
+      def blocks
+        @Callbacks.each_with_object([]) do |(method, args), promises|
+          promises.push(args[0]) if method == :callback_notify_blocked
+        end
+      end

+      # For inspection.
+      # @!visibility private
+      def callbacks
+        @Callbacks.each.to_a
+      end

+      # For inspection.
+      # @!visibility private
+      def promise
+        @Promise
+      end

+      # For inspection.
+      # @!visibility private
+      def touched?
+        promise.touched?
+      end

+      # For inspection.
+      # @!visibility private
+      def waiting_threads
+        @Waiters.each.to_a
+      end

+      # @!visibility private
+      def add_callback(method, *args)
+        state = internal_state
+        if resolved?(state)
+          call_callback method, state, args
+        else
+          @Callbacks.push [method, args]
+          state = internal_state
+          # take back if it was resolved in the meanwhile
+          call_callbacks state if resolved?(state)
+        end
+        self
+      end

+      private

+      # @return [Boolean]
+      def wait_until_resolved(timeout)
+        return true if resolved?

+        touch

+        @Lock.synchronize do
+          @Waiters.increment
+          begin
+            unless resolved?
+              @Condition.wait @Lock, timeout
+            end
+          ensure
+            # JRuby may raise ConcurrencyError
+            @Waiters.decrement
+          end
+        end
+        resolved?
+      end

+      def call_callback(method, state, args)
+        self.send method, state, *args
+      end

+      def call_callbacks(state)
+        method, args = @Callbacks.pop
+        while method
+          call_callback method, state, args
+          method, args = @Callbacks.pop
+        end
+      end

+      def with_async(executor, *args, &block)
+        Concurrent.executor(executor).post(*args, &block)
+      end

+      def async_callback_on_resolution(state, executor, args, callback)
+        with_async(executor, state, args, callback) do |st, ar, cb|
+          callback_on_resolution st, ar, cb
+        end
+      end

+      def callback_notify_blocked(state, promise, index)
+        promise.on_blocker_resolution self, index
+      end
+    end

+    # Represents an event which will happen in future (will be resolved). The event is either
+    # pending or resolved. It should always be resolved. Use {Future} to communicate rejections and
+    # cancellation.
+    class Event < AbstractEventFuture

+      alias_method :then, :chain


+      # @!macro [new] promises.method.zip
+      #   Creates a new event or a future which will be resolved when receiver and other are.
+      #   Returns an event if receiver and other are events, otherwise returns a future.
+      #   If just one of the parties is Future then the result
+      #   of the returned future is equal to the result of the supplied future. If both are futures
+      #   then the result is as described in {FactoryMethods#zip_futures_on}.
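A sketch of resolution callbacks and chaining on an event, using the `chain` and `on_resolution_using` methods defined above:

```ruby
require 'concurrent-edge'

event   = Concurrent::Promises.resolvable_event
chained = event.chain { :done }                    # runs once the event resolves
event.on_resolution_using(:io) { puts 'resolved' } # async callback on the :io pool

event.resolve
chained.value! # => :done
```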
+      #
+      # @return [Future, Event]
+      def zip(other)
+        if other.is_a?(Future)
+          ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future
+        else
+          ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event
+        end
+      end
+
+      alias_method :&, :zip
+
+      # Creates a new event which will be resolved when the first of receiver, `event_or_future`
+      # resolves.
+      #
+      # @return [Event]
+      def any(event_or_future)
+        AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event
+      end
+
+      alias_method :|, :any
+
+      # Creates a new event dependent on the receiver which will not evaluate until touched, see {#touch}.
+      # In other words, it inserts a delay into the chain of Futures, making the rest of it lazily evaluated.
+      #
+      # @return [Event]
+      def delay
+        event = DelayPromise.new(@DefaultExecutor).event
+        ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event
+      end
+
+      # @!macro [new] promise.method.schedule
+      #   Creates a new event dependent on the receiver, scheduled to execute on/in intended_time.
+      #   A relative intended_time (a number of seconds) is interpreted from the moment the receiver
+      #   is resolved, therefore it inserts a delay into the chain.
+      #
+      #   @!macro promises.param.intended_time
+      # @return [Event]
+      def schedule(intended_time)
+        chain do
+          event = ScheduledPromise.new(@DefaultExecutor, intended_time).event
+          ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event
+        end.flat_event
+      end
+
+      # Converts the event to a future. The future is fulfilled when the event is resolved; the future
+      # can never be rejected.
+      #
+      # @return [Future]
+      def to_future
+        future = Promises.resolvable_future
+      ensure
+        chain_resolvable(future)
+      end
+
+      # Returns self, since this is an event.
+      # @return [Event]
+      def to_event
+        self
+      end
+
+      # @!macro promises.method.with_default_executor
+      # @return [Event]
+      def with_default_executor(executor)
+        EventWrapperPromise.new_blocked_by1(self, executor).event
+      end
+
+      private
+
+      def rejected_resolution(raise_on_reassign, state)
+        # the error has to actually be raised, not just constructed
+        raise Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign
+        return false
+      end
+
+      def callback_on_resolution(state, args, callback)
+        callback.call(*args)
+      end
+    end
+
+    # Represents a value which will become available in future. May reject with a reason instead,
+    # e.g. when the task raises an exception.
+    class Future < AbstractEventFuture
+
+      # Is it in fulfilled state?
+      # @return [Boolean]
+      def fulfilled?(state = internal_state)
+        state.resolved? && state.fulfilled?
+      end
+
+      # Is it in rejected state?
+      # @return [Boolean]
+      def rejected?(state = internal_state)
+        state.resolved? && !state.fulfilled?
+      end
+
+      # @!macro [new] promises.warn.nil
+      #   @note Make sure a returned `nil` is not confused with a timeout, no value when rejected,
+      #     no reason when fulfilled, etc.
+      #     Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc.
+
+      # @!macro [new] promises.method.value
+      #   Returns the value of the future.
+      #   @!macro promises.touches
+      #
+      #   @!macro promises.warn.blocks
+      #   @!macro promises.warn.nil
+      #   @!macro promises.param.timeout
+      # @return [Object, nil] the value of the Future when fulfilled, nil on timeout or rejection.
+      def value(timeout = nil)
+        internal_state.value if wait_until_resolved timeout
+      end
+
+      # Returns the reason of the future's rejection.
+      # @!macro promises.touches
+      #
+      # @!macro promises.warn.blocks
+      # @!macro promises.warn.nil
+      # @!macro promises.param.timeout
+      # @return [Exception, nil] nil on timeout or fulfillment.
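+      # @example Editor's sketch — reading a rejection reason (not part of the original docs)
+      #   Promises.rejected_future(StandardError.new('boom')).reason.message # => 'boom'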
+      def reason(timeout = nil)
+        internal_state.reason if wait_until_resolved timeout
+      end
+
+      # Returns the triplet fulfilled?, value, reason.
+      # @!macro promises.touches
+      #
+      # @!macro promises.warn.blocks
+      # @!macro promises.param.timeout
+      # @return [Array(Boolean, Object, Exception), nil] triplet of fulfilled?, value, reason, or nil
+      #   on timeout.
+      def result(timeout = nil)
+        internal_state.result if wait_until_resolved timeout
+      end
+
+      # @!macro promises.method.wait
+      # @raise [Exception] {#reason} on rejection
+      def wait!(timeout = nil)
+        result = wait_until_resolved!(timeout)
+        timeout ? result : self
+      end
+
+      # @!macro promises.method.value
+      # @return [Object, nil] the value of the Future when fulfilled, nil on timeout.
+      # @raise [Exception] {#reason} on rejection
+      def value!(timeout = nil)
+        internal_state.value if wait_until_resolved! timeout
+      end
+
+      # Allows a rejected Future to be raised with the `raise` method.
+      # @example
+      #   raise Promises.rejected_future(StandardError.new("boom"))
+      # @raise [Concurrent::Error] when called on a future that is not rejected
+      # @return [Exception]
+      def exception(*args)
+        raise Concurrent::Error, 'it is not rejected' unless rejected?
+        reason = Array(internal_state.reason).compact
+        if reason.size > 1
+          Concurrent::MultipleErrors.new reason
+        else
+          reason[0].exception(*args)
+        end
+      end
+
+      # @!macro promises.shortcut.on
+      # @return [Future]
+      def then(*args, &task)
+        then_on @DefaultExecutor, *args, &task
+      end
+
+      # Chains the task to be executed asynchronously on executor after the receiver fulfills. Does not run
+      # the task if the receiver rejects; the returned future will still resolve with the rejection,
+      # triggering any dependent futures.
+      #
+      # @!macro promises.param.executor
+      # @!macro promises.param.args
+      # @!macro promise.param.task-future
+      # @return [Future]
+      # @yield [value, *args] to the task.
+      def then_on(executor, *args, &task)
+        ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+      end
+
+      # @!macro promises.shortcut.on
+      # @return [Future]
+      def rescue(*args, &task)
+        rescue_on @DefaultExecutor, *args, &task
+      end
+
+      # Chains the task to be executed asynchronously on executor after the receiver rejects. Does not run
+      # the task if the receiver fulfills; the returned future will still resolve with the fulfillment,
+      # triggering any dependent futures.
+      #
+      # @!macro promises.param.executor
+      # @!macro promises.param.args
+      # @!macro promise.param.task-future
+      # @return [Future]
+      # @yield [reason, *args] to the task.
+      def rescue_on(executor, *args, &task)
+        RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+      end
+
+      # @!macro promises.method.zip
+      # @return [Future]
+      def zip(other)
+        if other.is_a?(Future)
+          ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future
+        else
+          ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future
+        end
+      end
+
+      alias_method :&, :zip
+
+      # Creates a new future which will be resolved when the first of receiver, `event_or_future`
+      # resolves. The returned future will have value nil if event_or_future is an event and resolves
+      # first.
+      #
+      # @return [Future]
+      def any(event_or_future)
+        AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future
+      end
+
+      alias_method :|, :any
+
+      # Creates a new future dependent on the receiver which will not evaluate until touched, see {#touch}.
+      # In other words, it inserts a delay into the chain of Futures, making the rest of it lazily evaluated.
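+      # @example Editor's sketch — nothing runs until the value is demanded (not part of the original docs)
+      #   lazy = Promises.fulfilled_future(1).delay.then(&:succ)
+      #   lazy.resolved? # => false
+      #   lazy.value!    # => 2, the read touches the delay and forces evaluation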
+      #
+      # @return [Future]
+      def delay
+        event = DelayPromise.new(@DefaultExecutor).event
+        ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future
+      end
+
+      # @!macro promise.method.schedule
+      # @return [Future]
+      def schedule(intended_time)
+        chain do
+          event = ScheduledPromise.new(@DefaultExecutor, intended_time).event
+          ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future
+        end.flat
+      end
+
+      # @!macro promises.method.with_default_executor
+      # @return [Future]
+      def with_default_executor(executor)
+        FutureWrapperPromise.new_blocked_by1(self, executor).future
+      end
+
+      # Creates a new future which will have the result of the future returned by the receiver. If the
+      # receiver rejects, the returned future will have its rejection.
+      #
+      # @param [Integer] level how many levels of futures should flatten
+      # @return [Future]
+      def flat_future(level = 1)
+        FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future
+      end
+
+      alias_method :flat, :flat_future
+
+      # Creates a new event which will be resolved when the event returned by the receiver is.
+      # Be careful: if the receiver rejects, the event will still just resolve, since an Event does not
+      # hold a reason.
+      #
+      # @return [Event]
+      def flat_event
+        FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event
+      end
+
+      # @!macro promises.shortcut.using
+      # @return [self]
+      def on_fulfillment(*args, &callback)
+        on_fulfillment_using @DefaultExecutor, *args, &callback
+      end
+
+      # Stores the callback to be executed synchronously on resolving thread after it is
+      # fulfilled. Does nothing on rejection.
+      #
+      # @!macro promises.param.args
+      # @!macro promise.param.callback
+      # @return [self]
+      # @yield [value, *args] to the callback.
+      def on_fulfillment!(*args, &callback)
+        add_callback :callback_on_fulfillment, args, callback
+      end
+
+      # Stores the callback to be executed asynchronously on executor after it is
+      # fulfilled. Does nothing on rejection.
+      #
+      # @!macro promises.param.executor
+      # @!macro promises.param.args
+      # @!macro promise.param.callback
+      # @return [self]
+      # @yield [value, *args] to the callback.
+      def on_fulfillment_using(executor, *args, &callback)
+        add_callback :async_callback_on_fulfillment, executor, args, callback
+      end
+
+      # @!macro promises.shortcut.using
+      # @return [self]
+      def on_rejection(*args, &callback)
+        on_rejection_using @DefaultExecutor, *args, &callback
+      end
+
+      # Stores the callback to be executed synchronously on resolving thread after it is
+      # rejected. Does nothing on fulfillment.
+      #
+      # @!macro promises.param.args
+      # @!macro promise.param.callback
+      # @return [self]
+      # @yield [reason, *args] to the callback.
+      def on_rejection!(*args, &callback)
+        add_callback :callback_on_rejection, args, callback
+      end
+
+      # Stores the callback to be executed asynchronously on executor after it is
+      # rejected. Does nothing on fulfillment.
+      #
+      # @!macro promises.param.executor
+      # @!macro promises.param.args
+      # @!macro promise.param.callback
+      # @return [self]
+      # @yield [reason, *args] to the callback.
+      def on_rejection_using(executor, *args, &callback)
+        add_callback :async_callback_on_rejection, executor, args, callback
+      end
+
+      # Allows futures to be used as green threads. The receiver has to evaluate to a future which
+      # represents what should be done next. It flattens indefinitely until a non-Future
+      # value is returned, which becomes the result of the returned future. Any encountered exception
+      # becomes the reason of the returned future.
+      #
+      # @return [Future]
+      # @example
+      #   body = lambda do |v|
+      #     v += 1
+      #     v < 5 ? Promises.future(v, &body) : v
+      #   end
+      #   Promises.future(0, &body).run.value! # => 5
+      def run
+        RunFuturePromise.new_blocked_by1(self, @DefaultExecutor).future
+      end
+
+      # @!visibility private
+      def apply(args, block)
+        internal_state.apply args, block
+      end
+
+      # Converts the future to an event which is resolved when the future is resolved, by fulfillment
+      # or rejection.
+      #
+      # @return [Event]
+      def to_event
+        event = Promises.resolvable_event
+      ensure
+        chain_resolvable(event)
+      end
+
+      # Returns self, since this is a future.
+      # @return [Future]
+      def to_future
+        self
+      end
+
+      private
+
+      def rejected_resolution(raise_on_reassign, state)
+        if raise_on_reassign
+          raise Concurrent::MultipleAssignmentError.new(
+              "Future can be resolved only once. It's #{result}, trying to set #{state.result}.",
+              current_result: result, new_result: state.result)
+        end
+        return false
+      end
+
+      def wait_until_resolved!(timeout = nil)
+        result = wait_until_resolved(timeout)
+        raise self if rejected?
+        result
+      end
+
+      def async_callback_on_fulfillment(state, executor, args, callback)
+        with_async(executor, state, args, callback) do |st, ar, cb|
+          callback_on_fulfillment st, ar, cb
+        end
+      end
+
+      def async_callback_on_rejection(state, executor, args, callback)
+        with_async(executor, state, args, callback) do |st, ar, cb|
+          callback_on_rejection st, ar, cb
+        end
+      end
+
+      def callback_on_fulfillment(state, args, callback)
+        state.apply args, callback if state.fulfilled?
+      end
+
+      def callback_on_rejection(state, args, callback)
+        state.apply args, callback unless state.fulfilled?
+      end
+
+      def callback_on_resolution(state, args, callback)
+        callback.call state.result, *args
+      end
+
+    end
+
+    # Marker module for a Future or Event resolved manually by the user.
+    module Resolvable
+    end
+
+    # An Event which can be resolved by the user.
+    class ResolvableEvent < Event
+      include Resolvable
+
+
+      # @!macro [new] raise_on_reassign
+      #   @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true.
+
+      # @!macro [new] promise.param.raise_on_reassign
+      #   @param [Boolean] raise_on_reassign should the method raise an exception if already resolved
+      #   @return [self, false] false is returned when raise_on_reassign is false and the receiver
+      #     is already resolved.
+      #
+
+      # Makes the event resolved, which triggers all dependent futures.
+      #
+      # @!macro promise.param.raise_on_reassign
+      def resolve(raise_on_reassign = true)
+        resolve_with RESOLVED, raise_on_reassign
+      end
+
+      # Creates a new event wrapping the receiver, effectively hiding the resolve method.
+      #
+      # @return [Event]
+      def with_hidden_resolvable
+        @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event
+      end
+    end
+
+    # A Future which can be resolved by the user.
+    class ResolvableFuture < Future
+      include Resolvable
+
+      # Makes the future resolved with the result triplet `fulfilled?`, `value`, `reason`,
+      # which triggers all dependent futures.
+      #
+      # @!macro promise.param.raise_on_reassign
+      def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true)
+        resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign)
+      end
+
+      # Makes the future fulfilled with `value`,
+      # which triggers all dependent futures.
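+      # @example Editor's sketch — resolving manually (not part of the original docs)
+      #   future = Promises.resolvable_future
+      #   future.fulfill :done
+      #   future.value # => :done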
+      #
+      # @!macro promise.param.raise_on_reassign
+      def fulfill(value, raise_on_reassign = true)
+        promise.fulfill(value, raise_on_reassign)
+      end
+
+      # Makes the future rejected with `reason`,
+      # which triggers all dependent futures.
+      #
+      # @!macro promise.param.raise_on_reassign
+      def reject(reason, raise_on_reassign = true)
+        promise.reject(reason, raise_on_reassign)
+      end
+
+      # Evaluates the block and sets its result as the future's value, fulfilling it; if the block
+      # raises an exception, the future rejects with it.
+      # @yield [*args] to the block.
+      # @yieldreturn [Object] value
+      # @return [self]
+      def evaluate_to(*args, &block)
+        # FIXME (pitr-ch 13-Jun-2016): add raise_on_reassign
+        promise.evaluate_to(*args, block)
+      end
+
+      # Evaluates the block and sets its result as the future's value, fulfilling it; if the block
+      # raises an exception, the future rejects with it.
+      # @yield [*args] to the block.
+      # @yieldreturn [Object] value
+      # @return [self]
+      # @raise [Exception] also raises the reason on rejection.
+      def evaluate_to!(*args, &block)
+        promise.evaluate_to!(*args, block)
+      end
+
+      # Creates a new future wrapping the receiver, effectively hiding the resolve method and similar.
+      #
+      # @return [Future]
+      def with_hidden_resolvable
+        @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future
+      end
+    end
+
+    # @abstract
+    # @private
+    class AbstractPromise < Synchronization::Object
+      safe_initialization!
+      include InternalStates
+
+      def initialize(future)
+        super()
+        @Future = future
+      end
+
+      def future
+        @Future
+      end
+
+      alias_method :event, :future
+
+      def default_executor
+        future.default_executor
+      end
+
+      def state
+        future.state
+      end
+
+      def touch
+      end
+
+      alias_method :inspect, :to_s
+
+      def delayed
+        nil
+      end
+
+      private
+
+      def resolve_with(new_state, raise_on_reassign = true)
+        @Future.resolve_with(new_state, raise_on_reassign)
+      end
+
+      # @return [Future]
+      def evaluate_to(*args, block)
+        resolve_with Fulfilled.new(block.call(*args))
+      rescue Exception => error
+        # TODO (pitr-ch 30-Jul-2016): figure out what should be rescued, there is an issue about it
+        resolve_with Rejected.new(error)
+      end
+    end
+
+    class ResolvableEventPromise < AbstractPromise
+      def initialize(default_executor)
+        super ResolvableEvent.new(self, default_executor)
+      end
+    end
+
+    class ResolvableFuturePromise < AbstractPromise
+      def initialize(default_executor)
+        super ResolvableFuture.new(self, default_executor)
+      end
+
+      def fulfill(value, raise_on_reassign)
+        resolve_with Fulfilled.new(value), raise_on_reassign
+      end
+
+      def reject(reason, raise_on_reassign)
+        resolve_with Rejected.new(reason), raise_on_reassign
+      end
+
+      public :evaluate_to
+
+      def evaluate_to!(*args, block)
+        evaluate_to(*args, block).wait!
+      end
+    end
+
+    # @abstract
+    class InnerPromise < AbstractPromise
+    end
+
+    # @abstract
+    class BlockedPromise < InnerPromise
+
+      private_class_method :new
+
+      def self.new_blocked_by1(blocker, *args, &block)
+        blocker_delayed = blocker.promise.delayed
+        delayed = blocker_delayed ?
LockFreeStack.new.push(blocker_delayed) : nil + promise = new(delayed, 1, *args, &block) + ensure + blocker.add_callback :callback_notify_blocked, promise, 0 + end + + def self.new_blocked_by2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed + blocker_delayed2 = blocker2.promise.delayed + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) + delayed = if blocker_delayed1 + if blocker_delayed2 + LockFreeStack.of2(blocker_delayed1, blocker_delayed2) + else + LockFreeStack.of1(blocker_delayed1) + end + else + blocker_delayed2 ? LockFreeStack.of1(blocker_delayed2) : nil + end + promise = new(delayed, 2, *args, &block) + ensure + blocker1.add_callback :callback_notify_blocked, promise, 0 + blocker2.add_callback :callback_notify_blocked, promise, 1 + end + + def self.new_blocked_by(blockers, *args, &block) + delayed = blockers.reduce(nil, &method(:add_delayed)) + promise = new(delayed, blockers.size, *args, &block) + ensure + blockers.each_with_index { |f, i| f.add_callback :callback_notify_blocked, promise, i } + end + + def self.add_delayed(delayed, blocker) + blocker_delayed = blocker.promise.delayed + if blocker_delayed + delayed = unless delayed + LockFreeStack.of1(blocker_delayed) + else + delayed.push(blocker_delayed) + end + end + delayed + end + + def initialize(delayed, blockers_count, future) + super(future) + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @Delayed = delayed + # noinspection RubyArgCount + @Countdown = AtomicFixnum.new blockers_count + end + + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) + resolvable = resolvable?(countdown, future, index) + + on_resolvable(future, index) if resolvable + end + + def delayed + @Delayed + end + + def touch + clear_propagate_touch if @Touched.make_true + end + + def touched? + @Touched.value + end + + # for inspection only + def blocked_by + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by + end + + private + + def clear_propagate_touch + @Delayed.clear_each { |o| propagate_touch o } if @Delayed + end + + def propagate_touch(stack_or_element = @Delayed) + if stack_or_element.is_a? LockFreeStack + stack_or_element.each { |element| propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end + end + + # @return [true,false] if resolvable + def resolvable?(countdown, future, index) + countdown.zero? + end + + def process_on_blocker_resolution(future, index) + @Countdown.decrement + end + + def on_resolvable(resolved_future, index) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super delayed, 1, Future.new(self, default_executor) + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.fulfilled? 
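+          # fulfilled: run the task with the blocker's result on the requested executor;
+          # a rejection falls through to the else branch and is propagated without running the task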
+ Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.rejected? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_resolvable(resolved_future, index) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to *args, task + end + end + end + end + + # will be immediately resolved + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).resolve_with(RESOLVED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, fulfilled, value, reason) + super Future.new(self, default_executor). + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) + end + end + + class AbstractFlatPromise < BlockedPromise + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) + end + + def add_delayed_of(future) + if touched? + propagate_touch future.promise.delayed + else + BlockedPromise.add_delayed @Delayed, future + clear_propagate_touch if touched? + end + end + + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with RESOLVED + return countdown + end + + value = internal_state.value + case value + when Future, Event + add_delayed_of value + value.add_callback :callback_notify_blocked, self, nil + countdown + else + resolve_with RESOLVED + end + end + countdown + end + + end + + class FlatFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? 
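+            # a rejection short-circuits the flattening and propagates directly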
+ resolve_with internal_state + return countdown + end + + value = internal_state.value + case value + when Future + add_delayed_of value + value.add_callback :callback_notify_blocked, self, nil + countdown + when Event + evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with internal_state + return 0 + end + + value = internal_state.value + case value + when Future + add_delayed_of value + value.add_callback :callback_notify_blocked, self, nil + else + resolve_with internal_state + end + + 1 + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) + @result = nil + end + + private + + def process_on_blocker_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index + end + + def on_resolvable(resolved_future, index) + resolve_with @result + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count) + + on_resolvable nil, nil if blockers_count == 0 + end + + def process_on_blocker_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? 
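+        # note: each blocker writes only to its own index, so the slots never contend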
+ @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index + end + + def on_resolvable(resolved_future, index) + all_fulfilled = true + values = Array.new(@Resolutions.size) + reasons = Array.new(@Resolutions.size) + + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled + end + + if all_fulfilled + resolve_with FulfilledArray.new(values) + else + resolve_with PartiallyRejected.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + + on_resolvable nil, nil if blockers_count == 0 + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + # @abstract + class AbstractAnyPromise < BlockedPromise + end + + class AnyResolvedFuturePromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state, false + end + end + + class AnyResolvedEventPromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED, false + end + end + + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise + + private + + def resolvable?(countdown, future, index) + future.fulfilled? || + # inlined super from BlockedPromise + countdown.zero? + end + end + + class DelayPromise < InnerPromise + + def initialize(default_executor) + super event = Event.new(self, default_executor) + @Delayed = LockFreeStack.new.push self + # TODO (pitr-ch 20-Dec-2016): implement directly without callback? + event.on_resolution!(@Delayed.peek) { |stack_node| stack_node.value = nil } + end + + def touch + @Future.resolve_with RESOLVED + end + + def delayed + @Delayed + end + + end + + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time
+                            @IntendedTime
+                          else
+                            now + @IntendedTime
+                          end
+          [0, schedule_time.to_f - now.to_f].max
+        end
+
+        Concurrent.global_timer_set.post(in_seconds) do
+          @Future.resolve_with RESOLVED
+        end
+      end
+    end
+
+    extend FactoryMethods
+
+    private_constant :AbstractPromise,
+                     :ResolvableEventPromise,
+                     :ResolvableFuturePromise,
+                     :InnerPromise,
+                     :BlockedPromise,
+                     :BlockedTaskPromise,
+                     :ThenPromise,
+                     :RescuePromise,
+                     :ChainPromise,
+                     :ImmediateEventPromise,
+                     :ImmediateFuturePromise,
+                     :AbstractFlatPromise,
+                     :FlatFuturePromise,
+                     :FlatEventPromise,
+                     :RunFuturePromise,
+                     :ZipEventEventPromise,
+                     :ZipFutureEventPromise,
+                     :EventWrapperPromise,
+                     :FutureWrapperPromise,
+                     :ZipFuturesPromise,
+                     :ZipEventsPromise,
+                     :AbstractAnyPromise,
+                     :AnyResolvedFuturePromise,
+                     :AnyFulfilledFuturePromise,
+                     :AnyResolvedEventPromise,
+                     :DelayPromise,
+                     :ScheduledPromise
+
+
+  end
+end
+
+# TODO try stealing pool, each thread has its own queue
+# TODO (pitr-ch 18-Dec-2016): doc macro debug method
+# TODO (pitr-ch 18-Dec-2016): add macro noting that debug methods may change api without warning
+
+module Concurrent
+  module Promises
+
+    class Future < AbstractEventFuture
+
+      module ActorIntegration
+        # Asks the actor with its value.
+        # @return [Future] new future with the response from the actor
+        def then_ask(actor)
+          self.then { |v| actor.ask(v) }.flat
+        end
+      end
+
+      include ActorIntegration
+    end
+
+    class Channel < Concurrent::Synchronization::Object
+      safe_initialization!
+
+      # Default size of the Channel, makes it accept an unlimited number of messages.
+      UNLIMITED = Object.new
+      UNLIMITED.singleton_class.class_eval do
+        include Comparable
+
+        def <=>(other)
+          1
+        end
+
+        def to_s
+          'unlimited'
+        end
+      end
+
+      # A channel to pass messages between promises. The size is limited to support back pressure.
+      # @param [Integer, UNLIMITED] size the maximum number of messages stored in the channel.
+      def initialize(size = UNLIMITED)
+        super()
+        @Size = size
+        # TODO (pitr-ch 26-Dec-2016): replace with lock-free implementation
+        @Mutex = Mutex.new
+        @Probes = []
+        @Messages = []
+        @PendingPush = []
+      end
+
+
+      # Returns a future which will fulfill when the message is added to the channel. Its value is the message.
+      # @param [Object] message
+      # @return [Future]
+      def push(message)
+        @Mutex.synchronize do
+          while true
+            if @Probes.empty?
+              if @Size > @Messages.size
+                @Messages.push message
+                return Promises.fulfilled_future message
+              else
+                pushed = Promises.resolvable_future
+                @PendingPush.push [message, pushed]
+                return pushed.with_hidden_resolvable
+              end
+            else
+              probe = @Probes.shift
+              if probe.fulfill [self, message], false
+                return Promises.fulfilled_future(message)
+              end
+            end
+          end
+        end
+      end
+
+      # Returns a future which will become fulfilled with a value from the channel when one is available.
+      # @param [ResolvableFuture] probe the future which will be fulfilled with a channel value
+      # @return [Future] the probe, its value will be the message when available.
+      def pop(probe = Concurrent::Promises.resolvable_future)
+        # TODO (pitr-ch 26-Dec-2016): improve performance
+        pop_for_select(probe).then(&:last)
+      end
+
+      # @!visibility private
+      def pop_for_select(probe = Concurrent::Promises.resolvable_future)
+        @Mutex.synchronize do
+          if @Messages.empty?
+            @Probes.push probe
+          else
+            message = @Messages.shift
+            probe.fulfill [self, message]
+
+            unless @PendingPush.empty?
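+              # a message was just consumed, so the oldest blocked push can complete now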
+              message, pushed = @PendingPush.shift
+              @Messages.push message
+              pushed.fulfill message
+            end
+          end
+        end
+        probe
+      end
+
+      # @return [String] Short string representation.
+      def to_s
+        format '<#%s:0x%x size:%s>', self.class, object_id << 1, @Size
+      end
+
+      alias_method :inspect, :to_s
+    end
+
+    class Future < AbstractEventFuture
+      module NewChannelIntegration
+
+        # @param [Channel] channel to push to.
+        # @return [Future] a future which is fulfilled after the message is pushed to the channel.
+        #   May take a moment if the channel is full.
+        def then_push_channel(channel)
+          self.then { |value| channel.push value }.flat_future
+        end
+
+        # TODO (pitr-ch 26-Dec-2016): does it make sense to have rescue and chain variants as well, check other integrations as well
+      end
+
+      include NewChannelIntegration
+    end
+
+    module FactoryMethods
+
+      module NewChannelIntegration
+
+        # Selects a channel which is ready to be read from.
+        # @param [Channel] channels
+        # @return [Future] a future which is fulfilled with the pair [channel, message] when one of the
+        #   channels is available for reading
+        def select_channel(*channels)
+          probe = Promises.resolvable_future
+          channels.each { |ch| ch.pop_for_select probe }
+          probe
+        end
+      end
+
+      include NewChannelIntegration
+    end
+
+  end
+end
diff --git a/lib/concurrent/edge/throttle.rb b/lib/concurrent/edge/throttle.rb
new file mode 100644
index 000000000..f1cd060d7
--- /dev/null
+++ b/lib/concurrent/edge/throttle.rb
@@ -0,0 +1,185 @@
+module Concurrent
+  # @!macro [new] throttle.example.throttled_block
+  #   @example
+  #     max_two = Throttle.new 2
+  #     10.times.map do
+  #       Thread.new do
+  #         max_two.throttled_block do
+  #           # Only 2 at the same time
+  #           do_stuff
+  #         end
+  #       end
+  #     end
+  # @!macro [new] throttle.example.throttled_future_chain
+  #   @example
+  #     throttle.throttled_future_chain do |trigger|
+  #       trigger.
+  #           # 2 throttled promises
+  #           chain { 1 }.
+  #           then(&:succ)
+  #     end
+  # @!macro [new] throttle.example.then_throttled_by
+  #   @example
+  #     data = (1..5).to_a
+  #     db = data.reduce({}) { |h, v| h.update v => v.to_s }
+  #     max_two = Throttle.new 2
+  #
+  #     futures = data.map do |data|
+  #       Promises.future(data) do |data|
+  #         # un-throttled, concurrency level equal to data.size
+  #         data + 1
+  #       end.then_throttled_by(max_two, db) do |v, db|
+  #         # throttled, only 2 tasks executed at the same time
+  #         # e.g. limiting access to db
+  #         db[v]
+  #       end
+  #     end
+  #
+  #     futures.map(&:value!) # => [2, 3, 4, 5, nil]
+
+  # A tool to manage the concurrency level of future tasks.
+  #
+  # @!macro throttle.example.then_throttled_by
+  # @!macro throttle.example.throttled_future_chain
+  # @!macro throttle.example.throttled_block
+  class Throttle < Synchronization::Object
+    # TODO (pitr-ch 21-Dec-2016): consider using sized channel for implementation instead when available
+
+    safe_initialization!
+    private *attr_atomic(:can_run)
+
+    # New throttle.
+    # @param [Integer] limit
+    def initialize(limit)
+      super()
+      @Limit = limit
+      self.can_run = limit
+      @Queue = LockFreeQueue.new
+    end
+
+    # @return [Integer] The limit.
+    def limit
+      @Limit
+    end
+
+    # Returns a new event which will be resolved when the depending task can execute.
+    # It has to be used, and after the critical work is done {#release} must be called exactly once.
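+    # @example Editor's sketch — manual pairing of trigger and release; `do_critical_work` is a
+    #   hypothetical placeholder (not part of the original docs)
+    #   throttle.trigger.wait # blocks until a slot is free
+    #   begin
+    #     do_critical_work
+    #   ensure
+    #     throttle.release
+    #   end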
+    # @return [Promises::Event]
+    # @see #release
+    def trigger
+      while true
+        current_can_run = can_run
+        if compare_and_set_can_run current_can_run, current_can_run - 1
+          if current_can_run > 0
+            return Promises.resolved_event
+          else
+            event = Promises.resolvable_event
+            @Queue.push event
+            return event
+          end
+        end
+      end
+    end
+
+    # Has to be called once for each {#trigger} when it is ok to execute another throttled task.
+    # @return [self]
+    # @see #trigger
+    def release
+      while true
+        current_can_run = can_run
+        if compare_and_set_can_run current_can_run, current_can_run + 1
+          if current_can_run < 0
+            Thread.pass until (trigger = @Queue.pop)
+            trigger.resolve
+          end
+          return self
+        end
+      end
+    end
+
+    # Blocks the current thread until the block can be executed.
+    # @yield to the throttled block
+    # @yieldreturn [Object] is used as the result of the method
+    # @return [Object] the result of the block
+    # @!macro throttle.example.throttled_block
+    def throttled_block(&block)
+      trigger.wait
+      block.call
+    ensure
+      release
+    end
+
+    # @return [String] Short string representation.
+    def to_s
+      format '<#%s:0x%x limit:%s can_run:%d>', self.class, object_id << 1, @Limit, can_run
+    end
+
+    alias_method :inspect, :to_s
+
+    module PromisesIntegration
+
+      # Allows a chain of promises to be throttled.
+      # @yield [trigger] a trigger which has to be used to build up the chain of promises, the last one
+      #   is the result of the block. When the last one resolves, {Throttle#release} is called on the throttle.
+      # @yieldparam [Promises::Event, Promises::Future] trigger
+      # @yieldreturn [Promises::Event, Promises::Future] The final future of the throttled chain.
+      # @return [Promises::Event, Promises::Future] The final future of the throttled chain.
+      # @!macro throttle.example.throttled_future_chain
+      def throttled_future_chain(&throttled_futures)
+        throttled_futures.call(trigger).on_resolution! { release }
+      end
+
+      # Behaves as {Promises::FactoryMethods#future} but the future is throttled.
+      # @return [Promises::Future]
+      # @see Promises::FactoryMethods#future
+      def throttled_future(*args, &task)
+        trigger.chain(*args, &task).on_resolution! { release }
+      end
+    end
+
+    include PromisesIntegration
+  end
+
+  module Promises
+
+    class AbstractEventFuture < Synchronization::Object
+      module ThrottleIntegration
+        def throttled_by(throttle, &throttled_futures)
+          a_trigger = self & self.chain { throttle.trigger }.flat_event
+          throttled_futures.call(a_trigger).on_resolution! { throttle.release }
+        end
+
+        # Behaves as {Promises::AbstractEventFuture#chain} but it is throttled.
+        # @return [Promises::Future, Promises::Event]
+        # @see Promises::AbstractEventFuture#chain
+        def chain_throttled_by(throttle, *args, &block)
+          throttled_by(throttle) { |trigger| trigger.chain(*args, &block) }
+        end
+      end
+
+      include ThrottleIntegration
+    end
+
+    class Future < AbstractEventFuture
+      module ThrottleIntegration
+
+        # Behaves as {Promises::Future#then} but it is throttled.
+        # @return [Promises::Future]
+        # @see Promises::Future#then
+        # @!macro throttle.example.then_throttled_by
+        def then_throttled_by(throttle, *args, &block)
+          throttled_by(throttle) { |trigger| trigger.then(*args, &block) }
+        end
+
+        # Behaves as {Promises::Future#rescue} but it is throttled.
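+        # @example Editor's sketch, assuming `max_two = Throttle.new 2` (not part of the original docs)
+        #   Promises.rejected_future(StandardError.new('boom')).
+        #       rescue_throttled_by(max_two) { |reason| reason.message }.value # => 'boom'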
+ # @return [Promises::Future] + # @see Promises::Future#rescue + def rescue_throttled_by(throttle, *args, &block) + throttled_by(throttle) { |trigger| trigger.rescue(*args, &block) } + end + end + + include ThrottleIntegration + end + end +end diff --git a/lib/concurrent/errors.rb b/lib/concurrent/errors.rb index 7d1553638..b69fec01f 100644 --- a/lib/concurrent/errors.rb +++ b/lib/concurrent/errors.rb @@ -30,7 +30,18 @@ module Concurrent # Raised when an attempt is made to modify an immutable object # (such as an `IVar`) after its final state has been set. - MultipleAssignmentError = Class.new(Error) + class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end # Raised by an `Executor` when it is unable to process a given task, # possibly because of a reject policy or other internal error. @@ -43,4 +54,16 @@ module Concurrent # Raised when an operation times out. TimeoutError = Class.new(Error) + # Aggregates multiple exceptions. + class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + end diff --git a/lib/concurrent/synchronization/object.rb b/lib/concurrent/synchronization/object.rb index e777b5546..1b8a3c296 100644 --- a/lib/concurrent/synchronization/object.rb +++ b/lib/concurrent/synchronization/object.rb @@ -70,8 +70,8 @@ def self.safe_initialization? # any instance variables with CamelCase names and isn't {.safe_initialization?}. def self.ensure_safe_initialization_when_final_fields_are_present Object.class_eval do - def self.new(*) - object = super + def self.new(*args, &block) + object = super(*args, &block) ensure has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } if has_final_field && !safe_initialization? diff --git a/lib/concurrent/synchronization/rbx_object.rb b/lib/concurrent/synchronization/rbx_object.rb index 302535992..b9a0e3f90 100644 --- a/lib/concurrent/synchronization/rbx_object.rb +++ b/lib/concurrent/synchronization/rbx_object.rb @@ -30,6 +30,7 @@ def #{name}=(value) def full_memory_barrier # Rubinius instance variables are not volatile so we need to insert barrier + # TODO (pitr 26-Nov-2015): check comments like ^ Rubinius.memory_barrier end end diff --git a/spec/concurrent/actor_spec.rb b/spec/concurrent/actor_spec.rb index be3b74dc5..ee77bd21e 100644 --- a/spec/concurrent/actor_spec.rb +++ b/spec/concurrent/actor_spec.rb @@ -24,7 +24,7 @@ def initialize(queue) def on_message(message) case message when :child - AdHoc.spawn(:pong, @queue) { |queue| -> m { queue << m } } + AdHoc.spawn!(:pong, @queue) { |queue| -> m { queue << m } } else @queue << message message @@ -33,16 +33,16 @@ def on_message(message) end it 'forbids Immediate executor' do - expect { Utils::AdHoc.spawn name: 'test', executor: ImmediateExecutor.new }.to raise_error + expect { Utils::AdHoc.spawn! name: 'test', executor: ImmediateExecutor.new }.to raise_error end describe 'spawning' do - describe 'Actor#spawn' do + describe 'Actor#spawn!' 
do behaviour = -> v { -> _ { v } } - subjects = { spawn: -> { Actor.spawn(AdHoc, :ping, 'arg', &behaviour) }, - context_spawn: -> { AdHoc.spawn(:ping, 'arg', &behaviour) }, - spawn_by_hash: -> { Actor.spawn(class: AdHoc, name: :ping, args: ['arg'], &behaviour) }, - context_spawn_by_hash: -> { AdHoc.spawn(name: :ping, args: ['arg'], &behaviour) } } + subjects = { spawn: -> { Actor.spawn!(AdHoc, :ping, 'arg', &behaviour) }, + context_spawn: -> { AdHoc.spawn!(:ping, 'arg', &behaviour) }, + spawn_by_hash: -> { Actor.spawn!(class: AdHoc, name: :ping, args: ['arg'], &behaviour) }, + context_spawn_by_hash: -> { AdHoc.spawn!(name: :ping, args: ['arg'], &behaviour) } } subjects.each do |desc, subject_definition| describe desc do @@ -78,7 +78,7 @@ def on_message(message) it 'terminates on failed initialization' do a = AdHoc.spawn(name: :fail, logger: Concurrent::NULL_LOGGER) { raise } - expect(a.ask(nil).wait.failed?).to be_truthy + expect(a.ask(nil).wait.rejected?).to be_truthy expect(a.ask!(:terminated?)).to be_truthy end @@ -89,14 +89,14 @@ def on_message(message) end it 'terminates on failed message processing' do - a = AdHoc.spawn(name: :fail, logger: Concurrent::NULL_LOGGER) { -> _ { raise } } - expect(a.ask(nil).wait.failed?).to be_truthy + a = AdHoc.spawn!(name: :fail, logger: Concurrent::NULL_LOGGER) { -> _ { raise } } + expect(a.ask(nil).wait.rejected?).to be_truthy expect(a.ask!(:terminated?)).to be_truthy end end describe 'messaging' do - subject { AdHoc.spawn(:add) { c = 0; -> v { c = c + v } } } + subject { AdHoc.spawn!(:add) { c = 0; -> v { c = c + v } } } specify do subject.tell(1).tell(1) subject << 1 << 1 @@ -107,10 +107,10 @@ def on_message(message) describe 'children' do let(:parent) do - AdHoc.spawn(:parent) do + AdHoc.spawn!(:parent) do -> message do if message == :child - AdHoc.spawn(:child) { -> _ { parent } } + AdHoc.spawn!(:child) { -> _ { parent } } else children end @@ -128,12 +128,12 @@ def on_message(message) end describe 'envelope' do - subject { AdHoc.spawn(:subject) { -> _ { envelope } } } + subject { AdHoc.spawn!(:subject) { -> _ { envelope } } } specify do envelope = subject.ask!('a') expect(envelope).to be_a_kind_of Envelope expect(envelope.message).to eq 'a' - expect(envelope.future).to be_completed + expect(envelope.future).to be_resolved expect(envelope.future.value).to eq envelope expect(envelope.sender).to eq Thread.current terminate_actors subject @@ -142,8 +142,8 @@ def on_message(message) describe 'termination' do subject do - AdHoc.spawn(:parent) do - child = AdHoc.spawn(:child) { -> v { v } } + AdHoc.spawn!(:parent) do + child = AdHoc.spawn!(:child) { -> v { v } } -> v { child } end end @@ -171,8 +171,8 @@ def on_message(message) describe 'message redirecting' do let(:parent) do - AdHoc.spawn(:parent) do - child = AdHoc.spawn(:child) { -> m { m+1 } } + AdHoc.spawn!(:parent) do + child = AdHoc.spawn!(:child) { -> m { m+1 } } -> message do if message == :child child @@ -192,9 +192,9 @@ def on_message(message) queue = Queue.new failure = nil # FIXME this leads to weird message processing ordering - # failure = AdHoc.spawn(:failure) { -> m { terminate! } } + # failure = AdHoc.spawn!(:failure) { -> m { terminate! 
} } monitor = AdHoc.spawn!(:monitor) do - failure = AdHoc.spawn(:failure) { -> m { m } } + failure = AdHoc.spawn!(:failure) { -> m { m } } failure << :link -> m { queue << [m, envelope.sender] } end @@ -209,7 +209,7 @@ def on_message(message) queue = Queue.new failure = nil monitor = AdHoc.spawn!(:monitor) do - failure = AdHoc.spawn(name: :failure, link: true) { -> m { m } } + failure = AdHoc.spawn!(name: :failure, link: true) { -> m { m } } -> m { queue << [m, envelope.sender] } end @@ -225,8 +225,8 @@ def on_message(message) queue = Queue.new resuming_behaviour = Behaviour.restarting_behaviour_definition(:resume!) - test = AdHoc.spawn name: :tester, behaviour_definition: resuming_behaviour do - actor = AdHoc.spawn name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do + test = AdHoc.spawn! name: :tester, behaviour_definition: resuming_behaviour do + actor = AdHoc.spawn! name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do queue << :init -> m { m == :add ? 1 : pass } end @@ -248,8 +248,8 @@ def on_message(message) it 'pauses on error and resets' do queue = Queue.new - test = AdHoc.spawn name: :tester, behaviour_definition: Behaviour.restarting_behaviour_definition do - actor = AdHoc.spawn name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do + test = AdHoc.spawn! name: :tester, behaviour_definition: Behaviour.restarting_behaviour_definition do + actor = AdHoc.spawn! name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do queue << :init -> m { m == :object_id ? self.object_id : pass } end @@ -284,9 +284,9 @@ def on_message(message) end end - test = AdHoc.spawn name: :tester, behaviour_definition: resuming_behaviour do + test = AdHoc.spawn! name: :tester, behaviour_definition: resuming_behaviour do - actor = AdHoc.spawn name: :pausing, + actor = AdHoc.spawn! name: :pausing, behaviour_definition: Behaviour.restarting_behaviour_definition do queue << :init -> m { m == :add ? 1 : pass } @@ -316,7 +316,7 @@ def on_message(message) it 'supports asks', buggy: true do children = Queue.new pool = Concurrent::Actor::Utils::Pool.spawn! 'pool', 5 do |index| - worker = Concurrent::Actor::Utils::AdHoc.spawn name: "worker-#{index}", supervised: true do + worker = Concurrent::Actor::Utils::AdHoc.spawn! 
name: "worker-#{index}", supervised: true do lambda do |message| fail if message == :fail 5 + message diff --git a/spec/concurrent/edge/future_spec.rb b/spec/concurrent/edge/future_spec.rb deleted file mode 100644 index d9ce397a5..000000000 --- a/spec/concurrent/edge/future_spec.rb +++ /dev/null @@ -1,478 +0,0 @@ -require 'concurrent-edge' -require 'thread' - -describe 'Concurrent::Edge futures', edge: true do - - describe 'chain_completable' do - it 'event' do - b = Concurrent.event - a = Concurrent.event.chain_completable(b) - a.complete - expect(b).to be_completed - end - - it 'future' do - b = Concurrent.future - a = Concurrent.future.chain_completable(b) - a.success :val - expect(b).to be_completed - expect(b.value).to eq :val - end - end - - describe '.post' do - it 'executes tasks asynchronously' do - queue = Queue.new - value = 12 - Concurrent.post { queue.push(value) } - Concurrent.post(:io) { queue.push(value) } - expect(queue.pop).to eq value - expect(queue.pop).to eq value - end - end - - describe '.future' do - it 'executes' do - future = Concurrent.future { 1 + 1 } - expect(future.value!).to eq 2 - - future = Concurrent.succeeded_future(1).then { |v| v + 1 } - expect(future.value!).to eq 2 - end - end - - describe '.delay' do - it 'delays execution' do - delay = Concurrent.delay { 1 + 1 } - expect(delay.completed?).to eq false - expect(delay.value!).to eq 2 - - delay = Concurrent.succeeded_future(1).delay.then { |v| v + 1 } - expect(delay.completed?).to eq false - expect(delay.value!).to eq 2 - end - end - - describe '.schedule' do - it 'scheduled execution' do - start = Time.now.to_f - queue = Queue.new - future = Concurrent.schedule(0.1) { 1 + 1 }.then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } - - expect(future.value!).to eq queue - expect(queue.pop).to eq 2 - expect(queue.pop).to be >= 0.09 - - start = Time.now.to_f - queue = Queue.new - future = Concurrent. - succeeded_future(1). - schedule(0.1). - then { |v| v + 1 }. - then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } - - expect(future.value!).to eq queue - expect(queue.pop).to eq 2 - expect(queue.pop).to be >= 0.09 - end - - it 'scheduled execution in graph' do - start = Time.now.to_f - queue = Queue.new - future = Concurrent. - future { sleep 0.1; 1 }. - schedule(0.1). - then { |v| v + 1 }. - then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } - - future.wait! 
- expect(future.value!).to eq queue - expect(queue.pop).to eq 2 - expect(queue.pop).to be >= 0.09 - end - - end - - describe '.event' do - specify do - completable_event = Concurrent.event - one = completable_event.chain { 1 } - join = Concurrent.zip(completable_event).chain { 1 } - expect(one.completed?).to be false - completable_event.complete - expect(one.value!).to eq 1 - expect(join.wait.completed?).to be true - end - end - - describe '.future without block' do - specify do - completable_future = Concurrent.future - one = completable_future.then(&:succ) - join = Concurrent.zip_futures(completable_future).then { |v| v } - expect(one.completed?).to be false - completable_future.success 0 - expect(one.value!).to eq 1 - expect(join.wait!.completed?).to be true - expect(join.value!).to eq 0 - end - end - - describe '.any_complete' do - it 'continues on first result' do - f1 = Concurrent.future - f2 = Concurrent.future - f3 = Concurrent.future - - any1 = Concurrent.any_complete(f1, f2) - any2 = f2 | f3 - - f1.success 1 - f2.fail - - expect(any1.value!).to eq 1 - expect(any2.reason).to be_a_kind_of StandardError - end - end - - describe '.any_successful' do - it 'continues on first result' do - f1 = Concurrent.future - f2 = Concurrent.future - - any = Concurrent.any_successful(f1, f2) - - f1.fail - f2.success :value - - expect(any.value!).to eq :value - end - end - - describe '.zip' do - it 'waits for all results' do - a = Concurrent.future { 1 } - b = Concurrent.future { 2 } - c = Concurrent.future { 3 } - - z1 = a & b - z2 = Concurrent.zip a, b, c - z3 = Concurrent.zip a - z4 = Concurrent.zip - - expect(z1.value!).to eq [1, 2] - expect(z2.value!).to eq [1, 2, 3] - expect(z3.value!).to eq [1] - expect(z4.value!).to eq [] - - q = Queue.new - z1.then { |*args| q << args } - expect(q.pop).to eq [1, 2] - - z1.then { |a, b, c| q << [a, b, c] } - expect(q.pop).to eq [1, 2, nil] - - z2.then { |a, b, c| q << [a, b, c] } - expect(q.pop).to eq [1, 2, 3] - - z3.then { |a| q << a } - expect(q.pop).to eq 1 - - z3.then { |*a| q << a } - expect(q.pop).to eq [1] - - z4.then { |a| q << a } - expect(q.pop).to eq nil - - z4.then { |*a| q << a } - expect(q.pop).to eq [] - - expect(z1.then { |a, b| a+b }.value!).to eq 3 - expect(z1.then { |a, b| a+b }.value!).to eq 3 - expect(z1.then(&:+).value!).to eq 3 - expect(z2.then { |a, b, c| a+b+c }.value!).to eq 6 - - expect(Concurrent.future { 1 }.delay).to be_a_kind_of Concurrent::Edge::Future - expect(Concurrent.future { 1 }.delay.wait!).to be_completed - expect(Concurrent.event.complete.delay).to be_a_kind_of Concurrent::Edge::Event - expect(Concurrent.event.complete.delay.wait).to be_completed - - a = Concurrent.future { 1 } - b = Concurrent.future { raise 'b' } - c = Concurrent.future { raise 'c' } - - Concurrent.zip(a, b, c).chain { |*args| q << args } - expect(q.pop.flatten.map(&:class)).to eq [FalseClass, 0.class, NilClass, NilClass, NilClass, RuntimeError, RuntimeError] - Concurrent.zip(a, b, c).rescue { |*args| q << args } - expect(q.pop.map(&:class)).to eq [NilClass, RuntimeError, RuntimeError] - - expect(Concurrent.zip.wait(0.1)).to eq true - end - - context 'when a future raises an error' do - - let(:future) { Concurrent.future { raise 'error' } } - - it 'raises a concurrent error' do - expect { Concurrent.zip(future).value! 
}.to raise_error(Concurrent::Error)
-      end
-
-    end
-  end
-
-  describe '.zip_events' do
-    it 'waits for all and returns event' do
-      a = Concurrent.succeeded_future 1
-      b = Concurrent.failed_future :any
-      c = Concurrent.event.complete
-
-      z2 = Concurrent.zip_events a, b, c
-      z3 = Concurrent.zip_events a
-      z4 = Concurrent.zip_events
-
-      expect(z2.completed?).to be_truthy
-      expect(z3.completed?).to be_truthy
-      expect(z4.completed?).to be_truthy
-    end
-  end
-
-  describe 'Future' do
-    it 'has sync and async callbacks' do
-      callbacks_tester = ->(future) do
-        queue = Queue.new
-        future.on_completion(:io) { |result| queue.push("async on_completion #{ result.inspect }") }
-        future.on_completion! { |result| queue.push("sync on_completion #{ result.inspect }") }
-        future.on_success(:io) { |value| queue.push("async on_success #{ value.inspect }") }
-        future.on_success! { |value| queue.push("sync on_success #{ value.inspect }") }
-        future.on_failure(:io) { |reason| queue.push("async on_failure #{ reason.inspect }") }
-        future.on_failure! { |reason| queue.push("sync on_failure #{ reason.inspect }") }
-        future.wait
-        [queue.pop, queue.pop, queue.pop, queue.pop].sort
-      end
-      callback_results = callbacks_tester.call(Concurrent.future { :value })
-      expect(callback_results).to eq ["async on_completion [true, :value, nil]",
-                                      "async on_success :value",
-                                      "sync on_completion [true, :value, nil]",
-                                      "sync on_success :value"]
-
-      callback_results = callbacks_tester.call(Concurrent.future { raise 'error' })
-      expect(callback_results).to eq ["async on_completion [false, nil, #<RuntimeError: error>]",
-                                      "async on_failure #<RuntimeError: error>",
-                                      "sync on_completion [false, nil, #<RuntimeError: error>]",
-                                      "sync on_failure #<RuntimeError: error>"]
-    end
-
-    [:wait, :wait!, :value, :value!, :reason, :result].each do |method_with_timeout|
-      it "#{ method_with_timeout } supports setting timeout" do
-        start_latch = Concurrent::CountDownLatch.new
-        end_latch = Concurrent::CountDownLatch.new
-
-        future = Concurrent.future do
-          start_latch.count_down
-          end_latch.wait(1)
-        end
-
-        start_latch.wait(1)
-        future.send(method_with_timeout, 0.1)
-        expect(future).not_to be_completed
-        end_latch.count_down
-        future.wait
-      end
-    end
-
-
-    it 'chains' do
-      future0 = Concurrent.future { 1 }.then { |v| v + 2 } # both executed on default FAST_EXECUTOR
-      future1 = future0.then(:fast) { raise 'boo' } # executed on IO_EXECUTOR
-      future2 = future1.then { |v| v + 1 } # will fail with 'boo' error, executed on default FAST_EXECUTOR
-      future3 = future1.rescue { |err| err.message } # executed on default FAST_EXECUTOR
-      future4 = future0.chain { |success, value, reason| success } # executed on default FAST_EXECUTOR
-      future5 = future3.with_default_executor(:fast) # connects new future with different executor, the new future is completed when future3 is
-      future6 = future5.then(&:capitalize) # executes on IO_EXECUTOR because default was set to :io on future5
-      future7 = future0 & future3
-      future8 = future0.rescue { raise 'never happens' } # future0 succeeds so future8'll have same value as future 0
-
-      futures = [future0, future1, future2, future3, future4, future5, future6, future7, future8]
-      futures.each &:wait
-
-      table = futures.each_with_index.map do |f, i|
-        '%5i %7s %10s %6s %4s %6s' % [i, f.success?, f.value, f.reason,
-                                      (f.promise.executor if f.promise.respond_to?(:executor)),
-                                      f.default_executor]
-      end.unshift('index success value reason pool d.pool')
-
-      expect(table.join("\n")).to eq <<-TABLE.gsub(/^\s+\|/, '').strip
-        |index success value reason pool d.pool
-        |    0    true          3          io     io
-        |    1   false               boo fast     io
-        |    2   false               boo   io     io
-        |    3    true        boo          io     io
-        |    4    true       true          io     io
-        |    5    true        boo               fast
-        |    6    true        Boo        fast   fast
-        |    7    true [3, "boo"]                 io
-        |    8    true          3          io     io
-      TABLE
-    end
-
-    it 'constructs promise like tree' do
-      # if head of the tree is not constructed with #future but with #delay it does not start execute,
-      # it's triggered later by calling wait or value on any of the dependent futures or the delay itself
-      three = (head = Concurrent.delay { 1 }).then { |v| v.succ }.then(&:succ)
-      four = three.delay.then(&:succ)
-
-      # meaningful to_s and inspect defined for Future and Promise
-      expect(head.to_s).to match /<#Concurrent::Edge::Future:0x[\da-f]+ pending>/
-      expect(head.inspect).to(
-          match(/<#Concurrent::Edge::Future:0x[\da-f]+ pending blocks:\[<#Concurrent::Edge::ThenPromise:0x[\da-f]+ pending>\]>/))
-
-      # evaluates only up to three, four is left unevaluated
-      expect(three.value!).to eq 3
-      expect(four).not_to be_completed
-
-      expect(four.value!).to eq 4
-
-      # futures hidden behind two delays trigger evaluation of both
-      double_delay = Concurrent.delay { 1 }.delay.then(&:succ)
-      expect(double_delay.value!).to eq 2
-    end
-
-    it 'allows graphs' do
-      head = Concurrent.future { 1 }
-      branch1 = head.then(&:succ)
-      branch2 = head.then(&:succ).delay.then(&:succ)
-      results = [
-          Concurrent.zip(branch1, branch2).then { |b1, b2| b1 + b2 },
-          branch1.zip(branch2).then { |b1, b2| b1 + b2 },
-          (branch1 & branch2).then { |b1, b2| b1 + b2 }]
-
-      sleep 0.1
-      expect(branch1).to be_completed
-      expect(branch2).not_to be_completed
-
-      expect(results.map(&:value)).to eq [5, 5, 5]
-      expect(Concurrent.zip(branch1, branch2).value!).to eq [2, 3]
-    end
-
-    describe '#flat' do
-      it 'returns value of inner future' do
-        f = Concurrent.future { Concurrent.future { 1 } }.flat.then(&:succ)
-        expect(f.value!).to eq 2
-      end
-
-      it 'propagates failure of inner future' do
-        err = StandardError.new('boo')
-        f = Concurrent.future { Concurrent.failed_future(err) }.flat
-        expect(f.reason).to eq err
-      end
-
-      it 'it propagates failure of the future which was suppose to provide inner future' do
-        f = Concurrent.future { raise 'boo' }.flat
-        expect(f.reason.message).to eq 'boo'
-      end
-
-      it 'fails if inner value is not a future' do
-        f = Concurrent.future { 'boo' }.flat
-        expect(f.reason).to be_an_instance_of TypeError
-
-        f = Concurrent.future { Concurrent.completed_event }.flat
-        expect(f.reason).to be_an_instance_of TypeError
-      end
-
-      it 'propagates requests for values to delayed futures' do
-        expect(Concurrent.future { Concurrent.delay { 1 } }.flat.value!(0.1)).to eq 1
-      end
-    end
-
-    it 'completes future when Exception raised' do
-      f = Concurrent.future { raise Exception, 'fail' }
-      f.wait 1
-      expect(f).to be_completed
-      expect(f).to be_failed
-      expect { f.value! }.to raise_error(Exception, 'fail')
-    end
-  end
-
-  describe 'interoperability' do
-    it 'with actor' do
-      actor = Concurrent::Actor::Utils::AdHoc.spawn :doubler do
-        -> v { v * 2 }
-      end
-
-      expect(Concurrent.
-          future { 2 }.
-          then_ask(actor).
-          then { |v| v + 2 }.
-          value!).to eq 6
-    end
-
-    it 'with channel' do
-      ch1 = Concurrent::Channel.new
-      ch2 = Concurrent::Channel.new
-
-      result = Concurrent.select(ch1, ch2)
-      ch1.put 1
-      expect(result.value!).to eq [1, ch1]
-
-      Concurrent.
-          future { 1+1 }.
-          then_put(ch1)
-      result = Concurrent.
-          future { '%02d' }.
-          then_select(ch1, ch2).
- then { |format, (value, channel)| format format, value } - expect(result.value!).to eq '02' - end - end - - specify do - expect(Concurrent.future { :v }.value!).to eq :v - end - -end - -# def synchronize -# if @__mutex__do_not_use_directly.owned? -# yield -# else -# @__mutex__do_not_use_directly.synchronize { yield } -# # @__mutex__do_not_use_directly.synchronize do -# # locking = (Thread.current[:locking] ||= []) -# # locking.push self -# # puts "locking #{locking.size}" # : #{locking}" -# # begin -# # yield -# # ensure -# # if locking.size > 2 -# # # binding.pry -# # end -# # locking.pop -# # end -# # end -# end -# end - -__END__ - -puts '-- connecting existing promises' - -source = Concurrent.delay { 1 } -promise = Concurrent.promise -promise.connect_to source -p promise.future.value # 1 -# or just -p Concurrent.promise.connect_to(source).value - - -puts '-- using shortcuts' - -include Concurrent # includes Future::Shortcuts - -# now methods on Concurrent are accessible directly - -p delay { 1 }.value, future { 1 }.value # => 1\n1 - -promise = promise() -promise.connect_to(future { 3 }) -p promise.future.value # 3 - diff --git a/spec/concurrent/edge/promises_spec.rb b/spec/concurrent/edge/promises_spec.rb new file mode 100644 index 000000000..5635791aa --- /dev/null +++ b/spec/concurrent/edge/promises_spec.rb @@ -0,0 +1,595 @@ +require 'concurrent/edge/promises' +require 'thread' + + +describe 'Concurrent::Promises' do + + include Concurrent::Promises::FactoryMethods + + describe 'chain_resolvable' do + it 'event' do + b = resolvable_event + a = resolvable_event.chain_resolvable(b) + a.resolve + expect(b).to be_resolved + end + + it 'future' do + b = resolvable_future + a = resolvable_future.chain_resolvable(b) + a.fulfill :val + expect(b).to be_resolved + expect(b.value).to eq :val + end + end + + describe '.future' do + it 'executes' do + future = future { 1 + 1 } + expect(future.value!).to eq 2 + + future = fulfilled_future(1).then { |v| v + 1 } + expect(future.value!).to eq 2 + end + + it 'executes with args' do + future = future(1, 2, &:+) + expect(future.value!).to eq 3 + + future = fulfilled_future(1).then(1) { |v, a| v + 1 } + expect(future.value!).to eq 2 + end + end + + describe '.delay' do + + def behaves_as_delay(delay, value) + expect(delay.resolved?).to eq false + expect(delay.value!).to eq value + end + + specify do + behaves_as_delay delay { 1 + 1 }, 2 + behaves_as_delay fulfilled_future(1).delay.then { |v| v + 1 }, 2 + behaves_as_delay delay(1) { |a| a + 1 }, 2 + behaves_as_delay fulfilled_future(1).delay.then { |v| v + 1 }, 2 + end + end + + describe '.schedule' do + it 'scheduled execution' do + start = Time.now.to_f + queue = Queue.new + future = schedule(0.1) { 1 + 1 }.then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } + + expect(future.value!).to eq queue + expect(queue.pop).to eq 2 + expect(queue.pop).to be >= 0.09 + + start = Time.now.to_f + queue = Queue.new + future = resolved_event. + schedule(0.1). + then { 1 }. + then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } + + expect(future.value!).to eq queue + expect(queue.pop).to eq 1 + expect(queue.pop).to be >= 0.09 + end + + it 'scheduled execution in graph' do + start = Time.now.to_f + queue = Queue.new + future = future { sleep 0.1; 1 }. + schedule(0.1). + then { |v| v + 1 }. + then { |v| queue.push(v); queue.push(Time.now.to_f - start); queue } + + future.wait! 
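+      # by this point both the 0.1s sleep in the head future and the
+      # subsequent 0.1s schedule delay should have elapsed, so the
+      # assertions below only check a safe lower bound on the elapsed time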
+ expect(future.value!).to eq queue + expect(queue.pop).to eq 2 + expect(queue.pop).to be >= 0.09 + + scheduled = resolved_event.schedule(0.1) + expect(scheduled.resolved?).to be_falsey + scheduled.wait + expect(scheduled.resolved?).to be_truthy + end + + end + + describe '.event' do + specify do + resolvable_event = resolvable_event() + one = resolvable_event.chain(1) { |arg| arg } + join = zip(resolvable_event).chain { 1 } + expect(one.resolved?).to be false + resolvable_event.resolve + expect(one.value!).to eq 1 + expect(join.wait.resolved?).to be true + end + end + + describe '.future without block' do + specify do + resolvable_future = resolvable_future() + one = resolvable_future.then(&:succ) + join = zip_futures(resolvable_future).then { |v| v } + expect(one.resolved?).to be false + resolvable_future.fulfill 0 + expect(one.value!).to eq 1 + expect(join.wait!.resolved?).to be true + expect(join.value!).to eq 0 + end + end + + describe '.any_resolved' do + it 'continues on first result' do + f1 = resolvable_future + f2 = resolvable_future + f3 = resolvable_future + + any1 = any_resolved_future(f1, f2) + any2 = f2 | f3 + + f1.fulfill 1 + f2.reject StandardError.new + + expect(any1.value!).to eq 1 + expect(any2.reason).to be_a_kind_of StandardError + end + end + + describe '.any_fulfilled' do + it 'continues on first result' do + f1 = resolvable_future + f2 = resolvable_future + + any = any_fulfilled_future(f1, f2) + + f1.reject StandardError.new + f2.fulfill :value + + expect(any.value!).to eq :value + end + end + + describe '.zip' do + it 'waits for all results' do + a = future { 1 } + b = future { 2 } + c = future { 3 } + + z1 = a & b + z2 = zip a, b, c + z3 = zip a + z4 = zip + + expect(z1.value!).to eq [1, 2] + expect(z2.value!).to eq [1, 2, 3] + expect(z3.value!).to eq [1] + expect(z4.value!).to eq [] + + q = Queue.new + z1.then { |*args| q << args } + expect(q.pop).to eq [1, 2] + + z1.then { |a, b, c| q << [a, b, c] } + expect(q.pop).to eq [1, 2, nil] + + z2.then { |a, b, c| q << [a, b, c] } + expect(q.pop).to eq [1, 2, 3] + + z3.then { |a| q << a } + expect(q.pop).to eq 1 + + z3.then { |*a| q << a } + expect(q.pop).to eq [1] + + z4.then { |a| q << a } + expect(q.pop).to eq nil + + z4.then { |*a| q << a } + expect(q.pop).to eq [] + + expect(z1.then { |a, b| a+b }.value!).to eq 3 + expect(z1.then { |a, b| a+b }.value!).to eq 3 + expect(z1.then(&:+).value!).to eq 3 + expect(z2.then { |a, b, c| a+b+c }.value!).to eq 6 + + expect(future { 1 }.delay).to be_a_kind_of Concurrent::Promises::Future + expect(future { 1 }.delay.wait!).to be_resolved + expect(resolvable_event.resolve.delay).to be_a_kind_of Concurrent::Promises::Event + expect(resolvable_event.resolve.delay.wait).to be_resolved + + a = future { 1 } + b = future { raise 'b' } + c = future { raise 'c' } + + zip(a, b, c).chain { |*args| q << args } + expect(q.pop.flatten.map(&:class)).to eq [FalseClass, 0.class, NilClass, NilClass, NilClass, RuntimeError, RuntimeError] + zip(a, b, c).rescue { |*args| q << args } + expect(q.pop.map(&:class)).to eq [NilClass, RuntimeError, RuntimeError] + + expect(zip.wait(0.1)).to eq true + end + + context 'when a future raises an error' do + + let(:a_future) { future { raise 'error' } } + + it 'raises a concurrent error' do + expect { zip(a_future).value! 
}.to raise_error(StandardError)
+      end
+
+    end
+  end
+
+  describe '.zip_events' do
+    it 'waits for all and returns event' do
+      a = fulfilled_future 1
+      b = rejected_future :any
+      c = resolvable_event.resolve
+
+      z2 = zip_events a, b, c
+      z3 = zip_events a
+      z4 = zip_events
+
+      expect(z2.resolved?).to be_truthy
+      expect(z3.resolved?).to be_truthy
+      expect(z4.resolved?).to be_truthy
+    end
+  end
+
+  describe 'Future' do
+    it 'has sync and async callbacks' do
+      callbacks_tester = ->(future) do
+        queue = Queue.new
+        future.on_resolution_using(:io) { |result| queue.push("async on_resolution #{ result.inspect }") }
+        future.on_resolution! { |result| queue.push("sync on_resolution #{ result.inspect }") }
+        future.on_fulfillment_using(:io) { |value| queue.push("async on_fulfillment #{ value.inspect }") }
+        future.on_fulfillment! { |value| queue.push("sync on_fulfillment #{ value.inspect }") }
+        future.on_rejection_using(:io) { |reason| queue.push("async on_rejection #{ reason.inspect }") }
+        future.on_rejection! { |reason| queue.push("sync on_rejection #{ reason.inspect }") }
+        future.wait
+        [queue.pop, queue.pop, queue.pop, queue.pop].sort
+      end
+      callback_results = callbacks_tester.call(future { :value })
+      expect(callback_results).to eq ["async on_fulfillment :value",
+                                      "async on_resolution [true, :value, nil]",
+                                      "sync on_fulfillment :value",
+                                      "sync on_resolution [true, :value, nil]"]
+
+      callback_results = callbacks_tester.call(future { raise 'error' })
+      expect(callback_results).to eq ["async on_rejection #<RuntimeError: error>",
+                                      "async on_resolution [false, nil, #<RuntimeError: error>]",
+                                      "sync on_rejection #<RuntimeError: error>",
+                                      "sync on_resolution [false, nil, #<RuntimeError: error>]"]
+    end
+
+    [:wait, :wait!, :value, :value!, :reason, :result].each do |method_with_timeout|
+      it "#{ method_with_timeout } supports setting timeout" do
+        start_latch = Concurrent::CountDownLatch.new
+        end_latch = Concurrent::CountDownLatch.new
+
+        future = future do
+          start_latch.count_down
+          end_latch.wait(1)
+        end
+
+        start_latch.wait(1)
+        future.send(method_with_timeout, 0.1)
+        expect(future).not_to be_resolved
+        end_latch.count_down
+        future.wait
+      end
+    end
+
+
+    it 'chains' do
+      future0 = future { 1 }.then { |v| v + 2 } # both executed on the default executor (:io)
+      future1 = future0.then_on(:fast) { raise 'boo' } # executed on the :fast executor
+      future2 = future1.then { |v| v + 1 } # will reject with 'boo' error, executed on the default executor (:io)
+      future3 = future1.rescue { |err| err.message } # executed on the default executor (:io)
+      future4 = future0.chain { |success, value, reason| success } # executed on the default executor (:io)
+      future5 = future3.with_default_executor(:fast) # connects a new future with a different default executor; it resolves when future3 does
+      future6 = future5.then(&:capitalize) # executes on the :fast executor because future5 set it as the default
+      future7 = future0 & future3
+      future8 = future0.rescue { raise 'never happens' } # future0 fulfills, so future8 will have the same value as future0
+
+      futures = [future0, future1, future2, future3, future4, future5, future6, future7, future8]
+      futures.each &:wait
+
+      table = futures.each_with_index.map do |f, i|
+        '%5i %7s %10s %6s %4s %6s' % [i, f.fulfilled?, f.value, f.reason,
+                                      (f.promise.executor if f.promise.respond_to?(:executor)),
+                                      f.default_executor]
+      end.unshift('index success value reason pool d.pool')
+
+      expect(table.join("\n")).to eq <<-TABLE.gsub(/^\s+\|/, '').strip
+        |index success value reason pool d.pool
+        |    0    true          3          io     io
+        |    1   false               boo fast     io
+        |    2   false               boo   io     io
+        |    3    true        boo          io     io
+        |    4    true       true          io     io
+        |    5    true        boo               fast
+        |    6    true        Boo        fast   fast
+        |    7    true [3, "boo"]                 io
+        |    8    true          3          io     io
+      TABLE
+    end
+
+    it 'constructs promise like tree' do
+      # if the head of the tree is constructed with #delay instead of #future it does not start executing,
+      # it's triggered later by calling wait or value on any of the dependent futures or the delay itself
+      three = (head = delay { 1 }).then { |v| v.succ }.then(&:succ)
+      four = three.delay.then(&:succ)
+
+      # meaningful to_s and inspect defined for Future and Promise
+      expect(head.to_s).to match /<#Concurrent::Promises::Future:0x[\da-f]+ pending>/
+      expect(head.inspect).to(
+          match(/<#Concurrent::Promises::Future:0x[\da-f]+ pending>/))
+
+      # evaluates only up to three, four is left unevaluated
+      expect(three.value!).to eq 3
+      expect(four).not_to be_resolved
+
+      expect(four.value!).to eq 4
+
+      # futures hidden behind two delays trigger evaluation of both
+      double_delay = delay { 1 }.delay.then(&:succ)
+      expect(double_delay.value!).to eq 2
+    end
+
+    it 'allows graphs' do
+      head = future { 1 }
+      branch1 = head.then(&:succ)
+      branch2 = head.then(&:succ).delay.then(&:succ)
+      results = [
+          zip(branch1, branch2).then { |b1, b2| b1 + b2 },
+          branch1.zip(branch2).then { |b1, b2| b1 + b2 },
+          (branch1 & branch2).then { |b1, b2| b1 + b2 }]
+
+      Thread.pass until branch1.resolved?
+      expect(branch1).to be_resolved
+      expect(branch2).not_to be_resolved
+
+      expect(results.map(&:value)).to eq [5, 5, 5]
+      expect(zip(branch1, branch2).value!).to eq [2, 3]
+    end
+
+    describe '#flat' do
+      it 'returns value of inner future' do
+        f = future { future { 1 } }.flat.then(&:succ)
+        expect(f.value!).to eq 2
+      end
+
+      it 'propagates rejection of inner future' do
+        err = StandardError.new('boo')
+        f = future { rejected_future(err) }.flat
+        expect(f.reason).to eq err
+      end
+
+      it 'propagates rejection of the future which was supposed to provide inner future' do
+        f = future { raise 'boo' }.flat
+        expect(f.reason.message).to eq 'boo'
+      end
+
+      it 'rejects if inner value is not a future' do
+        f = future { 'boo' }.flat
+        expect(f.reason).to be_an_instance_of TypeError
+
+        f = future { resolved_event }.flat
+        expect(f.reason).to be_an_instance_of TypeError
+      end
+
+      it 'propagates requests for values to delayed futures' do
+        expect(future { delay { 1 } }.flat.value!(0.1)).to eq 1
+      end
+    end
+
+    it 'resolves future when Exception raised' do
+      f = future { raise Exception, 'reject' }
+      f.wait 1
+      expect(f).to be_resolved
+      expect(f).to be_rejected
+      expect { f.value! }.to raise_error(Exception, 'reject')
+    end
+
+    it 'runs' do
+      body = lambda do |v|
+        v += 1
+        v < 5 ? future(v, &body) : v
+      end
+      expect(future(0, &body).run.value!).to eq 5
+
+      body = lambda do |v|
+        v += 1
+        v < 5 ? future(v, &body) : raise(v.to_s)
+      end
+      expect(future(0, &body).run.reason.message).to eq '5'
+    end
+  end
+
+  describe 'interoperability' do
+    it 'with actor' do
+      actor = Concurrent::Actor::Utils::AdHoc.spawn :doubler do
+        -> v { v * 2 }
+      end
+
+      expect(future { 2 }.
+          then_ask(actor).
+          then { |v| v + 2 }.
+          value!).to eq 6
+    end
+
+    it 'with channel' do
+      ch1 = Concurrent::Promises::Channel.new
+      ch2 = Concurrent::Promises::Channel.new
+
+      result = Concurrent::Promises.select_channel(ch1, ch2)
+      ch1.push 1
+      expect(result.value!).to eq [ch1, 1]
+
+
+      future { 1+1 }.then_push_channel(ch1)
+      result = (Concurrent::Promises.future { '%02d' } & Concurrent::Promises.select_channel(ch1, ch2)).
+          then { |format, (channel, value)| format format, value }
+      expect(result.value!).to eq '02'
+    end
+  end
+
+  describe 'Cancellation', edge: true do
+    specify do
+      source, token = Concurrent::Cancellation.create
+
+      futures = Array.new(2) { future(token) { |t| t.loop_until_canceled { Thread.pass }; :done } }
+
+      source.cancel
+      futures.each do |future|
+        expect(future.value!).to eq :done
+      end
+    end
+
+    specify do
+      source, token = Concurrent::Cancellation.create
+      source.cancel
+      expect(token.canceled?).to be_truthy
+
+      cancellable_branch = Concurrent::Promises.delay { 1 }
+      expect((cancellable_branch | token.to_event).value).to be_nil
+      expect(cancellable_branch.resolved?).to be_falsey
+    end
+
+    specify do
+      source, token = Concurrent::Cancellation.create
+
+      cancellable_branch = Concurrent::Promises.delay { 1 }
+      expect(any_resolved_future(cancellable_branch, token.to_event).value).to eq 1
+      expect(cancellable_branch.resolved?).to be_truthy
+    end
+
+    specify do
+      source, token = Concurrent::Cancellation.create(
+          Concurrent::Promises.resolvable_future, false, nil, err = StandardError.new('Cancelled'))
+      source.cancel
+      expect(token.canceled?).to be_truthy
+
+      cancellable_branch = Concurrent::Promises.delay { 1 }
+      expect((cancellable_branch | token.to_future).reason).to eq err
+      expect(cancellable_branch.resolved?).to be_falsey
+    end
+  end
+
+  describe 'Throttling' do
+    specify do
+      limit = 4
+      throttle = Concurrent::Throttle.new limit
+      counter = Concurrent::AtomicFixnum.new
+      testing = -> *args do
+        counter.increment
+        sleep rand * 0.02 + 0.02
+        # the counter stays at or below limit because the calls are throttled
+        v = counter.decrement + 1
+        v
+      end
+
+      expect(Concurrent::Promises.zip(
+          *20.times.map do |i|
+            throttle.throttled_future_chain { |trigger| trigger.then(throttle, &testing) }
+          end).value!.all? { |v| v <= limit }).to be_truthy
+
+      expect(Concurrent::Promises.zip(
+          *20.times.map do |i|
+            throttle.throttled_future(throttle, &testing)
+          end).value!.all? { |v| v <= limit }).to be_truthy
+
+      expect(Concurrent::Promises.zip(
+          *20.times.map do |i|
+            Concurrent::Promises.
+                fulfilled_future(i).
+                throttled_by(throttle) { |trigger| trigger.then(throttle, &testing) }
+          end).value!.all? { |v| v <= limit }).to be_truthy
+
+      expect(Concurrent::Promises.zip(
+          *20.times.map do |i|
+            Concurrent::Promises.
+                fulfilled_future(i).
+                then_throttled_by(throttle, throttle, &testing)
+          end).value!.all?
{ |v| v <= limit }).to be_truthy + end + end + + describe 'Promises::Channel' do + specify do + channel = Concurrent::Promises::Channel.new 1 + + pushed1 = channel.push 1 + expect(pushed1.resolved?).to be_truthy + expect(pushed1.value!).to eq 1 + + pushed2 = channel.push 2 + expect(pushed2.resolved?).to be_falsey + + popped = channel.pop + expect(pushed1.value!).to eq 1 + expect(pushed2.resolved?).to be_truthy + expect(pushed2.value!).to eq 2 + expect(popped.value!).to eq 1 + + popped = channel.pop + expect(popped.value!).to eq 2 + + popped = channel.pop + expect(popped.resolved?).to be_falsey + + pushed3 = channel.push 3 + expect(popped.value!).to eq 3 + expect(pushed3.resolved?).to be_truthy + expect(pushed3.value!).to eq 3 + end + + specify do + ch1 = Concurrent::Promises::Channel.new + ch2 = Concurrent::Promises::Channel.new + ch3 = Concurrent::Promises::Channel.new + + add = -> do + (ch1.pop & ch2.pop).then do |a, b| + if a == :done && b == :done + :done + else + ch3.push a + b + add.call + end + end + end + + ch1.push 1 + ch2.push 2 + ch1.push 'a' + ch2.push 'b' + ch1.push nil + ch2.push true + + result = Concurrent::Promises.future(&add).run.result + expect(result[0..1]).to eq [false, nil] + expect(result[2]).to be_a_kind_of(NoMethodError) + expect(ch3.pop.value!).to eq 3 + expect(ch3.pop.value!).to eq 'ab' + + ch1.push 1 + ch2.push 2 + ch1.push 'a' + ch2.push 'b' + ch1.push :done + ch2.push :done + + expect(Concurrent::Promises.future(&add).run.result).to eq [true, :done, nil] + expect(ch3.pop.value!).to eq 3 + expect(ch3.pop.value!).to eq 'ab' + end + end +end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb index ef3bbe748..30205f522 100644 --- a/spec/spec_helper.rb +++ b/spec/spec_helper.rb @@ -24,7 +24,7 @@ require 'concurrent' require 'concurrent-edge' -Concurrent.use_stdlib_logger Logger::FATAL +Concurrent.use_simple_logger Logger::FATAL # import all the support files Dir[File.join(File.dirname(__FILE__), 'support/**/*.rb')].each { |f| require File.expand_path(f) } diff --git a/tasks/update_doc.rake b/tasks/update_doc.rake index a37af832f..f8dc71ad7 100644 --- a/tasks/update_doc.rake +++ b/tasks/update_doc.rake @@ -1,15 +1,60 @@ require 'yard' -YARD::Rake::YardocTask.new +require 'md_ruby_eval' + +module YARD + module Templates::Helpers + # The helper module for HTML templates. + module HtmlHelper + def signature_types(meth, link = true) + meth = convert_method_to_overload(meth) + if meth.respond_to?(:object) && !meth.has_tag?(:return) + meth = meth.object + end + + type = options.default_return || "" + if meth.tag(:return) && meth.tag(:return).types + types = meth.tags(:return).map { |t| t.types ? t.types : [] }.flatten.uniq + first = link ? h(types.first) : format_types([types.first], false) + # if types.size == 2 && types.last == 'nil' + # type = first + '?' + # elsif types.size == 2 && types.last =~ /^(Array)?<#{Regexp.quote types.first}>$/ + # type = first + '+' + # elsif types.size > 2 + # type = [first, '...'].join(', ') + if types == ['void'] && options.hide_void_return + type = "" + else + type = link ? h(types.join(", ")) : format_types(types, false) + end + elsif !type.empty? + type = link ? h(type) : format_types([type], false) + end + type = "(#{type}) " unless type.empty? 
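+        # wrap the rendered types in parentheses to match the "(Type) name"
+        # style YARD uses when displaying method signatures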
+ type + end + end + end +end root = File.expand_path File.join(File.dirname(__FILE__), '..') +task yard: %w(yard:preprocess yard:doc) + namespace :yard do + YARD::Rake::YardocTask.new(:doc) + cmd = lambda do |command| puts ">> executing: #{command}" system command or raise "#{command} failed" end + task :preprocess do + Dir.chdir File.join(__dir__, '..', 'doc') do + cmd.call 'bundle exec md-ruby-eval --auto' or raise + end + end + desc 'Pushes generated documentation to github pages: http://ruby-concurrency.github.io/concurrent-ruby/' task :push => [:setup, :yard] do diff --git a/yard-template/default/fulldoc/html/css/common.css b/yard-template/default/fulldoc/html/css/common.css index dfd9d858a..f7f7f98b8 100644 --- a/yard-template/default/fulldoc/html/css/common.css +++ b/yard-template/default/fulldoc/html/css/common.css @@ -4,6 +4,16 @@ body { line-height: 18px; } +.docstring h1:before { + content: '# '; + color: silver; +} + +.docstring h2:before { + content: '## '; + color: silver; +} + .docstring code, .docstring .object_link a, #filecontents code { padding: 0px 3px 1px 3px; border: 1px solid #eef;