@@ -192,7 +192,7 @@
 <div class="pytorch-left-menu-search">

 <div class="version">
-  <a href='https://pytorch.org/docs/versions.html'>master (1.10.0a0+git145a20b) ▼</a>
+  <a href='https://pytorch.org/docs/versions.html'>master (1.10.0a0+gitb241698) ▼</a>
 </div>


@@ -403,7 +403,7 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>

 <span class="k">class</span> <span class="nc">_ContextMethodMixin</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>

- <span class="k">def</span> <span class="nf">save_for_backward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+ <div class="viewcode-block" id="_ContextMethodMixin.save_for_backward"><a class="viewcode-back" href="../../../generated/torch.autograd.function._ContextMethodMixin.save_for_backward.html#torch.autograd._ContextMethodMixin.save_for_backward">[docs]</a> <span class="k">def</span> <span class="nf">save_for_backward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""Saves given tensors for a future call to :func:`~Function.backward`.</span>

 <span class="sd">**This should be called at most once, and only from inside the**</span>
@@ -415,9 +415,9 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>

 <span class="sd">Arguments can also be ``None``.</span>
 <span class="sd">"""</span>
- <span class="bp">self</span><span class="o">.</span><span class="n">to_save</span> <span class="o">=</span> <span class="n">tensors</span>
+ <span class="bp">self</span><span class="o">.</span><span class="n">to_save</span> <span class="o">=</span> <span class="n">tensors</span></div>

- <span class="k">def</span> <span class="nf">mark_dirty</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">):</span>
+ <div class="viewcode-block" id="_ContextMethodMixin.mark_dirty"><a class="viewcode-back" href="../../../generated/torch.autograd.function._ContextMethodMixin.mark_dirty.html#torch.autograd._ContextMethodMixin.mark_dirty">[docs]</a> <span class="k">def</span> <span class="nf">mark_dirty</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""Marks given tensors as modified in an in-place operation.</span>

 <span class="sd">**This should be called at most once, only from inside the**</span>
@@ -428,15 +428,15 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>
 <span class="sd">It doesn't matter whether the function is called before or after</span>
 <span class="sd">modification.</span>
 <span class="sd">"""</span>
- <span class="bp">self</span><span class="o">.</span><span class="n">dirty_tensors</span> <span class="o">=</span> <span class="n">args</span>
+ <span class="bp">self</span><span class="o">.</span><span class="n">dirty_tensors</span> <span class="o">=</span> <span class="n">args</span></div>

 <span class="k">def</span> <span class="nf">mark_shared_storage</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">pairs</span><span class="p">):</span>
 <span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span>
 <span class="s1">'mark_shared_storage is deprecated. '</span>
 <span class="s1">'Tensors with shared storages are automatically tracked. Note '</span>
 <span class="s1">'that calls to `set_()` are not tracked'</span><span class="p">)</span>

- <span class="k">def</span> <span class="nf">mark_non_differentiable</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">):</span>
+ <div class="viewcode-block" id="_ContextMethodMixin.mark_non_differentiable"><a class="viewcode-back" href="../../../generated/torch.autograd.function._ContextMethodMixin.mark_non_differentiable.html#torch.autograd._ContextMethodMixin.mark_non_differentiable">[docs]</a> <span class="k">def</span> <span class="nf">mark_non_differentiable</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""Marks outputs as non-differentiable.</span>

 <span class="sd">**This should be called at most once, only from inside the**</span>
@@ -450,17 +450,17 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>

 <span class="sd">This is used e.g. for indices returned from a max :class:`Function`.</span>
 <span class="sd">"""</span>
- <span class="bp">self</span><span class="o">.</span><span class="n">non_differentiable</span> <span class="o">=</span> <span class="n">args</span>
+ <span class="bp">self</span><span class="o">.</span><span class="n">non_differentiable</span> <span class="o">=</span> <span class="n">args</span></div>

- <span class="k">def</span> <span class="nf">set_materialize_grads</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span>
+ <div class="viewcode-block" id="_ContextMethodMixin.set_materialize_grads"><a class="viewcode-back" href="../../../generated/torch.autograd.function._ContextMethodMixin.set_materialize_grads.html#torch.autograd._ContextMethodMixin.set_materialize_grads">[docs]</a> <span class="k">def</span> <span class="nf">set_materialize_grads</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""Sets whether to materialize output grad tensors. Default is true.</span>

 <span class="sd">**This should be called only from inside the** :func:`forward` **method**</span>

 <span class="sd">If true, undefined output grad tensors will be expanded to tensors full</span>
 <span class="sd">of zeros prior to calling the :func:`backward` method.</span>
 <span class="sd">"""</span>
- <span class="bp">self</span><span class="o">.</span><span class="n">materialize_grads</span> <span class="o">=</span> <span class="n">value</span>
+ <span class="bp">self</span><span class="o">.</span><span class="n">materialize_grads</span> <span class="o">=</span> <span class="n">value</span></div>

 <span class="k">class</span> <span class="nc">_HookMixin</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>

@@ -547,7 +547,7 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>
 <span class="c1"># for the tracer</span>
 <span class="n">is_traceable</span> <span class="o">=</span> <span class="kc">False</span>

- <span class="nd">@staticmethod</span>
+ <div class="viewcode-block" id="Function.forward"><a class="viewcode-back" href="../../../generated/torch.autograd.Function.forward.html#torch.autograd.Function.forward">[docs]</a> <span class="nd">@staticmethod</span>
 <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="n">ctx</span><span class="p">:</span> <span class="n">Any</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">:</span> <span class="n">Any</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">:</span> <span class="n">Any</span><span class="p">)</span> <span class="o">-></span> <span class="n">Any</span><span class="p">:</span>
 <span class="sa">r</span><span class="sd">"""Performs the operation.</span>

@@ -560,9 +560,9 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>
 <span class="sd">retrieved during the backward pass.</span>
 <span class="sd">"""</span>
 <span class="k">raise</span> <span class="ne">NotImplementedError</span><span class="p">(</span><span class="s2">"You must implement the forward function for custom"</span>
- <span class="s2">" autograd.Function."</span><span class="p">)</span>
+ <span class="s2">" autograd.Function."</span><span class="p">)</span></div>

- <span class="nd">@staticmethod</span>
+ <div class="viewcode-block" id="Function.backward"><a class="viewcode-back" href="../../../generated/torch.autograd.Function.backward.html#torch.autograd.Function.backward">[docs]</a> <span class="nd">@staticmethod</span>
 <span class="k">def</span> <span class="nf">backward</span><span class="p">(</span><span class="n">ctx</span><span class="p">:</span> <span class="n">Any</span><span class="p">,</span> <span class="o">*</span><span class="n">grad_outputs</span><span class="p">:</span> <span class="n">Any</span><span class="p">)</span> <span class="o">-></span> <span class="n">Any</span><span class="p">:</span>
 <span class="sa">r</span><span class="sd">"""Defines a formula for differentiating the operation.</span>

@@ -585,7 +585,7 @@ <h1>Source code for torch.autograd.function</h1><div class="highlight"><pre>
 <span class="sd">output.</span>
 <span class="sd">"""</span>
 <span class="k">raise</span> <span class="ne">NotImplementedError</span><span class="p">(</span><span class="s2">"You must implement the backward function for custom"</span>
- <span class="s2">" autograd.Function."</span><span class="p">)</span></div>
+ <span class="s2">" autograd.Function."</span><span class="p">)</span></div></div>


 <span class="k">def</span> <span class="nf">once_differentiable</span><span class="p">(</span><span class="n">fn</span><span class="p">):</span>