@@ -193,12 +193,12 @@ def cuda(self, device_id=None, blocking=True):
193193 Examples:
194194 In Static Graph Mode:
195195
196- .. code-block:: python
196+ .. code-block:: pycon
197197
198198 >>> import paddle
199199 >>> paddle.enable_static()
200200
201- >>> x = paddle.static.data(name="x", shape=[2,2], dtype='float32')
201+ >>> x = paddle.static.data(name="x", shape=[2, 2], dtype='float32')
202202 >>> y = x.cpu()
203203 >>> z = y.cuda()
204204 """
@@ -284,7 +284,7 @@ def _ndim(self):
284284 the dimension
285285
286286 Examples:
287- .. code-block:: python
287+ .. code-block:: pycon
288288
289289 >>> import paddle
290290
@@ -306,7 +306,7 @@ def ndimension(self):
306306 the dimension
307307
308308 Examples:
309- .. code-block:: python
309+ .. code-block:: pycon
310310
311311 >>> import paddle
312312
@@ -328,7 +328,7 @@ def dim(self):
328328 the dimension
329329
330330 Examples:
331- .. code-block:: python
331+ .. code-block:: pycon
332332
333333 >>> import paddle
334334
@@ -418,17 +418,16 @@ def astype(self, dtype):
418418 Examples:
419419 In Static Graph Mode:
420420
421- .. code-block:: python
421+ .. code-block:: pycon
422422
423423 >>> import paddle
424424 >>> paddle.enable_static()
425425 >>> startup_prog = paddle.static.Program()
426426 >>> main_prog = paddle.static.Program()
427427 >>> with paddle.static.program_guard(startup_prog, main_prog):
428- ... original_value = paddle.static.data(name = "new_value", shape=[2,2], dtype='float32')
428+  ...         original_value = paddle.static.data(name="new_value", shape=[2, 2], dtype='float32')
429429 ... new_value = original_value.astype('int64')
430430 ... print(f"new value's dtype is: {new_value.dtype}")
431- ...
432-            new Tensor's dtype is: paddle.int64
431+            new value's dtype is: paddle.int64
433432
434433 """
@@ -628,7 +627,7 @@ def _T_(self):
628627 If `n` is the dimensions of `x` , `x.T` is equivalent to `x.transpose([n-1, n-2, ..., 0])`.
629628
630629 Examples:
631- .. code-block:: python
630+ .. code-block:: pycon
632631
633632 >>> import paddle
634633 >>> paddle.enable_static()
@@ -657,7 +656,7 @@ def _mT_(self):
657656 If `n` is the dimensions of `x` , `x.mT` is equivalent to `x.transpose([0, 1, ..., n-1, n-2])`.
658657
659658 Examples:
660- .. code-block:: python
659+ .. code-block:: pycon
661660
662661 >>> import paddle
663662 >>> paddle.enable_static()
@@ -697,7 +696,7 @@ def _new_full_(
697696 By default, the returned Tensor has the same dtype and place as this tensor.
698697
699698 Examples:
700- .. code-block:: python
699+ .. code-block:: pycon
701700
702701 >>> import paddle
703702 >>> paddle.enable_static()
@@ -744,7 +743,7 @@ def _new_empty_(
744743 By default, the returned Tensor has the same dtype and place as this tensor.
745744
746745 Examples:
747- .. code-block:: python
746+ .. code-block:: pycon
748747
749748 >>> import paddle
750749 >>> paddle.enable_static()
@@ -790,7 +789,7 @@ def _new_ones_(
790789 By default, the returned Tensor has the same dtype and place as this tensor.
791790
792791 Examples:
793- .. code-block:: python
792+ .. code-block:: pycon
794793
795794 >>> import paddle
796795 >>> paddle.enable_static()
@@ -837,7 +836,7 @@ def _new_zeros_(
837836 By default, the returned Tensor has the same dtype and place as this tensor.
838837
839838 Examples:
840- .. code-block:: python
839+ .. code-block:: pycon
841840
842841 >>> import paddle
843842 >>> paddle.enable_static()
@@ -984,7 +983,7 @@ def clone(self):
984983 Tensor, The cloned Tensor.
985984
986985 Examples:
987- .. code-block:: python
986+ .. code-block:: pycon
988987
989988 >>> import paddle
990989
@@ -1011,7 +1010,7 @@ def clear_gradient(self):
10111010 Returns: None
10121011
10131012 Examples:
1014- .. code-block:: python
1013+ .. code-block:: pycon
10151014
10161015 >>> import paddle
10171016 >>> import numpy as np
@@ -1191,10 +1190,10 @@ def to(self, *args, **kwargs):
11911190 Tensor: self
11921191
11931192 Examples:
1194- .. code-block:: python
1193+ .. code-block:: pycon
11951194
11961195 >>> import paddle
1197- >>> x = paddle.to_tensor([1,2, 3])
1196+ >>> x = paddle.to_tensor([1, 2, 3])
11981197 >>> print(x)
11991198 Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
12001199 [1, 2, 3])
@@ -1211,7 +1210,7 @@ def to(self, *args, **kwargs):
12111210 >>> print(x)
12121211 Tensor(shape=[3], dtype=int16, place=Place(gpu:0), stop_gradient=True,
12131212 [1, 2, 3])
1214- >>> y = paddle.to_tensor([4,5, 6])
1213+ >>> y = paddle.to_tensor([4, 5, 6])
12151214 >>> y
12161215 Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
12171216 [4, 5, 6])
@@ -1373,7 +1372,7 @@ def numpy(self):
13731372 Returns type:
13741373 ndarray: dtype is same as current Variable
13751374 Examples:
1376- .. code-block:: python
1375+ .. code-block:: pycon
13771376
13781377 >>> import paddle
13791378 >>> import paddle.base as base
@@ -1402,7 +1401,7 @@ def tolist(self):
14021401 list: Elements have the same dtype as current Variable
14031402
14041403 Examples:
1405- .. code-block:: python
1404+ .. code-block:: pycon
14061405
14071406 >>> import paddle
14081407 >>> import paddle.base as base
@@ -1432,7 +1431,7 @@ def requires_grad(self) -> bool:
14321431 Setting requires_grad=True is equivalent to setting stop_gradient=False.
14331432
14341433 Examples:
1435- .. code-block:: python
1434+ .. code-block:: pycon
14361435
14371436 >>> import paddle
14381437 >>> x = paddle.randn([2, 3])
@@ -1477,10 +1476,10 @@ def itemsize(self) -> int:
14771476 Returns the number of bytes allocated on the machine for a single element of the Tensor.
14781477
14791478 Examples:
1480- .. code-block:: python
1479+ .. code-block:: pycon
14811480
14821481 >>> import paddle
1483- >>> x = paddle.randn((2,3),dtype=paddle.float64)
1482+ >>> x = paddle.randn((2, 3), dtype=paddle.float64)
14841483 >>> x.itemsize
14851484 8
14861485 """
0 commit comments