In [1]:
import numpy as np
In [2]:
a = np.array([-100, -10, 0, 10, 100])
print(a)
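# [-100  -10    0   10  100]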
In [3]:
print(np.signbit(a))
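# [ True  True False False False]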
In [4]:
print(type(np.signbit(a)))
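# <class 'numpy.ndarray'>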
In [5]:
print(np.signbit(a).dtype)
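# bool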
In [6]:
print(np.signbit(-100))
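# True
# A scalar input returns a single numpy.bool_ value instead of an array.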
In [7]:
print(a == 0)
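# [False False  True False False]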
In [8]:
print(a > 0)
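# [False False False  True  True]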
In [9]:
print(a >= 0)
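# [False False  True  True  True]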
In [10]:
print(a < 0)
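# [ True  True False False False]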
In [11]:
print(a <= 0)
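# [ True  True  True False False]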
In [12]:
print(np.count_nonzero(np.signbit(a)))
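# 2
# np.count_nonzero() counts True elements, so this is the number of values with the sign bit set.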
In [13]:
print(~np.signbit(a))
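# [False False  True  True  True]
# The ~ operator negates each element of a boolean array.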
In [14]:
print(np.count_nonzero(~np.signbit(a)))
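# 3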
In [15]:
print(np.count_nonzero(a == 0))
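# 1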
In [16]:
print(np.count_nonzero(a < 0))
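# 2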
In [17]:
print(np.count_nonzero(a > 0))
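# 2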
In [18]:
a_special = np.array([0.0, -0.0, np.inf, -np.inf, np.nan])
print(a_special)
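# [  0.  -0.  inf -inf  nan]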
In [19]:
print(np.signbit(a_special))
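# [False  True False  True False]
# np.signbit() is True for -0.0 and -np.inf; np.nan carries a positive sign bit, so it is False.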
In [20]:
print(a_special == 0)
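# [ True  True False False False]
# Under IEEE 754, -0.0 == 0 evaluates to True.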
In [21]:
print(a_special < 0)
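# [False False False  True False]
# Neither -0.0 nor np.nan compares as less than 0; only -np.inf does.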
In [22]:
print(a_special > 0)
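# [False False  True False False]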
In [23]:
a_complex = np.array([3 + 4j, -3 - 4j])
print(a_complex)
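# [ 3.+4.j -3.-4.j]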
In [24]:
# np.signbit() is not defined for complex input and raises TypeError:
# print(np.signbit(a_complex))
# TypeError: ufunc 'signbit' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
In [25]:
print(np.abs(a_complex))
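# [5. 5.]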
In [26]:
print(a_complex.real)
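# [ 3. -3.]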
In [27]:
print(a_complex.imag)
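# [ 4. -4.]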
In [28]:
print(np.signbit(a_complex.real))
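# [False  True]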
In [29]:
print(a_complex.real < 0)
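# [False  True]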