C RUBY-ON-RAILS MYSQL ASP.NET DEVELOPMENT RUBY .NET LINUX SQL-SERVER REGEX WINDOWS ALGORITHM ECLIPSE VISUAL-STUDIO STRING SVN PERFORMANCE APACHE-FLEX UNIT-TESTING SECURITY LINQ UNIX MATH EMAIL OOP LANGUAGE-AGNOSTIC VB6 MSBUILD

# How can I optimize this NumPy code?

By : Iván Moreno
Date : November 20 2020, 12:01 PM
it fixes the issue I have the following code that is the bottleneck in my Python code:
code :
``````def _get_payoff(self, actual, predicted):
pred_factor = numpy.abs(0.5 - predicted)
payoff_selector = 2*numpy.isclose(actual, 1) + (predicted < 0.5)
payoff = numpy.choose(payoff_selector,
[
self.fp_payoff,
self.tn_payoff,
self.tp_payoff,
self.fn_payoff,
])
return numpy.sum(payoff * pred_factor)

def get_total_payoff(self):
return self._get_payoff(self.target, predictions)
``````

Share :

## How can I fix and optimize this very simple piece of "Game of Life" code by taking advantage of NumPy's functi

By : Chris Kirubakaran
Date : March 29 2020, 07:55 AM
code :
``````for x in range(rows):
for y in range(cols):
if Z[x][y] == 1:
if (N[x][y] < 2) or (N[x][y] > 3):
Z[x][y] = 0
else:
if (N[x][y] == 3):
Z[x][y] = 1
``````
``````set_zero_idxs = (Z==1) & ((N<2) | (N>3))
set_one_idxs = (Z!=1) & (N==3)
Z[set_zero_idxs] = 0
Z[set_one_idxs] = 1
``````
``````In [49]: %timeit no_loop(z,n)
1000 loops, best of 3: 177 us per loop

In [50]: %timeit loop(z,n)
10 loops, best of 3: 31.2 ms per loop
``````
``````for x in range(rows):
for y in range(cols):
Q = [q for q in [x-1, x, x+1] if ((q >= 0) and (q < cols))]
R = [r for r in [y-1, y, y+1] if ((r >= 0) and (r < rows))]
S = [Z[q][r] for q in Q for r in R if (q, r) != (x, y)]
N[x][y] = sum(S)
``````
``````N = np.roll(Z,1,axis=1) + np.roll(Z,-1,axis=1) + np.roll(Z,1,axis=0) + np.roll(Z,-1,axis=0)
``````
``````shape = Z.shape
new_shape = (shape[0]+2,shape[1]+2)
b_z = np.zeros(new_shape)
b_z[1:-1,1:-1] = Z
b_n = np.roll(b_z,1,axis=1) + np.roll(b_z,-1,axis=1) + np.roll(b_z,1,axis=0) + np.roll(b_z,-1,axis=0)
N = b_n[1:-1,1:-1]
``````
``````In [4]: %timeit computeNeighbours(z)
10 loops, best of 3: 140 ms per loop

In [5]: %timeit noloop_computeNeighbours(z)
10000 loops, best of 3: 133 us per loop

In [6]: %timeit noloop_with_buffer_computeNeighbours(z)
10000 loops, best of 3: 170 us per loop
``````

## Optimize Cython code for numpy variance calculation

By : jack
Date : March 29 2020, 07:55 AM
With these it helps I went through said calculation and I think the reason that it was going so slow is that I was using np.var() which is a python (or numpy) function and does not allow the loop to be compiled in C. If anyone knows how to do that while using numpy let me know.
What I ended up doing was coding the calculation from this:
code :
``````dk[:,0] = np.array([sqrt((PC_t-pc)**2/np.var(PC_l)) for pc in PC_l])
``````
``````cimport cython
cimport numpy as np
import numpy as np
from libc.math cimport sqrt as csqrt
from libc.math cimport pow as cpow
@cython.boundscheck(False)
@cython.cdivision(True)

cdef cy_mahalanobis(np.ndarray[double, ndim=1] PC_l, double PC_t):
cdef unsigned int i,j,L
L = PC_l.shape[0]
cdef np.ndarray[double] dk = np.zeros(L)
cdef double x,total,mean,var

total = 0
for i in xrange(L):
x = PC_l[i]
total = total + x

mean = total / L
total = 0
for i in xrange(L):
x = cpow(PC_l[i]-mean,2)
total = total + x

var = total / L

for j in xrange(L):
dk[j] = csqrt(cpow(PC_t-PC_l[j],2)/var)

return dk
``````

## Optimize code to increment slice of a numpy array once per each row of a pandas dataframe

By : Sergey Shiryaev
Date : March 29 2020, 07:55 AM
I wish did fix the issue. We could vectorize that loop with np.add.at/np.bincount with a similar trick as mentioned in this post, like so -
code :
``````a = df.values
L = 500
s0,s1 = a[:,0], a[:,1]
v = 1.0/(s1-s0)
# @Daniel F's suggestion :
out = np.bincount(s0,v,minlength=L) - np.bincount(s1,v,minlength=L)
np.cumsum(out,out=out)
out[np.isclose(out,0)] = 0
``````
``````def do(n_rows):
df = pd.DataFrame()
df['A'] = np.random.choice(range(0, 400), n_rows)
df['B'] = df['A'] + np.random.choice(range(1, 50), n_rows)
y = np.zeros(500)
for row in df[['A', 'B']].itertuples():
_, l, r = row
y[l:r] += 1.0 / (r - l)
return y

def doNumPy(n_rows):
df = pd.DataFrame()
df['A'] = np.random.choice(range(0, 400), n_rows)
df['B'] = df['A'] + np.random.choice(range(1, 50), n_rows)
a = df.values

L = 500
s0,s1 = a[:,0], a[:,1]
v = 1.0/(s1-s0)
out = np.bincount(s0,v,minlength=L) - np.bincount(s1,v,minlength=L)
np.cumsum(out,out=out)
out[np.isclose(out,0)] = 0
return out

if __name__=='__main__':
from timeit import Timer
for n_rows in [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000]:
print "-------------------------------------------------------------"
t = Timer("do(%d)" % n_rows, "from __main__ import do")
print("Old App : Time with %d rows:" % n_rows, t.timeit(number=1))
t = Timer("doNumPy(%d)" % n_rows, "from __main__ import doNumPy")
print("New App : Time with %d rows:" % n_rows, t.timeit(number=1))
``````
``````-------------------------------------------------------------
('Old App : Time with 1 rows:', 0.0034999847412109375)
('New App : Time with 1 rows:', 0.0022029876708984375)
-------------------------------------------------------------
('Old App : Time with 10 rows:', 0.003280162811279297)
('New App : Time with 10 rows:', 0.00197601318359375)
-------------------------------------------------------------
('Old App : Time with 100 rows:', 0.0030059814453125)
('New App : Time with 100 rows:', 0.0017080307006835938)
-------------------------------------------------------------
('Old App : Time with 1000 rows:', 0.005307912826538086)
('New App : Time with 1000 rows:', 0.0018489360809326172)
-------------------------------------------------------------
('Old App : Time with 10000 rows:', 0.027753829956054688)
('New App : Time with 10000 rows:', 0.0022859573364257812)
-------------------------------------------------------------
('Old App : Time with 100000 rows:', 0.26231813430786133)
('New App : Time with 100000 rows:', 0.008862018585205078)
-------------------------------------------------------------
('Old App : Time with 1000000 rows:', 2.270418882369995)
('New App : Time with 1000000 rows:', 0.06492495536804199)
-------------------------------------------------------------
('Old App : Time with 10000000 rows:', 23.051368951797485)
('New App : Time with 10000000 rows:', 0.6994130611419678)
``````

## How can I optimize my code? (python-opencv-numpy)

By : Vincent Wolsink
Date : March 29 2020, 07:55 AM
To fix this issue Conversion of the integer array to a string is slow. Instead, compare integer arrays directly, using numpy.array_equal.
code :
``````    import cv2
import numpy as np
import time

start_time = time.time()
tam = np.size(img, 0), np.size(img, 1)
target_BGR = [255, 0, 0]

for i in range(tam[0]):
for j in range(tam[1]):
if not np.array_equal(target_BGR, img[i, j]):
img[i, j] = [255, 255, 255]

cv2.imwrite('/home/user/Vision Artificial/out.png', img)

print(time.time() - start_time)
``````

## Optimize code for step function using only NumPy

By : Niwes
Date : March 29 2020, 07:55 AM
this one helps. A very abstract yet interesting problem! for entertaining me, I had fun :)
p.s. I'm not sure about your pw2 I wasn't able to get it output the same as pw1.
code :
``````def pw1(x, udata):
vals = np.arange(1,udata.shape[0]+1).reshape(udata.shape[0],1)
pw_func = np.sum(np.where(np.greater_equal(x,udata)*np.less(x,np.roll(udata,-1)),vals,0),axis=0)
return pw_func

def pw2(xx, uu):
inds = np.searchsorted(uu, xx, side='right')
vals = np.arange(1,uu.shape[0]+1)
pw_func = vals[inds[inds[(inds != uu.shape[0])*(inds != 0)]-1]]
num_mins = np.sum(xx < np.min(uu))
num_maxs = np.sum(xx > np.max(uu))

pw_func = np.concatenate((np.zeros(num_mins), pw_func, np.zeros(xx.shape[0]-pw_func.shape[0]-num_mins)))
return pw_func
``````
``````def pw3(x, udata):
# the None slice is to create new axis
step_bool = x >= udata[None,:].T

# we exploit the fact that bools are integer value of 1s
# skipping the last value in "data"
step_vals = np.sum(step_bool[:-1], axis=0)

# for the step_bool that we skipped from previous step (last index)
# we set it to zerp so that we can negate the step_vals once we reached
# the last value in "data"
step_vals[step_bool[-1]] = 0

return step_vals
``````
``````def pw4(x, udata):
inds = np.searchsorted(udata, x, side='right')

# fix-ups the last data if x is already out of range of data[-1]
if x[-1] > udata[-1]:
inds[inds == inds[-1]] = 0

return inds
``````
``````plt.plot(pw1(x,udata.reshape(udata.shape[0],1)), label='pw1')
plt.plot(pw2(x,udata), label='pw2')
plt.plot(pw3(x,udata), label='pw3')
plt.plot(pw4(x,udata), label='pw4')
``````
``````print(np.all(pw1(x,udata.reshape(udata.shape[0],1)) == pw3(x,udata)))
>>> True
print(np.all(pw1(x,udata.reshape(udata.shape[0],1)) == pw4(x,udata)))
>>> True
``````
``````print(timeit.Timer('pw1(x,udata.reshape(udata.shape[0],1))', "from __main__ import pw1, x, udata").repeat(number=1000))
>>> [3.1938983199979702, 1.6096494779994828, 1.962694135003403]
print(timeit.Timer('pw2(x,udata)', "from __main__ import pw2, x, udata").repeat(number=1000))
>>> [0.6884554479984217, 0.6075002400029916, 0.7799002879983163]
print(timeit.Timer('pw3(x,udata)', "from __main__ import pw3, x, udata").repeat(number=1000))
>>> [0.7369808239964186, 0.7557657590004965, 0.8088172269999632]
print(timeit.Timer('pw4(x,udata)', "from __main__ import pw4, x, udata").repeat(number=1000))
>>> [0.20514375300263055, 0.20203858999957447, 0.19906871100101853]
``````