questions | answers |
---|---|
Exception: 'module' object is not iterable When I am running the code from Github project, I am getting this error:Exception in thread django-main-thread:Traceback (most recent call last): File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/urls/resolvers.py", line 600, in url_patterns iter(patterns)TypeError: 'module' object is not iterableThe above exception was the direct cause of the following exception:Traceback (most recent call last): File "/data/data/com.termux/files/usr/lib/python3.9/threading.py", line 954, in _bootstrap_inner self.run() File "/data/data/com.termux/files/usr/lib/python3.9/threading.py", line 892, in run self._target(*self._args, **self._kwargs) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/utils/autoreload.py", line 64, in wrapper fn(*args, **kwargs) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/core/management/commands/runserver.py", line 118, in inner_run self.check(display_num_errors=True) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/core/management/base.py", line 419, in check all_issues = checks.run_checks( File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/core/checks/registry.py", line 76, in run_checks new_errors = check(app_configs=app_configs, databases=databases) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/core/checks/urls.py", line 13, in check_url_config return check_resolver(resolver) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/core/checks/urls.py", line 23, in check_resolver return check_method() File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/urls/resolvers.py", line 413, in check messages.extend(check_resolver(pattern)) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/core/checks/urls.py", line 23, in check_resolver return check_method() File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/urls/resolvers.py", line 412, in check for pattern in self.url_patterns: File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/utils/functional.py", line 48, in __get__ res = instance.__dict__[self.name] = self.func(instance) File "/data/data/com.termux/files/usr/lib/python3.9/site-packages/django/urls/resolvers.py", line 607, in url_patterns raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from edjango.core.exceptions.ImproperlyConfigured: The included URLconf '<module 'blog.urls' from '/storage/emulated/0/application/blog/urls.py'>' does not appear to have any patterns in it. If you see valid patterns in the file then the issue is probably caused by a circular import. | I had a look at your urls.py file from your github linkThere is a small typo where you have capitalised the urlpatterns object.from django.urls import pathfrom .views import blogListViewUrlpatterns = [ path(' ', blogListView.as_view(), name='home'), ]The spelling of urlpatterns must be exact in order for routing to work.Easy fix |
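For clarity, a minimal sketch of the corrected blog/urls.py. The view name is taken from the question's code, and the route string is assumed to be empty rather than a single space:

```python
# blog/urls.py -- "urlpatterns" must be spelled exactly like this, all lowercase
from django.urls import path

from .views import blogListView  # view class as named in the question's project

urlpatterns = [
    path('', blogListView.as_view(), name='home'),
]
```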
iteratively intersecting line segments in Sympy... is there a better way? OK. I have the points that comprise the borders of a polygon. I want to (a) use Sympy's geometry module to determine, from all of the possible line-segments between any pair of points, which segments do not cross the perimeter. This result will be the "edges" that are allowed to be used in (b) a shortest_distance analysis in Networkx. My end goal is to iterate this process through many shapes, but I've hard-coded the coordinates for just 1 shape in this example. import numpyimport networkx as nxfrom sympy import geometryfrom itertools import combinationsfrom matplotlib import pyplot as plotarr_bou = numpy.array([[-542.62545014, 961.34455209], [-544.45425379, 961.34455209], [-544.45425379, 962.25895392], [-547.19745928, 962.25895392], [-547.19745928, 963.17335575], [-549.02626294, 963.17335575], [-549.02626294, 964.08775758], [-550.85506659, 964.08775758], [-550.85506659, 961.34455209], [-552.68387025, 961.34455209], [-552.68387025, 962.25895392], [-553.59827208, 962.25895392], [-553.59827208, 965.91656123], [-552.68387025, 965.91656123], [-552.68387025, 967.7453649 ], [-551.76946842, 967.7453649 ], [-551.76946842, 968.65976672], [-550.85506659, 968.65976672], [-550.85506659, 967.7453649 ], [-548.11186111, 967.7453649 ], [-548.11186111, 965.91656123], [-547.19745928, 965.91656123], [-547.19745928, 964.08775758], [-546.28305745, 964.08775758], [-546.28305745, 965.00215941], [-543.53985197, 965.00215941], [-543.53985197, 963.17335575], [-542.62545014, 963.17335575], [-542.62545014, 964.08775758], [-540.79664648, 964.08775758], [-540.79664648, 963.17335575], [-539.88224465, 963.17335575], [-539.88224465, 962.25895392], [-542.62545014, 962.25895392], [-542.62545014, 961.34455209]])boundXY = []for i in arr_bou: boundXY.append((i[0],i[1]))points = [geometry.Point(i) for i in boundXY]poly = geometry.Polygon(*points) # use the * first to unpack the points (necessary to avoid errors)G = nx.Graph()positions = {} # build a dictionaryfor i in xrange(len(boundXY)): # that contains coordinates positions[i] = boundXY[i] # of each node on the graph's perimeterG.add_path(positions.keys())# add nodes to graph w/ boundary edgesG.add_path([min(G.nodes()),max(G.nodes())]) combos_o = list(combinations(positions.keys(),2))combos = [i for i in combos_o if i not in G.edges()]keepcombos = []for combo in combos: pt1 = positions[combo[0]] pt2 = positions[combo[1]] line = geometry.Polygon(pt1,pt2) # there are 4 polygon sides that do not count as intersections # because 2 sides will intersect a point on each end test = True for side in poly.sides: if side.p1 != geometry.Point(pt1) and side.p1 != geometry.Point(pt2): if side.p2 != geometry.Point(pt1) and side.p2 != geometry.Point(pt2): if geometry.intersection(line,side): test = False break else: try: if poly.encloses(line.midpoint): pass else: test = False break except NotImplementedError: pass if test == True: keepcombos.append(combo)G.add_edges_from(keepcombos)I've gotten this to work for small polygons (14 vertices) but this takes FOREVER with even just a 35 vertices, and other polygons will be larger than this still. Is there a more efficient way of finding all within-polygon node-pairs?Thanks!! | I found a solution that sped the process up by about 13x (for a polygon with 35 points (like the data listed above), the old method from the code in the question took about 4hours to find all line segments inside the polygon. This new method took 18 minutes instead.) 
Above I iterated through the points, and at each iteration looked at each border ("side") individually to see if the lines intersected. I changed this to instead intersect the line with the entire polygon. If the line lies inside or on the edge, there should only be 2 points where it intersects, so if the length of the intersection is >2, I throw the combination out: for combo in combos: pt1 = geometry.Point(positions[combo[0]],evaluate=False) pt2 = geometry.Point(positions[combo[1]],evaluate=False) line = geometry.Polygon(pt1,pt2) try: if poly.encloses(line.midpoint): pass else: continue except NotImplementedError: continue intersect = geometry.intersection(line,poly) if len(intersect)>2: continue keepcombos.append(combo)This list "keepcombos" now has all of the lines (or networkx "edges") that I wish to include in a Dijkstra path analysis |
How to fix freeze_support() error when computing Perplexity and Coherence for LDA? I am going to compute Perplexity and Coherence for my textual data for LDA. I run the following code:# Compute Perplexityprint('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.# Compute Coherence Scorecoherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')coherence_lda = coherence_model_lda.get_coherence()print('\nCoherence Score: ', coherence_lda)But I see this error (freeze_support()) and I don't know how to fix it or even ignore it:RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. | # Compute Perplexityprint('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.# Compute Coherence Scoreif __name__ == '__main__': coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) |
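A minimal sketch of the suggested guard, assuming lda_model, corpus, data_lemmatized and id2word are already built as in the question (gensim's c_v coherence spawns worker processes, which is why the guard is needed on Windows):

```python
from gensim.models import CoherenceModel

def evaluate(lda_model, corpus, data_lemmatized, id2word):
    # Perplexity: a measure of how good the model is; lower is better.
    print('\nPerplexity: ', lda_model.log_perplexity(corpus))
    coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized,
                                         dictionary=id2word, coherence='c_v')
    print('\nCoherence Score: ', coherence_model_lda.get_coherence())

if __name__ == '__main__':
    # build lda_model, corpus, data_lemmatized and id2word here, then:
    evaluate(lda_model, corpus, data_lemmatized, id2word)
```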
How to filter Django objects based on value returned by a method? I have an Django object with a method get_volume_sum(self) that return a float, and I would like to query the top n objects with the highest value, how can I do that?For example I could do a loop like this but I would like a more elegant solution.vol = []obj = []for m in Market.object.filter(**args): # 3000 objects sum = m.get_volume_sum() vol.append(sum) obj.append(m.id)top = search_top_n(obj, vol, n)And this is how the method looks like:# return sum of volume over last hoursdef get_volume_sum(self, hours): return Candle.objects.filter(market=self, dt__gte=timezone.now()-timedelta(hours=hours) ).aggregate(models.Sum('vo'))From what I see here even with Python there isn't a single line solution. | You should not filter with the method, this will result in an N+1 problem: for 3'000 Market objects, it will generate an additional 3'0000 queries to obtain the volumes.You can do this in bulk with a .annotate(…) [Django-doc]:from django.db.models import Sumhours = 12 # some value for hoursMarket.objects.filter( **args, candle__dt__gte=timezone.now()-timedelta(hours=hours),).annotate( candle_vol=Sum('candle__vo')).order_by('-candle_vol')Here there is however a small caveat: if there is no related Candle, then these Markets will be filtered out. We can prevent that by allowing also Markets without Candles with:from django.db.models import Q, Sumhours = 12 # some value for hoursMarket.objects.filter( Q(candle__dt__gte=timezone.now()-timedelta(hours=hours)) | Q(candle=None), **args).annotate( candle_vol=Sum('candle__vo')).order_by('-candle_vol') |
CMD color problems I want to make my python cmd output colorful!I have color-codes like this:\033[91mNow the output in cmd isn't colorful. I get a "←". How can I change this?Did anybody have the same problem? :DEdit: Is there an alternative to cmd? Is it hard to program a cmd window in e.g. C#? | You need to add just two more lines at the beginning of your script.import osos.system("") |
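A small sketch of the fix in context: the os.system("") call is what switches the Windows console into processing ANSI escape sequences, so codes like \033[91m render as colour instead of "←":

```python
import os

os.system("")  # enables ANSI escape-code processing in the Windows console

RED = "\033[91m"
RESET = "\033[0m"
print(RED + "this line should be red" + RESET)
```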
How to check differences between column values in pandas? I'm manually comparing two or three very similar rows using pandas. Is there a more automated way to do this? I would like a better method than using '=='. | https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.diff.htmlSee if this will satisfy your needs.df['sales_diff'] = df['sales'].diff()The above code snippet creates a new column in your data frame, which contains the difference from the previous row by default. You can screw around with the parameters (axis) to compare rows or columns and you can change (periods) to compare to a specific row or column. |
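A short, self-contained sketch of DataFrame.diff on a hypothetical frame, showing the axis and periods parameters mentioned above:

```python
import pandas as pd

df = pd.DataFrame({'sales_2020': [100, 120, 90], 'sales_2021': [110, 115, 95]})

print(df.diff())           # each row minus the previous row (default)
print(df.diff(axis=1))     # each column minus the previous column
print(df.diff(periods=2))  # compare each row to the row two positions back
```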
fish: Unknown command: pip Today I am trying Garuda KDE Dr460nized and I am running python on it. But when I use pip to install packages in my Konsole, an error comes like this:fish: Unknown command: pipI thought I should write pip3 instead of pip but still, the same error comes:fish: Unknown command: pip3Can anyone please tell me how to solve this issue in Garuda Linux? It is an Arch-based Linux distribution. Please tell me what the solution is. Any help will be appreciated. Thanks in advance | I think I have answered my question. I have to use:python -m pip install packageNameIt solved my error. If anyone can't solve their error, this answer may help. |
django detect user login in another tab Is there anyway in django that if user has two open tabs, both logged out, then logs in in one tab, tell that he has logged in in another tab? I mean something like github that tells you you have signed in, please refresh the page.The problem is now If I login in one tab and then in the second tab, I get csrf token missing incorrect. | You get csrf token missing incorrect. because when user relogins, the server generates a new csrf token to the cookie. The cookie persists across the same domain. And when you're trying to do smth on the current page, the request fails because csrf in your <form> differs from the cookie which has been changed. That's why github refreshes the page (instead of conitnuing doing request from it). Thus server will return new csrf in html to your form.Edit:Consider the following choices :If your cookie is not readonly: . Set setInterval where you check the session which user loaded the page and current session from cookie.Render 4 first characters too page and save it to a variable. Set this variable when page loads. And with every request pass the variable with headers. Add a middleware which checks if first 4 characters in headers matcher first 4 characters from cookie, and if it doesn't tell the client to refresh the page.If you want to automatically detect your case, you need to frequently spam the server and ask if the session has changed. From the client you can send the old session and the new one (if session is readonly you can send like few first characters from the server). |
Regex to find five consecutive consonants I need a regex for python that finds words with five consecutive consonants. These words would work - tnortvcvni (rtvcvn)kahjdflka (hjdflk)But these words wouldn't (no five letters in row without vowels) - peanut butterjelly | It seems you don't mean a fixed length of 5 characters but a minimum:(?:(?![aeiou])[a-z]){5,}Live demoNote: set i flag if it exists. |
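A quick sketch of the pattern applied to the examples from the question; the re.IGNORECASE flag plays the role of the i flag mentioned above:

```python
import re

pattern = re.compile(r'(?:(?![aeiou])[a-z]){5,}', re.IGNORECASE)

for word in ['tnortvcvni', 'kahjdflka', 'peanut butter', 'jelly']:
    match = pattern.search(word)
    print(word, '->', match.group() if match else 'no match')
```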
Python Regex Match failed This passed on https://regex101.com/ without any issues. Did I miss anything? The entire string is in one line.def get_title_and_content(html): html = """<!DOCTYPE html> <html> <head> <title>Change delivery date with Deliv</title> </head> <body> <div class="gkms web">The delivery date can be changed up until the package is assigned to a driver.</div> </body> </html> """ title_pattern = re.compile(r'<title>(.*?)</title>(.*)') match = title_pattern.match(html) if match: print('successfully extract title and answer') return match.groups()[0].strip(), match.groups()[1].strip() else: print('unable to extract title or answer') | In a summary of the comments:title_pattern.search(html) Should be used instead of title_pattern.match(html)As the search function will search anywhere in the provided string instead of just from the beginning. match = title_pattern.findall(html) could be used similarly but would return a list of items instead of just one.Also as mentioned using BeautifulSoup would pay of more in the long run as Regular Expression is not properly suited for searching HTML |
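A minimal sketch of the difference between match and search on a shortened version of the question's HTML:

```python
import re

html = "<!DOCTYPE html><html><head><title>Change delivery date with Deliv</title></head></html>"
title_pattern = re.compile(r'<title>(.*?)</title>')

print(title_pattern.match(html))            # None: match() only matches at the start of the string
print(title_pattern.search(html).group(1))  # 'Change delivery date with Deliv'
```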
Checking ISBN numbers This is my code:def isISBN(n): if len(n)!= 10: return False else: d1=int(n[0])*1 d2=int(n[1])*2 d3=int(n[2])*3 d4=int(n[3])*4 d5=int(n[4])*5 d6=int(n[5])*6 d7=int(n[6])*7 d8=int(n[7])*8 d9=int(n[8])*9 d10=(d1+d2+d3+d4+d5+d6+d7+d8+d9) num=d10%11 print(d10,num) if num==10: return True else: return FalseHere are some test cases my teacher gave us:>>> isISBN('020103803X')True>>> isISBN('0540122068')True>>> isISBN('020108303X')False>>> isISBN('0540122069')FalseThe code fails the test '0540122068' because my output is False, but I don't know why. | Don't forget the 10th value and check for modulo equivalence to 0:def isISBN(n): if len(n)!= 10: return False else: d1=int(n[0])*1 d2=int(n[1])*2 d3=int(n[2])*3 d4=int(n[3])*4 d5=int(n[4])*5 d6=int(n[5])*6 d7=int(n[6])*7 d8=int(n[7])*8 d9=int(n[8])*9 if n[9] == 'X': d10 = 10 else: d10 = int(n[9]) d10 = d10*10 d11=(d1+d2+d3+d4+d5+d6+d7+d8+d9+d10) num=d11%11 if num==0: return True else: return FalseisISBN("3680087837") |
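A more compact equivalent of the accepted answer, written as a sketch: weights 1-10, 'X' counted as 10 (for simplicity accepted in any position), and a valid ISBN-10 when the weighted sum is divisible by 11. It reproduces all four of the teacher's test cases:

```python
def is_isbn10(n):
    if len(n) != 10:
        return False
    digits = [10 if c == 'X' else int(c) for c in n]
    return sum(w * d for w, d in zip(range(1, 11), digits)) % 11 == 0

print(is_isbn10('020103803X'), is_isbn10('0540122068'))  # True True
print(is_isbn10('020108303X'), is_isbn10('0540122069'))  # False False
```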
How do you set a variable number of regex expressions? Currently I have out = re.sub(r'[0-9][0-9][0-9]', '', input). I would like to have a variable number of [0-9]'s.So far I have;string = ''for i in xrange(numlen): string = string + '[0-9]'string = 'r' + stringout = re.sub(string, '', input)This doesn't work, and I've tried using re.compile, but haven't had any luck. Is there a better way of doing this? Or am I just missing something trivial? | You can specify repetition using {}, for example 3 digits would be[0-9]{3}So you can do something likereps = 5 # or whatever value you'd likeout = re.sub('[0-9]{{{}}}'.format(reps), '', input)Or if you don't know how many digits there will beout = re.sub('[0-9]+', '', input) |
why is this code removing the file instead of renaming it? I want to rename report.json but it is removing the file instead import osfrom pathlib import Pathimport jsonpath =Path( r'C:\Users\Sajid\Desktop\cuckoo (3)\cuckoo\storage\analyses\3\reports')filename = os.path.join(path,"report.json")with open(filename) as json_file: data=json.load(json_file) var=(data['target']['file']['md5']) print(var) json_file.close() os.rename(filename,var)I expect this code to rename the file and not delete it | It's probably not deleting it, but moving it to your working directory (so if you launched your script from C:\Users\Sajid, the file would be there, not in C:\Users\Sajid\Desktop\cuckoo (3)\cuckoo\storage\analyses\3\reports). Edit: Based on your comment, this is definitely what's happening; the first time you ran your code, it moved it to your working directory (with component cuckoo (1), where you probably expected it to go to the one with component cuckoo (3)), the second time it failed because os.rename won't overwrite an existing file on Windows.Change it to combine the desired target directory with the basename of the file, a la:var = os.path.join(str(path), os.path.basename(data['target']['file']['md5']))so it stays in the same directory.You're also unnecessarily closing the file twice (once explicitly, once implicitly via the with block termination). As it happens, the first one is necessary, but only because you kept the rename inside the with block. The simplest solution is to just remove the extra close, and rename outside the with block (in fact, you don't need it open after the json.load, so you may as well close it then):with open(filename) as json_file: data = json.load(json_file)# Dedent to implicitly close filevar = os.path.join(str(path), os.path.basename(data['target']['file']['md5']))print(var) os.rename(filename,var)As written, you won't replace an existing file (on Windows; on UNIX-like systems, it will); if you want to silently replace the existing file everywhere, use os.replace instead of os.rename. |
How to download GeoTiff files from GeoServer using Python I am trying to download GeoTiff files from GeoServer using Python. I have a found a few resources online about this type of thing, but I have been unable to accomplish this task.For example, here: https://gis.stackexchange.com/questions/181560/download-geotiff-from-geoserver it seems that people have been able to do what I want to do, but they do not explain their process.Likewise, the accepted answer here: How to grab a TIFF image from python works for downloading GeoTiffs like the one at http://imgsrc.hubblesite.org/hu/db/images/hs-2006-01-a-hires_tif.tif, but there is no download link for GeoTiffs on GeoServer.Any help would be much appreciated!EDIT: Here are some more details about what I have tried thus far. GeoServer has a rest API server, at http://localhost:8080/geoserver/rest locally, so I initially tried to access this url in python and then download the GeoTiff I want in "Layers". However, each of the files in "Layers" is an html file; what I would like to know is if there is a place where I can actually access the GeoTiff files programmatically. I think this is the root of the problem – although I am not really sure how to download GeoTiffs programmatically, I must first be able to actually access them in GeoServer.As far as progress, though, I have not been able to make much. As I mentioned above, I was able to download a GeoTiff using the code at How to grab a TIFF image from python, but I have been unable to do this for a GeoTiff on GeoServer. I am new to both GeoServer and the GeoTiff format, so I am not quite sure how to approach this problem. | As the answer to your linked question says you need to make a WCS request to GeoServer to fetch a GeoTiff. The GeoServer manual provides a WCS reference that should help you get an understanding of how to proceed. You can also go to the demos page of your GeoServer installation and use the WCS Request builder to create an XML file that you can use as a template in your python program. |
Cumulative sum in pyspark I am trying to compute the cumulative sum per class. Code is working fine by using sum(df.value).over(Window.partitionBy('class').orderBy('time'))df = sqlContext.createDataFrame( [(1,10,"a"),(3,2,"a"),(1,2,"b"),(2,5,"a"),(2,1,"b"),(9,0,"b"),(4,1,"b"),(7,8,"a"),(3,8,"b"),(2,5,"a"),(0,0,"a"),(4,3,"a")], ["time", "value", "class"] )time|value|class|+----+-----+-----+| 1| 10| a|| 3| 2| a|| 1| 2| b|| 2| 5| a|| 2| 1| b|| 9| 0| b|| 4| 1| b|| 7| 8| a|| 3| 8| b|| 2| 5| a|| 0| 0| a|| 4| 3| a|df.withColumn('cumsum_value', sum(df.value).over(Window.partitionBy('class').orderBy('time'))).show()time|value|class|cumsum_value|+----+-----+-----+------------+| 1| 2| b| 2|| 2| 1| b| 3|| 3| 8| b| 11|| 4| 1| b| 12|| 9| 0| b| 12|| 0| 0| a| 0|| 1| 10| a| 10|| 2| 5| a| 20|| 2| 5| a| 20|| 3| 2| a| 22|| 4| 3| a| 25|| 7| 8| a| 33|+----+-----+-----+------------+But its not working with duplicate rows. Desired output should be: time|value|class|cumsum_value|+----+-----+-----+------------+| 1| 2| b| 2|| 2| 1| b| 3|| 3| 8| b| 11|| 4| 1| b| 12|| 9| 0| b| 12|| 0| 0| a| 0|| 1| 10| a| 10|| 2| 5| a| 15|| 2| 5| a| 20|| 3| 2| a| 22|| 4| 3| a| 25|| 7| 8| a| 33|+----+-----+-----+------------+ | Adding to @pault's comment, I would suggest a row_number() calculation based on orderBy('time', 'value') and then use that column in the orderBy of another window(w2) to get your cum_sum. This will handle both cases where time is the same and value is the same, and where time is the same but value isnt.from pyspark.sql import functions as Ffrom pyspark.sql.window import Windoww1=Window().partitionBy("class").orderBy("time","value")w2=Window().partitionBy("class").orderBy('rownum')df.withColumn('rownum', F.row_number().over(w1))\ .withColumn('cumsum_value', F.sum("value").over(w2)).drop('rownum').show()+----+-----+-----+------------+|time|value|class|cumsum_value|+----+-----+-----+------------+| 1| 2| b| 2|| 2| 1| b| 3|| 3| 8| b| 11|| 4| 1| b| 12|| 9| 0| b| 12|| 0| 0| a| 0|| 1| 10| a| 10|| 2| 5| a| 15|| 2| 5| a| 20|| 3| 2| a| 22|| 4| 3| a| 25|| 7| 8| a| 33|+----+-----+-----+------------+ |
Weird linear regression learning curve I'm trying to build a prediction model for apartments price. I use python scikit-learn toolset. I'm using a dataset having total floor area and location of the apartment, which I have converted to dummy features. So the dataset looks like this:Then I build a learning curve to see how the model is doing. I build the learning curve this way:from matplotlib import pyplot as pltfrom sklearn.linear_model import LinearRegressionfrom sklearn.model_selection import learning_curvemodel = LinearRegression()training_sizes, training_scores, validation_scores = learning_curve( estimator = model, X = X_train, y = y_train, train_sizes = np.linspace(5, len(X_train) * 0.8, dtype = int), cv = 5)line1, line2 = plt.plot( training_sizes, training_scores.mean(axis = 1), 'g', training_sizes, validation_scores.mean(axis = 1), 'r')plt.legend((line1, line2), ('Training', 'Cross-validation'))The picture I see is somewhat confusing:Anomalies I see here are:Huge error on cross-validation setError not steadily decreasing on training examples number growth. Is it normal? Learning curve of training set only is also not so smooth but at least the error isn't that huge:Also I tried to add to add polynomial features of 2nd degree. But this didn't make the model perform any different. And because I have a lot of categorical features (total 106) it takes quite long even for 2nd degree polynomial. So I didn't try for higher degrees. Also I tried to build a model using as simple cost function and gradient descent as possible using Octave. The result with weird error was same. Update:Thanks to tolik I made several amendments:Data preparation:Categorical data are independent. So I can't combine them into one feature.Features were scaled using StandardScaler(). Thank you for that.Feature extraction:After features transformation with PCA I found out one new feature has explained variance ratio over 99%. Though it's strange I used only this one. That also allowed to increase polynomial degree though it didn't increase performance.Model selection:I tried several different models but none seem to perform better than LinearRegression. Interesting thing - all models perform worse on full data set. Probably it's because I sorted by price and higher prices are rather outliers. So when I start training sets on 1000 samples and go to the maximum, I get this picture (for nearly all models): | My explanation have 3 steps: The data preparation, feature extraction, and model selection.Data preparation:In this dataset there are lots of Categorical and Ordinal values. Ifthe column has several non related categories it's ok to one-hot it.but if the column has categories with order like"bad","normal","good" you can convert it to numerical as{Good:1,Normal:0.5,Bad:0}.Value ranges: the value ranges for each feature differs from the other, therefore the best thing to do is to normalise each feature along itself between 0:1.Feature Extraction:Your goal is to maximise the score so I guess you don't care about which feature is more important. Use PCA (has an implementation in scikit-learn library) , this algorithm convert your feature vectors into different features that each of them is a linear combination of the other features. These new features are ordered by their explained variance. The first features describes the data better than the last one. You select the first features that their explained_variance_ sums to 99%. 
Now you have far fewer features.Model Selection: You don't really know in advance which model is good, because of the No Free Lunch Theorem, but in this problem the best results that don't use deep learning come from: XGBoost-Regressor, Random-Forest-Regressor, Ada-Boost.The most important thing is the Data Preparation!!! |
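A minimal sketch of the scaling + PCA step described above; the random X is a stand-in for the encoded feature matrix from the question:

```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

X = np.random.rand(200, 106)            # stand-in for the encoded feature matrix

X_scaled = StandardScaler().fit_transform(X)
pca = PCA(n_components=0.99)            # keep enough components for 99% explained variance
X_reduced = pca.fit_transform(X_scaled)
print(X_reduced.shape, pca.explained_variance_ratio_.sum())
```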
Find most common words from list of strings We have a given list:list_of_versions = ['apple II' ,'apple', 'apple 1' , 'HD APPLE','apple 3.5', 'adventures of apple' , 'apple III','orange 2' ,'300mhz apple', '300-orange II' , 'orange II HD' , 'orange II tvx', 'orange 2' , 'HD berry-vol 2', 'berry II', 'berry 2', 'berry VI', 'berry 1', 'berry II' ,'berry' ,'II Berry']How can I find the main word of each string? For example:word | main--------------------------apple II |appleval/apple |appleapple 1 |appleHD APPLE |appleapple 3.5 |appleadventures of apple |appleapple III |apple300mhz apple |appleorange 2 |orange300-orange II |orangeorange II HD |orange/orange/II-tvx |orangeorange 2 |orangeHD berry-vol 2 |berryberry-II |berry-berry-2 |berry(berry) VI |berryberry 1 |berryberry II |berryberry 2022 B8 |berryII Berry-hd |berry22 Berry II |berryBerry 6.8.9 |berryImportant points:I can not create the main word list which contains three main words (apple, orange, berry). Because the list will be updated with new main words. So we will never know what is the new words.there is no limit to versions. At some point, we can see something like 'apple XII' or 'GB-HD berry 2.4' so version value can be everything. (in case you want to create a stopword list)Nice to have (but it is not mandatory)--> Adding another column as well which shows the version. i.e:word | main | version-----------------------------------apple II |apple | II val/apple |apple | NULLapple 1 |apple | 1HD APPLE |apple | HDapple 3.5 |apple | 3.5apple III |apple | III300mhz apple II |apple | IIorange 2 |orange | 2300-orange II |orange | IIorange II HD |orange | II HD/orange/II-tvx |orange | II tvxorange 2 |orange | 2HD berry-vol 2 |berry | 2 HDberry-II |berry | II-berry-2 |berry | 2(berry) VI |berry | VIberry 1 |berry | 1berry II |berry | II berry 2022 |berry | NULLII Berry-hd |berry | II HD22 Berry |berry | 22Berry 6.8.9 |berry | 6.8.9 | All the other answers omit the entry containing the word "adventures" because it throws off the search. You need a heuristic that can combine "longest" with "most frequent".One thing that helps is that finding the longest word in each row greatly increases SNR. In other words, it filters out the unnecessary words pretty well, and just needs a little help. If you know how many words you are looking for (three in this case), you're all set:from collections import Countercommon_long_words = [word.casefold() for word in (max(re.findall('\\w+', version), key=len) for version in list_of_versions)]words = Counter(common_long_words).most_common(3)Splitting off the version and finding the word of interest is not especially difficult. You have a couple of options regarding what constitutes a version, especially when the main word is embedded in the middle of the phrase. Here is a simple function that takes the entire remainder:def split_main(version, words): for word in words: i = version.find(word) if i > 0: return word, f'{version[:i]} {version[i + len(word)]}' else: raise ValueError(f'Version "{version}" does not contain any of the main words {{{", ".join(words)}}}')result = {version: split_main(version, words) for version in list_of_versions} |
Python: Google-Maps-API sends unknown format to parse I use the Python Client for Google Maps Services to get following data from google-maps:{ 'address_components':[ { 'long_name':'20', 'short_name':'20', 'types':[ 'street_number' ] }, { 'long_name':'Oberböhl', 'short_name':'Oberböhl', 'types':[ 'route' ] }, { 'long_name':'Ingelheim am Rhein', 'short_name':'Ingelheim am Rhein', 'types':[ 'locality', 'political' ] }, { 'long_name':'Mainz-Bingen', 'short_name':'Mainz-Bingen', 'types':[ 'administrative_area_level_3', 'political' ] }, { 'long_name':'Rheinland-Pfalz', 'short_name':'RP', 'types':[ 'administrative_area_level_1', 'political' ] }, { 'long_name':'Germany', 'short_name':'DE', 'types':[ 'country', 'political' ] }, { 'long_name':'55218', 'short_name':'55218', 'types':[ 'postal_code' ] } ], 'adr_address':'<span class="street-address">Oberböhl 20</span>, <span class="postal-code">55218</span> <span class="locality">Ingelheim am Rhein</span>, <span class="country-name">Germany</span>', 'formatted_address':'Oberböhl 20, 55218 Ingelheim am Rhein, Germany', 'formatted_phone_number':'06132 5099968', 'geometry':{ 'location':{ 'lat':49.9810156, 'lng':8.0739617 }, 'viewport':{ 'northeast':{ 'lat':49.9823942302915, 'lng':8.075293780291501 }, 'southwest':{ 'lat':49.9796962697085, 'lng':8.072595819708498 } } }, 'icon':'https://maps.gstatic.com/mapfiles/place_api/icons/generic_business-71.png', 'id':'d2b37ffe23fd5e76648a90df2987558b039fcdf7', 'international_phone_number':'+49 6132 5099968', 'name':'Esch Metalltechnik GmbH', 'place_id':'ChIJHaERGJ_svUcRRfqNoGXq3EU', 'plus_code':{ 'compound_code':'X3JF+CH Ingelheim am Rhein, Germany', 'global_code':'8FXCX3JF+CH' }, 'reference':'ChIJHaERGJ_svUcRRfqNoGXq3EU', 'scope':'GOOGLE', 'types':[ 'general_contractor', 'point_of_interest', 'establishment' ], 'url':'https://maps.google.com/?cid=5034156205699627589', 'utc_offset':60, 'vicinity':'Oberböhl 20, Ingelheim am Rhein', 'website':'http://www.esch-metalltechnik.de/'}{ 'long_name':'55218', 'short_name':'55218', 'types':[ 'postal_code' ]}Now I want to extract certain variables, like the "street_number". I don't know which format this data is, so I worked with it like a dictionary:try: self.hausnr = place_result_2["address_components"][0]["long_name"]except: self.hausnr = "NA"The problem is, that the index "0" isn't always the same position of the data I want, i varies. Is there a way to extract the data in another way? Perhaps I have to use a JSON-parser or something similar?Thanks a lot. | The answer is: List comprehensionstry: # make a list of all address components that have type "street number" comp = [c for c in place_result_2["address_components"] if "street_number" in c["types"]] # the first one of them (assuming there will never be more than one) is the desired one self.hausnr = comp[0]["long_name"]except: self.hausnr = "NA"Since this will probably be a common operation, make a function:def get_address_component(place_result, comp_type, comp_property="long_name", default=None): """ returns the first address component of a given type """ try: comp = [c for c in place_result["address_components"] if comp_type in c["types"]] return comp[0][comp_property] except KeyError: return default# ...self.hausnr = get_address_component(place_result_2, "street_number", default="NA")PS, regarding: Perhaps I have to use a JSON-parser or something similar?JSON is a data transfer format - it's plain text. The Google API server used it to get the data across the wire. 
In your program it has already been parsed - by the Google API client library you are using. What you are looking at is not JSON anymore, it's a Python data structure (nested dicts and lists and values). It just happens to look quite similar to JSON when you print it to the console, because Python uses a similar format to represent data. In other words, no, you don't need to JSON-parse it again. |
Why using "--requirements_file" uploads dependencies onto GCS? I'm currently generating a template with those parameters: --runner DataflowRunner \ --requirements_file requirements.txt \ --project ${GOOGLE_PROJECT_ID} \ --output ${GENERATED_FILES_PATH}/staging \ --staging_location=${GENERATED_FILES_PATH}/staging \ --temp_location=${GENERATED_FILES_PATH}/temp \ --template_location=${GENERATED_FILES_PATH}/templates/calculation-template \and the SDK is uploading dependencies specified inside requirements.txt onto GCS inside the staging section. I do not understand... For me using this kind of file would allow workers to directly pull dependencies from the official pip registry, not from my GCS, right?It makes running this command very long since it needs to upload packages :/Any explanation why is it happening? Maybe I'm doing something wrong?Thank you, | I believe this is done to make the Dataflow worker startup process more efficient and consistent (both initially and when auto-scaling). Without this, each time a Dataflow worker starts up, that worker has to directly connect to PyPI to find the latest matching versions of dependencies. Instead of this, set of dependencies are staged at pipeline initiation and are consistently installed in workers throughout the pipeline execution. |
Got only first row in table when using Selenium scraping (Python) I'm trying to scrape the whole table from: https://free-proxy-list.net/ And I managed to scrape it but it resulted in only the first row of the table instead of 20 rows. I saw previous similar questions that were answered and I have tried the solutions given but my selenium was unable to locate the element when I use .// for my xpath.for bod in driver.find_elements_by_xpath("//*[@id='proxylisttable']/tbody"): col = bod.find_elements_by_xpath("//*[@id='proxylisttable']/tbody/tr") for c in col: ip = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[1]') port = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[2]') code = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[3]') country = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[4][@class = "hm"]') anonymity = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[5]') google = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[6][@class = "hm"]')My code resulted in only scraping the first row 20 times instead of getting 20 rows. The rows are indicated at ip, port, code, etc. I have tried multiple types of xpath syntax but still end up the same. | I think your problem is in this line :col = bod.find_elements_by_xpath("//*[@id='proxylisttable']/tbody/tr")The correct syntax is :col = bod.find_elements_by_xpath("//*[@id='proxylisttable']/tbody/tr[insert count here]")Like this :table = driver.find_element_by_xpath("//*[@id='proxylisttable']/tbody")rows = table.find_elements_by_xpath("//*[@id='proxylisttable']/tbody/tr")for i in range (1, len(rows)+1): row = table.find_element_by_xpath("//*[@id='proxylisttable']/tbody/tr[" +str(i) +']') for c in row: ip = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[1]') port = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[2]') code = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[3]') country = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[4][@class = "hm"]') anonymity = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[5]') google = c.find_element_by_xpath('//*[@id="proxylisttable"]/tbody/tr/td[6][@class = "hm"]') |
How to concatenate part of three layers in Keras? I can use keras.layers.concatenate to concatenate two layers then send them to next layer, but if I want to take part of two layers then concatenate them and then send them to next layer, what should I do?For example, I want to take part of first conv layer and part of the second conv layer and the last pooling layer, and then concatenate them together to form one layer. But Keras is a high level library, how do we take part of them? You can understand me better by looking at the Figure 2 in paper A Network-based End-to-End Trainable Task-oriented Dialogue System. | Well, you can slice them as you want, like the way you would slice a numpy array or a Python list, and use K.concatenate, all in a Lambda layer. For example:from keras import backend as K# ...out = Lambda(lambda x: K.concatenate([x[0][:,:10], x[1][:,:10], x[2][:,:10]], axis=the_concat_axis))([conv1, conv2, pool])Note that the first axis is the batch axis, so you may want to keep all of it (i.e. use : as above). |
Can we run tensorflow lite on linux ? Or it is for android and ios only Hi is there any possibility to run tensorflow lite on linux platform? If yes, then how we can write code in java/C++/python to load and run models on linux platform? I am familiar with bazel and successfully made Android and ios application using tensorflow lite. | I think the other answers are quite wrong.Look, I'll tell you my experience... I've been working with Django for many years, and I've been using normal tensorflow, but there was a problem with having 4 or 5 or more models in the same project.I don't know if you know Gunicorn + Nginx. This generates workers, so if you have 4 machine learning models, for every worker it multiplies, if you have 3 workers you will have 12 models preloaded in RAM. This is not efficient at all, because if the RAM overflows your project will fall or in fact the service responses are slower.So this is where Tensorflow lite comes in. Switching from a tensorflow model to tensorflow lite improves and makes things much more efficient. Times are reduced absurdly.Also, Django and Gunicorn can be configured so that the model is pre-loaded and compiled at the same time. So every time the API is used up, it only generates the prediction, which helps you make each API call a fraction of a second long.Currently I have a project in production with 14 models and 9 workers, you can understand the magnitude of that in terms of RAM.And besides doing thousands of extra calculations, outside of machine learning, the API call does not take more than 2 seconds.Now, if I used normal tensorflow, it would take at least 4 or 5 seconds.In summary, if you can use tensorflow lite, I use it daily in Windows, MacOS, and Linux, it is not necessary to use Docker at all. Just a python file and that's it. If you have any doubt you can ask me without any problem.Here a example projectDjango + Tensorflow Lite |
Don't understand these ModuleNotFound errors I am a beginner and learning Python. I have set up the environment with SublimeText and Python 3.x. I am fine creating code on Sublime and building it locally through Ctrl+B, and for the input() function I installed SublimeREPL and it works fine up till now.The issue I am facing is in the Python interpreter. I am facing the below error while importing any package:import tweepyTraceback (most recent call last): File "<stdin>", line 1, in <module>ModuleNotFoundError: No module named 'tweepy'I cannot even run a Python script from there. My Python path is below:C:\Users\waqas.FVC\AppData\Local\Programs\Python\Python37-32Demo.py and hello.py are the two scripts I wrote initially which I am trying to execute from the Python Terminal, but it is showing the below errors:test.pyTraceback (most recent call last): File "<stdin>", line 1, in <module>NameError: name 'test' is not definedDemo.pyTraceback (most recent call last): File "<stdin>", line 1, in <module>NameError: name 'Demo' is not defined | The initial Python download includes a number of libraries, but there are many, many more that must be downloaded and installed separately. Tweepy is among those libraries.You can find, and download, tweepy from here:https://pypi.org/project/tweepy/ |
How to group rows, count in one column and do the sum in the other? I want to group rows of a csv file, count in one column and add in the other.For example with the following I would like to group the lines on the Commune to make columns of the winner with the count and a column Swing with the sumCommune Winner Swing longitude latitude turnoutParis PAM 1 12.323 12.093 0.3242Paris PJD 0 12.323 12.093 0.1233Paris PAM 1 12.323 12.093 0.534Paris UDF 1 12.323 12.093 0.65434Madrid PAM 0 10.435 -3.093 0.3423Madrid PAM 1 10.435 -3.093 0.5234Madrid PJD 0 10.435 -3.093 0.235How to group rows, have a column in one column and a sum in the other?Commune PAM PJD UDF SwingParis 3 1 1 3Madrid 2 1 0 1So far I tried try :g = df.groupby('Commune').Winnerpd.concat([g.apply(list), g.count()], axis=1, keys=['members', 'number'])But it returns: members numberCommune Paris [PAM, PJD, PAM, UDF] 4Madrid [PAM, PAM, UDF] 3 | Use crosstab and add new column with DataFrame.join and aggregate sum:df = pd.crosstab(df['Commune'], df['Winner']).join(df.groupby('Commune')['Swing'].sum())print (df) PAM PJD UDF SwingCommune Madrid 2 1 0 1Paris 2 1 1 3But if need counts of rows:df1 = pd.crosstab(df['Commune'], df['Winner'], margins=True, margins_name='Total').iloc[:-1]Or:df = pd.crosstab(df['Commune'], df['Winner']).assign(Total= lambda x: x.sum(axis=1))print (df1)Winner PAM PJD UDF TotalCommune Madrid 2 1 0 3Paris 2 1 1 4EDIT:If another columns then is possible use aggregation by first if all values per groups and for turnout use some another aggregate function like mean, sum...:df1 = (df.groupby('Commune') .agg({'Swing':'sum', 'longitude':'first','latitude':'first','turnout':'mean'}))print (df1) Swing longitude latitude turnoutCommune Madrid 1 10.435 -3.093 0.36690Paris 3 12.323 12.093 0.40896df = pd.crosstab(df['Commune'], df['Winner']).join(df1)print (df) PAM PJD UDF Swing longitude latitude turnoutCommune Madrid 2 1 0 1 10.435 -3.093 0.36690Paris 2 1 1 3 12.323 12.093 0.40896If want mean of all columns without Swing is possible create dictionary dynamically:d = dict.fromkeys(df.columns.difference(['Commune','Winner','Swing']), 'mean')d['Swing'] = 'sum'print (d){'latitude': 'mean', 'longitude': 'mean', 'turnout': 'mean', 'Swing': 'sum'}df1 = df.groupby('Commune').agg(d)print (df1) latitude longitude turnout SwingCommune Madrid -3.093 10.435 0.36690 1Paris 12.093 12.323 0.40896 3df = pd.crosstab(df['Commune'], df['Winner']).join(df1)print (df) PAM PJD UDF latitude longitude turnout SwingCommune Madrid 2 1 0 -3.093 10.435 0.36690 1Paris 2 1 1 12.093 12.323 0.40896 3 |
Best way to fill NULL values with conditions using Pandas? So for example I have a data looks like this:df = pd.DataFrame([[np.NaN, '1-5'], [np.NaN, '26-100'], ['Yes', 'More than 1000'], ['No', '26-100'], ['Yes', '1-5']], columns=['self_employed', 'no_employees'])df self_employed no_employees0 nan 1-51 nan 26-1002 Yes More than 10003 No 26-1004 Yes 1-5And I'm trying to fill the NULL value based on the condition that:If no_employees is '1-6' then 'Yes', otherwise 'No'I was able to complete this using the dictionary such as:self_employed_dict = {'1-5': 'Yes', '6-25': 'No', '26-100': 'No', '100-500': 'No', 'More than 1000':'No', '500-1000': 'No'}df['self_employed'] = df['self_employed'].fillna(df['no_employees'].map(self_employed_dict))But I wanted to know if there is a better, simple way of doing this. In this example, I had to write the dictionary for myself to map it, so how can I do this in the smart way?The expected output looks like this: self_employed no_employees0 Yes 1-51 No 26-1002 Yes More than 10003 No 26-1004 Yes 1-5 | Use fillna is the right way to go, but instead you could do:values = df['no_employees'].eq('1-5').map({False: 'No', True: 'Yes'})df['self_employed'] = df['self_employed'].fillna(values)print(df)Output self_employed no_employees0 Yes 1-51 No 26-1002 Yes More than 10003 No 26-1004 Yes 1-5 |
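An alternative sketch with nested numpy.where, which fills only the NaNs and leaves the existing answers untouched (same df as in the question):

```python
import numpy as np

df['self_employed'] = np.where(
    df['self_employed'].isna(),                           # only fill the missing rows
    np.where(df['no_employees'].eq('1-5'), 'Yes', 'No'),  # rule: '1-5' employees -> 'Yes'
    df['self_employed'],
)
```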
Cannot upload large file to Google Cloud Storage It is okay when dealing with small files. It only fails when I try to upload large files. I'm using the Python client. The snippet is:filename='my_csv.csv'storage_client = storage.Client()bucket_name = os.environ["GOOGLE_STORAGE_BUCKET"]bucket = storage_client.get_bucket(bucket_name)blob = bucket.blob("{}".format(filename))blob.upload_from_filename(filename) # file size is 500 MBThe only thing I get as a Traceback is "Killed" and I'm thrown out of the python interpreter.Any suggestions are highly appreciated.Edit:It works okay from the local machine. My application runs in Google Container Engine, so the problem occurs there when it runs in a celery task. | upload_from_filename attempts to upload the entire file in a single request.You can use Blob.chunk_size to spread the upload across many requests, each responsible for uploading one "chunk" of your file.For example:my_blob.chunk_size = 1024 * 1024 * 10 |
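A sketch of the full call with a chunk size set; the bucket name is hypothetical, and the chunk size must be a multiple of 256 KB:

```python
from google.cloud import storage

storage_client = storage.Client()
bucket = storage_client.get_bucket("my-bucket")      # hypothetical bucket name

blob = bucket.blob("my_csv.csv")
blob.chunk_size = 10 * 1024 * 1024                   # 10 MB per request (a multiple of 256 KB)
blob.upload_from_filename("my_csv.csv")              # now uses a chunked, resumable upload
```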
Python Requests Proxy Error So when i try to use proxy on python requests , the actual requests send is using my own ip http_proxy = "https://103.235.21.128:80"proxyDict = { "http" : http_proxy, }r = requests.get('http://whatismyip.org',proxies=proxyDict)print r.contentAlso Tried http_proxy = "https://103.235.21.128:80"proxyDict = { "https" : http_proxy, }r = requests.get('http://whatismyip.org',proxies=proxyDict)print r.contentSo why the request is using my real ip instead of the proxy i provide | Have you tried setting http on the proxy like this?http_proxy = "http://103.235.21.128:80"orhttp_proxy = "http://{}:{}".format('103.235.21.128', 80)If that doesn't work you might have to find an http proxyIf you're requesting data from multiple websites (both http and https) then you'll have to add both to the dictionary:proxyDict = {"http": http_proxy, "https": http_proxy} |
how to prioritize default mac python environment over miniconda I installed miniconda for some software I need to run. It worked great, but it made all of the other web related stuff I have set up through mac's default python environment stop working. What I would like to have is the mac python environment as the default and conda only when I need to run this specific software. So I would need to know #1 - how do I modify .bash_profile to allow me to run both environments, and #2 - the command I need to switch between environments (if there is one).My bash profile looks like:# Setting PATH for Python 3.6# The original version is saved in .bash_profile.pysavePATH="/Library/Frameworks/Python.framework/Versions/3.6/bin:${PATH}"export PATH# added by Miniconda3 4.3.21 installer# export PATH="/Users/mikeheavers/miniconda3/bin:$PATH"(I have commented out the conda path for now)Thanks! | Have you considered using Python's Virtual env? This allows you to have a completely separate Python installations without causing conflicts with your main python in your path. This sounds ideal for your development needs. You would need to "activate" the virtualenv prior to starting up miniconda, which will adjust your environmental variables such that the virtualenv python, and it's libraries will be used. (copying from the link) This will result in a virtual python installation$ pip install virtualenv$ cd my_project_folder$ virtualenv my_project$ source my_project/bin/activate$ # Do stuff, like install from pip$ deactivate # This will turn off the virtual python in your pathyou can use this interpreter in your bashrc too (Check out the link for a more in depth introduction) export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python2.7 |
Unreadable encoding of a SMB/Browser packet in Scapy I'm trying to parse a pcap file with scapy (in python), and getting raw data at the layer above TCP.on wireshark, all the layers are shown correctly:but on scapy all i'm seeing is just a Raw layer...i'm thinking maybe it didn't parsed the packet well?maybe the NetBIOS moduled did not load? or maybe i didn't import the module right? (i tryied: import scapy.all, import scapy, import scapy.layers.smb )how do i make scapy load the layers of the packet correctly?thanks! | If someone has a similar problem…You need something likepacket[TCP].decode_payload_as(NBTSession)And then you Will get the decoded layers by scapy: packet[TCP].show()[ TCP ] sport = microsoft_ds… options = [][ NBT Session Packet ]### TYPE = Session Message RESERVED = 0 LENGTH = 4873[ SMBNegociate Protocol Request Header ]### Start = '\xfeSMB' Command = 64 Error_Class= 0 Reserved = 1 Error_code= 0 Flags = 0 Flags2 = 0 PIDHigh = 5 Signature = 0 Unused = 0 TID = 183 PID = 0 UID = 0 MID = 0 WordCount = 0 ByteCount = 0[ SMB Negotiate Protocol Request Tail ]###BufferFormat= 0 BufferData= '\x03'Also you can try after that to decode the packet with the different clases:packet.decode_payload_as(SMBNegociate Protocol Request Header) |
Regex - Using * with a set of characters I'm fairly new at regex, and I've run into a problem that I cannot figure out:I am trying to match a set of characters that start with an arbitrary number of A-Z, 0-9, and _ characters that can optionally be followed by a number enclosed in a single set of parentheses and can be separated from the original string by a space (or not)Examples of what this should find:_ABCD1E_123FD(13)ABDF1G (2)This is my current regex expression:[A-Z_0-9]+\s*\({0,1}[\d]*\){0,1}It's finding everything just fine, but a problem exists if I have the following:_ABCDE )It should only grab _ABCDE and not the " )" but it currently grabs '_ABCDE )'Is there some way I can grab the (#) but not get extra characters if that entire pattern does not exist?If possible, please explain syntax as I am aiming to learn, not just get the answer.ANSWER: The following code is working for what I needed so far:[A-Z_0-9]+(\s*\([\d]+\)){0,1}# or, as has been mentioned, the above can be simplified# and cleaned up a bit to be[A-Z_0-9]+(\s*\(\d+\))?# The [] around \d are unnecessary and {0,1} is equivalent to ?Adding the parentheses around the (#) pattern allows for the use of ? or {0,1} on the entire pattern. I also changed the [\d]* to be [\d]+ to ensure at least one number inside of the parentheses.Thanks for the fast answers, all! | Your regex says that each paren (open & closed) may or may not be there, INDEPENDENTLY. Instead, you should say that the number-enclosed-in-parens may or may not be there:(\([\d]*\)){0,1}Note that this allows for there to be nothing in the parens; that's what your regex said, but I'm not clear that's what you actually want. |
Crawling a page using LazyLoader with Python BeautifulSoup I am toying around with BeautifulSoup and I like it so far. The problem is the site I am trying to scrape has a lazyloader... And it only scrapes one part of the site. Can I have a hint as to how to proceed? Must I look at how the lazyloader is implemented and parametrize anything else? | It turns out that the problem itself wasn't BeautifulSoup, but the dynamics of the page itself. For this specific scenario, that is. The server returns only part of the page, so headers need to be analysed and sent to the server accordingly. This isn't a BeautifulSoup problem itself. Therefore, it is important to take a look at how the data is loaded on a specific site. It's not always a "Load a whole page, process the whole page" paradigm. In some cases, you need to load part of the page and send a specific parameter to the server in order to keep loading the rest of the page. |
How can I optimize a plotly graph with updatemenues? So, I have been using plotly a lot and recently came to use the updatemenus method for adding buttons. I've created several graphs with it, but I find it difficult to find an efficient method to update the args section in updatemenus sections. I have a data frame that is bigger than the example but it’s the same idea, so I have df:name unaregate value ageinput1 in11 2 0input1 in11 0 1input1 in11 2 2input1 in11 3 3input1 in11 1 4input1 in12 1 0input1 in12 3 1input1 in12 4 2input1 in12 2 3input1 in12 3 4input1 in13 0 0input1 in13 2 1input1 in13 4 2input1 in13 2 3input1 in13 3 4input2 in21 3 0input2 in21 4 1input2 in21 2 2input2 in21 1 3input2 in21 3 4input2 in22 4 0input2 in22 0 1input2 in22 2 2input2 in22 4 3input2 in22 0 4input2 in23 3 0input2 in23 4 1input2 in23 0 2input2 in23 4 3input2 in23 2 4input3 in31 3 0input3 in31 4 1input3 in31 2 2input3 in31 4 3input3 in31 1 4input3 in32 4 0input3 in32 0 1input3 in32 0 2input3 in32 2 3input3 in32 1 4input3 in33 2 0input3 in33 3 1input3 in33 0 2input3 in33 3 3input3 in33 4 4input3 in34 2 0input3 in34 2 1input3 in34 3 2input3 in34 4 3input3 in34 3 4Here is a super inefficient way to create a data frame similar to this:df = pd.DataFrame(index=range(5),columns=range(1))df12 = pd.DataFrame(index=range(5),columns=range(1))df13 = pd.DataFrame(index=range(5),columns=range(1))df21 = pd.DataFrame(index=range(5),columns=range(1))df22 = pd.DataFrame(index=range(5),columns=range(1))df23 = pd.DataFrame(index=range(5),columns=range(1))df31 = pd.DataFrame(index=range(5),columns=range(1))df32 = pd.DataFrame(index=range(5),columns=range(1))df33 = pd.DataFrame(index=range(5),columns=range(1))df34 = pd.DataFrame(index=range(5),columns=range(1))df["name"] = "input1"df["unaregate"] = "in11"df["value"] = np.random.randint(0,5, size=len(df))df["age"] = range(0,len(df))df12["name"] = "input1"df12["unaregate"] = "in12"df12["value"] = np.random.randint(0,5, size=len(df12))df12["age"] = range(0,len(df12))df13["name"] = "input1"df13["unaregate"] = "in13"df13["value"] = np.random.randint(0,5, size=len(df13))df13["age"] = range(0,len(df13))df21["name"] = "input2"df21["unaregate"] = "in21"df21["value"] = np.random.randint(0,5, size=len(df21))df21["age"] = range(0,len(df21))df22["name"] = "input2"df22["unaregate"] = "in22"df22["value"] = np.random.randint(0,5, size=len(df22))df22["age"] = range(0,len(df22))df23["name"] = "input2"df23["unaregate"] = "in23"df23["value"] = np.random.randint(0,5, size=len(df23))df23["age"] = range(0,len(df23))df31["name"] = "input3"df31["unaregate"] = "in31"df31["value"] = np.random.randint(0,5, size=len(df31))df31["age"] = range(0,len(df31))df32["name"] = "input3"df32["unaregate"] = "in32"df32["value"] = np.random.randint(0,5, size=len(df32))df32["age"] = range(0,len(df32))df33["name"] = "input3"df33["unaregate"] = "in33"df33["value"] = np.random.randint(0,5, size=len(df33))df33["age"] = range(0,len(df33))df34["name"] = "input3"df34["unaregate"] = "in34"df34["value"] = np.random.randint(0,5, size=len(df34))df34["age"] = range(0,len(df34))frames = [df,df12,df13,df21,df22,df23,df31,df32,df33,df34]df = pd.concat(frames)df = df.drop([0],axis=1)This is the method I am employing for the plot:fig = go.Figure()names = df.name.unique()for i in names: db = df[df["name"]==i] uni = db.unaregate.unique() for f in uni: fig.add_trace(go.Scatter( x=db[db.unaregate==f].age, y=db[db.unaregate==f].value, connectgaps=False ,visible=False, mode='lines', legendgroup=f,name=f))fig.update_layout( 
template="simple_white", xaxis=dict(title_text="age"), yaxis=dict(title_text="Value"), width=1000, height = 600)fig.update_layout( updatemenus=[ dict(# type="buttons",# direction="down", active=0,# x=0.7,# y=1.2,# showactive=True, buttons=list( [ dict( label="Select name", method="update", args=[ {"visible": [False,False,False, False,False,False, False,False,False,False ]}, ], ), dict( label="input 1", method="update", args=[ {"visible": [True,True,True, False,False,False, False,False,False,False ]}, ], ), dict( label="input 2", method="update", args=[ {"visible": [False,False,False, True,True,True, False,False,False,False ]}, ], ), dict( label="input 3", method="update", args=[ {"visible": [False,False,False, False,False,False, True,True,True,True ]}, ], ),] ),# showactive=True, ) ])figIn the part were the True’s and False are, is there a way to add those in a loop so when I have more the fifty lines, I do not have to add more than 50 Trues and Fales’s? Any help is Welcomed I just want to be able to run this script for any type of similar data and that the lengths of data do not matter. | data frame creation can be simplified. Using pandas constructor capability with list comprehensionsfigure / traces creation is far simpler with plotly expresscore question - dynamically create visible liststhe trace is visible if it's in same name group. This where button name corresponds with name level of traceimport pandas as pdimport numpy as npimport plotly.express as pxdf = ( pd.DataFrame( [ { "name": f"input{a}", "unaregate": f"in{a}{b}", "value": np.random.randint(0, 5, 5), } for a in range(1, 4) for b in range(1, 4) ] ) .explode("value") .pipe(lambda d: d.assign(age=np.random.randint(0, 5, len(d)))))# get valid combinations that will create tracescombis = df.groupby(["name","unaregate"]).size().index# for this example - it's far simpler to use plotly express to create tracesfig = px.line(df, x="age", y="value", color="unaregate").update_traces(visible=False)# use list comprehesions to populate visible listsfig.update_layout( updatemenus=[ { "active": 0, "buttons": [ { "label": "Select name", "method": "update", "args": [{"visible": [False for t in fig.data]}], } ] + [ { "label": n, "method": "update", "args": [{"visible": [n == t for t in combis.get_level_values(0)]}], } for n in combis.get_level_values(0).unique() ], } ], template="simple_white") |
How to perform arithmetic with large floating-point numbers in Python

I have two numbers a and b:

a = 1562239482.739072
b = 1562239482.739071

If I perform a - b in Python, I get 1.1920928955078125e-06. However, I want 0.000001, which is the right answer after subtraction. Any help would be appreciated. Thank you in advance.

t = float(1562239482.739071)
T = float(1562239482.739072)
D = float(T - t)
print(float(D))

OR

t = 1562239482.739071
T = 1562239482.739072
D = T - t
print(D)

I get the same answer, 1.1920928955078125e-06, using both versions above. However, I want the result 0.000001.

Expected result: 0.000001
Actual result: 1.1920928955078125e-06 | This is a common problem with binary floating-point arithmetic: most decimal fractions cannot be represented exactly as floats, so the subtraction returns the nearest representable value rather than exactly 0.000001. Use the decimal module, which works in base 10 and gives an exact result here.
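A minimal sketch of the decimal approach (building the values from strings so they are stored exactly; the variable names mirror the question):

from decimal import Decimal

a = Decimal("1562239482.739072")
b = Decimal("1562239482.739071")
print(a - b)    # 0.000001

# If the inputs have to stay as floats, formatting the result to six
# decimal places also prints the value the question expects:
print(f"{1562239482.739072 - 1562239482.739071:.6f}")    # 0.000001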
How to use __setitem__ properly?

I want to make a data object:

class GameData:
    def __init__(self, data={}):
        self.data = data

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getattr__(self, item):
        return self.data[item]

    def __setattr__(self, key, value):
        self.data[key] = value

    def __repr__(self):
        return str(self.data)

When I create a GameData object, I get a RecursionError. How can I avoid __setitem__ calling itself? | In the assignment self.data = data, __setattr__ is called because self has no attribute called data at that moment. __setattr__ then calls __getattr__ to obtain the non-existing attribute data. __getattr__ itself looks up self.data again, which calls __getattr__ again, and so on. That is the recursion.

Use object.__setattr__(self, 'data', data) to do the assignment when implementing __setattr__:

class GameData:
    def __init__(self, data=None):
        object.__setattr__(self, 'data', {} if data is None else data)

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getattr__(self, item):
        return self.data[item]

    def __setattr__(self, key, value):
        self.data[key] = value

    def __repr__(self):
        return str(self.data)

For details, see the __getattr__ documentation.

Additionally, do not use a mutable object as a default parameter value, because the same {} object in the default argument would be shared between all GameData instances created without an explicit data argument.
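With the corrected class in place, a quick sanity check (the keys used here are just hypothetical examples) shows that item and attribute access both route through the inner dict without recursion:

gd = GameData()
gd["score"] = 10          # goes through __setitem__
gd.level = 2              # goes through __setattr__
print(gd.score)           # 10, via __getattr__
print(gd["level"])        # 2, via __getitem__
print(gd)                 # {'score': 10, 'level': 2}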
Python mysql.connector timeout

Here's a simple connection to a MySQL database using the mysql.connector module:

db = mysql.connector.connect(
    host=DB_SERVER,
    port=DB_PORT,
    user=DB_UNAME,
    passwd=DB_PASSWORD,
    db=DB_NAME)
db.connect()

mysqlCursor.execute(query)

I want to control two different timeouts. First, I want it to spend no longer than five seconds on the .connect() function. I've got that figured out. Second, I want it to spend no longer than one second on the .execute() function. How can I do that?

I'm the database administrator, so I can do something on that end if I need to. I'd prefer to change the timeout only for one particular MySQL user, though, and not for everyone, which is why I'm starting from the Python side.

Here's what I've found so far: the documentation for mysql.connector lists several timeout parameters. connect-timeout will set the timeout for the initial connection, but as far as I can tell, it won't set a query timeout. interactive-timeout will cause a timeout if there's no activity, but I don't think that means it will time out if the query takes too long to execute.

connect-timeout=seconds
    Connect timeout in seconds. On Linux this timeout is also used for waiting for the first answer from the server.
    (timeout has been replaced by connect-timeout, but timeout is still supported in MySQL 5.0 for backward compatibility.)

interactive-timeout=seconds
    Permit seconds of inactivity before closing the connection. The client's session wait_timeout variable is set to the value of the session interactive_timeout variable. | As of MySQL 5.7.8, a maximum execution time for SELECT statements (only) can be set per session. Set this immediately after connecting:

db = mysql.connector.connect(...)
cursor = db.cursor()

# Limit SELECTs to 1 second
cursor.execute("SET SESSION MAX_EXECUTION_TIME=1000")
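Combining the two timeouts, a minimal sketch of how this could look (the connection parameters and the query are placeholders; connection_timeout covers the five-second connect limit mentioned in the question):

import mysql.connector

db = mysql.connector.connect(
    host="localhost",                 # placeholder connection details
    port=3306,
    user="some_user",
    password="some_password",
    database="some_db",
    connection_timeout=5,             # cap establishing the connection at ~5 seconds
)
cursor = db.cursor()

# Cap SELECT statements at 1000 ms for this session (MySQL 5.7.8+)
cursor.execute("SET SESSION MAX_EXECUTION_TIME=1000")

try:
    cursor.execute("SELECT * FROM some_large_table")   # placeholder query
    rows = cursor.fetchall()
except mysql.connector.Error as err:
    # If the server interrupts the statement after exceeding the cap,
    # the connector surfaces it as a mysql.connector.Error.
    print("Query failed or was interrupted:", err)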
Relative Strength Index in python pandas

I am new to pandas. What is the best way to calculate the relative strength part in the RSI indicator in pandas? So far I got the following:

from pylab import *
import pandas as pd
import numpy as np

def Datapull(Stock):
    try:
        df = (pd.io.data.DataReader(Stock, 'yahoo', start='01/01/2010'))
        return df
        print 'Retrieved', Stock
        time.sleep(5)
    except Exception, e:
        print 'Main Loop', str(e)

def RSIfun(price, n=14):
    delta = price['Close'].diff()
    #-----------
    dUp=
    dDown=

    RolUp = pd.rolling_mean(dUp, n)
    RolDown = pd.rolling_mean(dDown, n).abs()

    RS = RolUp / RolDown
    rsi = 100.0 - (100.0 / (1.0 + RS))
    return rsi

Stock = 'AAPL'
df = Datapull(Stock)
RSIfun(df)

Am I doing it correctly so far? I am having trouble with the difference part of the equation where you separate out upward and downward calculations. | It is important to note that there are various ways of defining the RSI. It is commonly defined in at least two ways: using a simple moving average (SMA) as above, or using an exponential moving average (EMA). Here's a code snippet that calculates various definitions of RSI and plots them for comparison. I'm discarding the first row after taking the difference, since it is always NaN by definition.

Note that when using EMA one has to be careful: since it includes a memory going back to the beginning of the data, the result depends on where you start! For this reason, typically people will add some data at the beginning, say 100 time steps, and then cut off the first 100 RSI values.

In the plot below, one can see the difference between the RSI calculated using SMA and EMA: the SMA one tends to be more sensitive. Note that the RSI based on EMA has its first finite value at the first time step (which is the second time step of the original period, due to discarding the first row), whereas the RSI based on SMA has its first finite value at the 14th time step. This is because by default rolling_mean() only returns a finite value once there are enough values to fill the window.

import datetime
from typing import Callable

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader.data as web

# Window length for moving average
length = 14

# Dates
start, end = '2010-01-01', '2013-01-27'

# Get data
data = web.DataReader('AAPL', 'yahoo', start, end)
# Get just the adjusted close
close = data['Adj Close']

# Define function to calculate the RSI
def calc_rsi(over: pd.Series, fn_roll: Callable) -> pd.Series:
    # Get the difference in price from previous step
    delta = over.diff()
    # Get rid of the first row, which is NaN since it did not have a previous row to calculate the differences
    delta = delta[1:]

    # Make the positive gains (up) and negative gains (down) Series
    up, down = delta.clip(lower=0), delta.clip(upper=0).abs()

    roll_up, roll_down = fn_roll(up), fn_roll(down)
    rs = roll_up / roll_down
    rsi = 100.0 - (100.0 / (1.0 + rs))

    # Avoid division-by-zero if `roll_down` is zero
    # This prevents inf and/or nan values.
    rsi[:] = np.select([roll_down == 0, roll_up == 0, True], [100, 0, rsi])
    rsi.name = 'rsi'

    # Assert range
    valid_rsi = rsi[length - 1:]
    assert ((0 <= valid_rsi) & (valid_rsi <= 100)).all()
    # Note: rsi[:length - 1] is excluded from above assertion because it is NaN for SMA.

    return rsi

# Calculate RSI using MA of choice
# Reminder: Provide ≥ `1 + length` extra data points!
rsi_ema = calc_rsi(close, lambda s: s.ewm(span=length).mean())
rsi_sma = calc_rsi(close, lambda s: s.rolling(length).mean())
rsi_rma = calc_rsi(close, lambda s: s.ewm(alpha=1 / length).mean())  # Approximates TradingView.

# Compare graphically
plt.figure(figsize=(8, 6))
rsi_ema.plot(), rsi_sma.plot(), rsi_rma.plot()
plt.legend(['RSI via EMA/EWMA', 'RSI via SMA', 'RSI via RMA/SMMA/MMA (TradingView)'])
plt.show()
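For completeness, here is one way the two blank lines in the question's RSIfun could be filled in for the SMA variant, as a minimal sketch using current pandas syntax (pd.rolling_mean has since been removed in favour of .rolling().mean()):

import pandas as pd

def RSIfun(price: pd.DataFrame, n: int = 14) -> pd.Series:
    delta = price['Close'].diff()
    dUp = delta.clip(lower=0)            # upward moves; negative changes become 0
    dDown = delta.clip(upper=0).abs()    # downward moves as positive magnitudes
    RolUp = dUp.rolling(n).mean()        # replaces pd.rolling_mean(dUp, n)
    RolDown = dDown.rolling(n).mean()
    RS = RolUp / RolDown
    return 100.0 - (100.0 / (1.0 + RS))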