,4,7,,,,CC BY-SA 4.0
+420618,1,,,1/4/2021 8:38,,1,62,"Context:
+Creating a "middleware" between 2 services, I have to get data from Service Source and push it into Service ERP.
+There are multiple types of data: A, B, C. An integration process follows this script:
+
+- Get the pending IDs for A
+- Get an A for each of those IDs
+- For each of those A, convert it into an ERP A.
+- Submit the A to the ERP.
+- If any problem occurred, cancel this A.
+- Otherwise, validate this A.
+
+With a little logging and error handling around each of those steps.
+The process stays the same for A, B and C.
+I would like to refactor to avoid repetition and having to modify multiple methods for a simple change in the overall behavior.
+class Program
+{
+ static IServiceMock_Source ServiceSource;
+ static IServiceMock_ERP ServiceERP;
+
+ static void Main(string[] args)
+ {
+ Integrator_A();
+
+ //Integrator_B();
+ //Integrator_C();
+ }
+
+ public static bool Integrator_A()
+ {
+ var As = GetPending_A();
+
+ if (!As.Any())
+ {
+ Logger.Log(typeof(Program),
+ Level.Debug, "There is no As to integrate", null);
+ return false;
+ }
+ Creator_A(As.ToArray());
+ return true;
+ }
+ private static List<A_Entity> GetPending_A()
+ {
+ var As = new List<A_Entity>();
+
+ int[] AsIds = null;
+ try
+ {
+ AsIds = ServiceSource.GetA_Pending();
+ }
+ catch (Exception e)
+ {
+ Logger.Log(typeof(Program),
+ Level.Error,
+ $"ERR ServiceSource.GetA_Pending : " + e
+ , e);
+ }
+
+ if (AsIds.Any())
+ {
+ Logger.Log(typeof(Program),
+ Level.Info,
+ $"AsIds : [{string.Join(", ", AsIds)}]"
+ , null);
+ }
+
+ foreach (var id in AsIds)
+ {
+ A_Entity tempA;
+ try
+ {
+ tempA = ServiceSource.GetA(id);
+ As.Add(tempA);
+ }
+ catch (Exception e)
+ {
+ Logger.Log(typeof(Program),
+ Level.Error,
+ $"ERR ServiceSource.GetA, Impossible to get l'ID[{id}] : " + e
+ , e);
+ }
+ }
+ return As;
+ }
+ private static void Creator_A(params A_Entity[] a_Entities)
+ {
+ foreach (var entity in a_Entities)
+ {
+ var isACreated = CreateAInERP(entity, out string error);
+
+ if (!isACreated)
+ {
+ var err = $"Failed A creation" +
+ $"[{entity.A_EntityEntityDbId}, entity.otherId, {entity.ProcessableEntityDbId}] [..]" +
+ $"\nError : \n{error}";
+ Logger.Log(typeof(Program), Level.Error, err, null);
+
+ ServiceSource.CancelA(entity.A_EntityEntityDbId, true);
+
+ var source = $"MachineName:{System.Environment.MachineName}" +
+ $", App:{System.AppDomain.CurrentDomain.FriendlyName}" +
+ $", Path:{Environment.GetCommandLineArgs()[0]}"
+ ;
+
+ //ServiceSource.CreateErrorMessage(new ErrorMessageDTO
+ //{
+ // ProcessID = entity.ProcessInformation.ProcessInformationId,
+ // Source = source.Truncate(500),
+ // Category = this.GetType().FullName,
+ // Query = "CreateAInERP()",
+ // Message = err,
+ //});
+ }
+ ServiceSource.ValideA(entity.A_EntityEntityDbId, isACreated);
+ }
+ }
+ private static bool CreateAInERP(A_Entity entity, out string error)
+ {
+ error = "";
+ A_In erpItem = null;
+ try
+ {
+ erpItem = Converter.ToERP(entity);
+ }
+ catch (Exception e)
+ {
+ error = "Erreur projection: CreateAInERP." +
+ $" DB_id = {entity.A_EntityEntityDbId}." + e;
+ return false;
+ }
+
+ A_Response result;
+ try
+ {
+ result = ServiceERP.Submit_A(
+ new A_Request
+ {
+ Context = new Context { },
+ A_In = erpItem,
+ });
+ }
+ catch (Exception e)
+ {// Timeout and service exception
+ error = "Error Integration ERP: Submit_A." +
+ $" DB_id = {entity.A_EntityEntityDbId}." + e;
+ return false;
+ }
+
+ if (result.ErrorCode != "OK")
+ {// bizness Error
+ error = "Error Integration: " +
+ $"DB_id = {entity.A_EntityEntityDbId}. " +
+ $"[{result.ErrorCode}] : result.errorMsg";
+ return false;
+ }
+ return true;
+ }
+}
+
+public class Converter
+{
+ public static A_In ToERP(A_Entity entity)
+ { // Complexe mapping of ERP entities
+ return new A_In();
+ }
+ public static B_In ToERP(B_Entity entity)=> new B_In();
+ // internal static C_In ToERP(C_Entity entity)=> new C_In();
+}
+
+This code has huge repetition between processes A and B.
+You will notice that the following code is 100% the same as for A, with only a type difference.
+In fact the real code for B, C and D is written by copy-pasting the A block and using Ctrl+R+R to rename a few times.
+public static bool Integrator_B()
+{
+ var Bs = GetPending_B();
+
+ if (!Bs.Any())
+ {
+ Logger.Log(typeof(Program),
+ Level.Debug, "There is no Bs to integrate", null);
+ return false;
+ }
+ Creator_B(Bs.ToArray());
+ return true;
+}
+private static List<B_Entity> GetPending_B()
+{
+ var Bs = new List<B_Entity>();
+
+ int[] BsIds = null;
+ try
+ {
+ BsIds = ServiceSource.GetB_Pending();
+ }
+ catch (Exception e)
+ {
+ Logger.Log(typeof(Program),
+ Level.Error,
+ $"ERR ServiceSource.GetB_Pending : " + e
+ , e);
+ }
+
+ if (BsIds.Any())
+ {
+ Logger.Log(typeof(Program),
+ Level.Info,
+ $"BsIds : [{string.Join(", ", BsIds)}]"
+ , null);
+ }
+
+ foreach (var id in BsIds)
+ {
+ B_Entity tempB;
+ try
+ {
+ tempB = ServiceSource.GetB(id);
+ Bs.Add(tempB);
+ }
+ catch (Exception e)
+ {
+ Logger.Log(typeof(Program),
+ Level.Error,
+ $"ERR ServiceSource.GetB, Impossible to get ID[{id}] : " + e
+ , e);
+ }
+ }
+ return Bs;
+}
+private static void Creator_B(params B_Entity[] b_Entities)
+{
+ foreach (var entity in b_Entities)
+ {
+ var isBCreated = CreateBInERP(entity, out string error);
+
+ if (!isBCreated)
+ {
+ var err = $"Failed B creation" +
+ $"[{entity.B_EntityEntityDbId}, entity.otherId, {entity.ProcessableEntityDbId}] [..]" +
+ $"\nError : \n{error}";
+ Logger.Log(typeof(Program), Level.Error, err, null);
+
+ ServiceSource.CancelB(entity.B_EntityEntityDbId, true);
+
+ var source = $"MachineName:{System.Environment.MachineName}" +
+ $", App:{System.AppDomain.CurrentDomain.FriendlyName}" +
+ $", Path:{Environment.GetCommandLineArgs()[0]}"
+ ;
+
+ //ServiceSource.CreateErrorMessage(new ErrorMessageDTO
+ //{
+ // ProcessID = entity.ProcessInformation.ProcessInformationId,
+ // Source = source.Truncate(500),
+ // Category = this.GetType().FullName,
+ // Query = "CreateBInERP()",
+ // Message = err,
+ //});
+ }
+ ServiceSource.ValideB(entity.B_EntityEntityDbId, isBCreated);
+ }
+}
+private static bool CreateBInERP(B_Entity entity, out string error)
+{
+ error = "";
+ B_In erpItem = null;
+ try
+ {
+ erpItem = Converter.ToERP(entity);
+ }
+ catch (Exception e)
+ {
+ error = "Erreur projection: CreateBInERP." +
+ $" DB_id = {entity.B_EntityEntityDbId}." + e;
+ return false;
+ }
+
+ B_Response result;
+ try
+ {
+ result = ServiceERP.Submit_B(
+ new B_Request
+ {
+ Context = new Context { },
+ B_In = erpItem,
+ });
+ }
+ catch (Exception e)
+ {// Timeout and service exception
+ error = "Error Integration ERP: Submit_B." +
+ $" DB_id = {entity.B_EntityEntityDbId}." + e;
+ return false;
+ }
+
+ if (result.ErrorCode != "OK")
+ {// bizness Error
+ error = "Error Integration: " +
+ $"DB_id = {entity.B_EntityEntityDbId}. " +
+ $"[{result.ErrorCode}] : result.errorMsg";
+ return false;
+ }
+
+ return true;
+}
+
+Here is the code used for the mockup. It's out of the modification scope.
+But it's needed so that this MRE has no compilation errors.
+public interface IServiceMock_ERP
+{
+ public A_Response Submit_A(A_Request request);
+ public B_Response Submit_B(B_Request request);
+}
+
+public class A_Request
+{
+ public Context Context { get; set; }
+ public A_In A_In { get; set; }
+}
+public class B_Request
+{
+ public Context Context { get; set; }
+ public B_In B_In { get; set; }
+}
+public class Context { }
+public class A_In
+{
+ public string RealDataHere { get; set; }
+}
+public class B_In
+{
+ public string RealDataHere { get; set; }
+}
+public class A_Response
+{
+ public string ErrorCode { get; set; }
+ public A_Out A_Out { get; set; }
+}
+public class B_Response
+{
+ public string ErrorCode { get; set; }
+ public B_Out B_Out { get; set; }
+}
+public class A_Out
+{
+ public string Error { get; set; }
+}
+public class B_Out
+{
+ public string Error { get; set; }
+}
+
+public interface IServiceMock_Source
+{
+ public int[] GetA_Pending();
+ public A_Entity GetA(int id_A);
+ public bool CancelA(int id_A, bool value);
+ public bool ValideA(int id_A, bool value);
+
+ public int[] GetB_Pending();
+ public B_Entity GetB(int id_B);
+ public bool CancelB(int id_B, bool value);
+ public bool ValideB(int id_B, bool value);
+
+ // etc..
+ //public int[] GetC_Pending();
+ //public C_Entity GetC(int id_C);
+ //public bool CancelC(int id_C, bool value);
+ //public bool ValideC(int id_C, bool value);
+
+}
+public class A_Entity : ProcessableEntity
+{
+ public int A_EntityEntityDbId { get; set; }
+ public string RealDataHere { get; set; }
+}
+public class B_Entity : ProcessableEntity
+{
+ public int B_EntityEntityDbId { get; set; }
+ public string Rename { get; set; }
+}
+
+My question is:
+How can I refactor this to avoid repeating the same process in Integrator_B, Integrator_C, etc.?
+What have I tried:
+I went down the road of Func, Action, and delegate, giving code like the following.
+public bool Integration_Generic<T>(Func<List<T>> GetItems, Action<T[]> Integrator, string NoElementErreurMessage)
+{
+ var items = GetItems();
+ if (!items.Any())
+ {
+ Logger.Log(this.GetType(), Level.Debug, $"Aucun {NoElementErreurMessage} à integrer.", null);
+ return false;
+ }
+ Integrator(items.ToArray());
+ return true;
+}
+private List<T> GetPending<T>(Func<int[]> PendingIds, Func<int, T> TGetter)
+{
+ var items = new List<T>();
+
+ int[] itemsIds = null;
+ try
+ {
+ itemsIds = PendingIds();
+ }
+ catch (Exception e)
+ {
+ Logger.Log(typeof(Integrateur),
+ Level.Error,
+ $"ERR {this.GetType().Namespace}.{PendingIds.Method.Name} : " + e
+ , e);
+ }
+
+ if (itemsIds.Any())
+ {
+ Logger.Log(typeof(Integrateur),
+ Level.Debug,
+ $"{typeof(T).Name} itemsIds : [{string.Join(", ", itemsIds)}]"
+ , null);
+ }
+
+ foreach (var id in itemsIds)
+ {
+ T tempT;
+ try
+ {
+ tempT = TGetter(id);
+ items.Add(tempT);
+ }
+ catch (Exception e)
+ {
+ Logger.Log(typeof(Integrateur),
+ Level.Error,
+ $"ERR {this.GetType().Namespace}.{TGetter.Method.Name}, Impossible de recuperer l'ID[{id}] : " + e
+ , e);
+ }
+ }
+ return items;
+}
+
+delegate V Creator_Delegate<T, U, V>(T input, out U output);
+
+But there are too many compilation errors for this code to be functional. Getting a fresh start may be better than fixing my attempt to tinker with things I don't fully understand.
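+To show the shape I have in mind (in case a concrete target helps), here is a rough, untested sketch of a single generic pipeline. Every name in it is a placeholder, the logging is trimmed, and I am not claiming this is the right design:
+public static bool Integrate<TEntity>(
+    string entityLabel,                                  // e.g. "A", only used in log messages
+    Func<int[]> getPendingIds,                           // e.g. ServiceSource.GetA_Pending
+    Func<int, TEntity> getById,                          // e.g. ServiceSource.GetA
+    Func<TEntity, (bool ok, string error)> createInErp,  // wraps Converter.ToERP + ServiceERP.Submit_X
+    Action<TEntity, bool> validate,                      // e.g. ValideA
+    Action<TEntity> cancel)                              // e.g. CancelA
+{
+    int[] ids;
+    try
+    {
+        ids = getPendingIds();
+    }
+    catch (Exception e)
+    {
+        Logger.Log(typeof(Program), Level.Error, $"ERR {getPendingIds.Method.Name} : {e}", e);
+        return false;
+    }
+
+    if (!ids.Any())
+    {
+        Logger.Log(typeof(Program), Level.Debug, $"There is no {entityLabel} to integrate", null);
+        return false;
+    }
+
+    foreach (var id in ids)
+    {
+        TEntity entity;
+        try
+        {
+            entity = getById(id);
+        }
+        catch (Exception e)
+        {
+            Logger.Log(typeof(Program), Level.Error, $"ERR {getById.Method.Name}, impossible to get ID[{id}] : {e}", e);
+            continue;
+        }
+
+        var (ok, error) = createInErp(entity);
+        if (!ok)
+        {
+            Logger.Log(typeof(Program), Level.Error, $"Failed {entityLabel} creation [{id}]\nError :\n{error}", null);
+            cancel(entity);
+        }
+        validate(entity, ok);
+    }
+    return true;
+}
+
+// Hypothetical call site for A; B and C would only differ in the lambdas:
+// Integrate<A_Entity>("A",
+//     ServiceSource.GetA_Pending,
+//     ServiceSource.GetA,
+//     e => { var ok = CreateAInERP(e, out var err); return (ok, err); },
+//     (e, ok) => ServiceSource.ValideA(e.A_EntityEntityDbId, ok),
+//     e => ServiceSource.CancelA(e.A_EntityEntityDbId, true));
+The Creator/CreateXInERP part could probably be wrapped the same way, so each new type would only need its lambdas and a Converter.ToERP overload.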
+",382394,,397719,,1/6/2022 7:16,1/6/2022 7:16,Refactoring similar integration service code block,,1,6,,,,CC BY-SA 4.0
+420620,1,420621,,1/4/2021 9:31,,4,53,"In the UML specification 2.5.1 (Link) on page 117 it is specified that the notation of operations (methods) should look like the following:
+[<visibility>] <name> ‘(‘ [<parameter-list>] ‘)’ [‘:’ [<return-type>] [‘[‘ <multiplicity-range> ‘]’]
+ [‘{‘ <oper-property> [‘,’ <oper-property>]* ‘}’]]
+
+What irritates me are the blanks. If I set them as described in the specification above, then they are unfortunately not consistent with the example found in the same chapter on page 119. Here the example looks like the following:
++createWindow (location: Coordinates, container: Container [0..1]): Window
+
+See for instance: In the example, there is no blank between the <visibility> and the <name> but in the specification, there is a blank between them.
+Can someone help me understand this inconsistency? Why are the blanks set so strangely anyway? If one wants to make it 100% correct, how would the blanks be set?
+Kind Regards and Thanks,
+Raphael
+",382396,,,,,1/4/2021 23:36,UML v2.5.1 correct notation of blanks in operations (methods)?,,1,0,,,,CC BY-SA 4.0
+420623,1,,,1/4/2021 9:48,,0,131,"I have a method GetReportAsync
that takes one XML and generates another:
+public async Task<string> GetReportAsync(string id)
+{
+ // Get Order.xml from a file database
+ var order = await _dataService.GetOrderAsync(id);
+
+ var report = _reportGenerator.WriteReport(order);
+
+ var xml = SerializeToXml(report);
+
+ return xml;
+}
+
+For automated testing, I write XMLUnit.NET snapshot tests that assert that an order will match a certain reference XML document, a stored snapshot.
+[TestMethod]
+[DataRow("12345")]
+[DataRow("67890")]
+public async Task Snapshots_Should_Match(string id)
+{
+ var actual = await _testClass.GetReportAsync(id);
+
+ var expected = Input.FromFile($@"Snapshots\__snapshots__\{id}.xml");
+
+ var diff = DiffBuilder
+ .Compare(expected)
+ .WithTest(actual)
+ .Build();
+
+ Assert.IsFalse(diff.HasDifferences());
+}
+
+Now, I realize that the Order.xml file can be retrieved in two ways:
+
+- Alternative A: Store a number of Order.xml files in the project and implement _dataService.GetOrderAsync to read from these files.
+- Alternative B: Get the actual Order.xml from a test (or production) database, just like in the real implementation.
+
+Alternative A will assert that the method works given the order files in their exact state from when this snapshot test was written. However, I struggle to see the reason for such a guarantee.
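+To be concrete, Alternative A would look roughly like this on my side (the IDataService, Order and ReportService names are assumptions mirroring the code above, and the folder layout and XML deserialization are invented for the sketch):
+using System.IO;
+using System.Threading.Tasks;
+using System.Xml.Serialization;
+
+// Test-only stub: reads stored Order.xml files instead of the real file database.
+public class FileBackedDataService : IDataService
+{
+    public async Task<Order> GetOrderAsync(string id)
+    {
+        var path = Path.Combine("Snapshots", "__orders__", $"{id}.xml");
+        var xml = await File.ReadAllTextAsync(path);
+
+        var serializer = new XmlSerializer(typeof(Order));
+        using var reader = new StringReader(xml);
+        return (Order)serializer.Deserialize(reader);
+    }
+}
+
+// In the test setup, the subject under test would then be wired with the stub:
+// _testClass = new ReportService(new FileBackedDataService(), new ReportGenerator());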
+Alternative B will give me tests that fail if the data service for some reason changes its response, possibly because of a non-backwards compatible change they introduce. To me this seems to give me more value than alternative A. However I do see that testing against a real database would possibly break some fundamental rules.
+Furthermore, since the method is not writing to the database, then why not test against the production database instead of a test database?
+",339678,,339678,,1/4/2021 15:13,2/4/2022 21:08,Should snapshot tests compare against stored test data or data from a database?,,1,4,,,,CC BY-SA 4.0
+420625,1,,,1/4/2021 10:39,,-3,391,"In many discussions I learned that it is undesirable (forbidden) to expose IQueryable from the Repository pattern.
+What is the best practice then for server-side filtering and paging?
+",110839,,,,,1/4/2021 11:44,"How to filter and add paging, if we must not expose IQueryable at Repository pattern?",,1,5,,,,CC BY-SA 4.0
+420629,1,,,1/4/2021 11:47,,1,325,"An Aggregate Root should always have a unique ID within the bounded context. Typically the examples one finds use a GUID for this to ensure global uniqueness.
+However, consider a bounded context for a Chat. In this case I deem messages and chats to be their own individual aggregate roots. One may consider Message an entity of Chat; however, if messages are to grow without bounds, this is infeasible.
+Therefore a Message would hold a reference to the Chat to which it belongs, by ID. In this case I would need a large enough message ID to ensure that it is unique w.r.t. all other messages, independent of the Chat.
+I am wondering if it is bad practice to instead make a composite key for Message of the form (ChatId, MessageId). This would ensure uniqueness, and at the same time I do not need MessageId to be as large as mentioned above, thereby saving some space.
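+To make the idea concrete, the identifier I have in mind would look roughly like this (C# is used only for illustration; the field names and the per-chat sequence number are assumptions, not a fixed design):
+using System;
+
+public readonly struct MessageId : IEquatable<MessageId>
+{
+    public Guid ChatId { get; }          // the owning Chat aggregate
+    public long SequencePerChat { get; } // only needs to be unique within that chat
+
+    public MessageId(Guid chatId, long sequencePerChat)
+    {
+        ChatId = chatId;
+        SequencePerChat = sequencePerChat;
+    }
+
+    public bool Equals(MessageId other) =>
+        ChatId == other.ChatId && SequencePerChat == other.SequencePerChat;
+
+    public override bool Equals(object obj) => obj is MessageId other && Equals(other);
+
+    public override int GetHashCode() => HashCode.Combine(ChatId, SequencePerChat);
+
+    public override string ToString() => $"{ChatId}:{SequencePerChat}";
+}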
+",382375,,,,,1/4/2021 15:00,Composite Id based on another Aggregate root?,,2,12,,,,CC BY-SA 4.0
+420630,1,,,1/4/2021 12:09,,1,121,"Situation
+Right now, I have come to the realization that at my present position I am not completing requests at a regular interval; request completions are spaced out. To approach my true ability, I want to try, as a first attempt, to push out 3 moderate-sized commits in the span of 1 workday (8 hours).
+Scope
+It is fair to mention that I work with vanilla PHP, MySQL and Oracle with no framework other than jQuery. And that at present I have taken ample research time, that I can hone in on.
+Definition
+One commit corresponds to an incoming support request received by email, some of which are outstanding but not ready for commit. Right now I am including uncommitted finished support requests that will be pushed through first.
+
+I am asking for feedback from the community here as to the overall effectiveness of my process, within a span of 2 hours maximum, given the information provided above.
+
+Proposed 13 Step Solution (1 Hour 30 Minutes - Max 2 Hours)
+Proposed solution to enhance my productivity (check back immediately by email with the manager or requester if I spend more than 10 minutes on any portion of a step without comfortable progress)
+Preparatory work for the process of steps I have identified, with bullet points specified first
+Identify 4 support requests in the inbox and email backlog to resolve, considering priority and time to completion. Check with the manager or requester throughout.
+For each support request
+Capture problem and verify understanding by email to requester (20 Minutes)
+10 Minutes
+0) Read the email one or two times and write in my own words the problem requested. Open web page of target request to verify problem encountered. Write down questions and clarification needed in step 1...
+5 Minutes
+
+- Create bulleted points of the request to capture the scope of the current problem (when I do this... this happens), along with the bulleted requirement points that I created (this should happen instead...)
+
+5 Minutes
+
+Once received, begin formulating a phrased, bulleted requirements list; break down each specific requirement and ask the requester whether the phrased bulleted requirements capture the scope of the solution
+
+Email the requester a brief message addressing 0), 1) and 2), asking if I understand both the problem and the solution correctly and completely; wait for a response and make the necessary revisions before proceeding.
+
+
+Test Case Formulation (10 minutes)
+5 Minutes
+
+- Write test case list for proposed solution
+
+5 Minutes
+
+- Write a test plan that adheres to each test case (directly driven by requirements)
+
+Formulate solution (20 minutes)
+10 minutes
+6) Write pseudocode and draw flow chart (half page, one page respectively)
+10 minutes
+7) Write out language specific (PHP primarily, and SQL) lines of code to match step 5
+Implementation/Test and Validation (40 minutes)
+30 Minutes
+
+- Enter code for the list of bulleted feature requirement points and incrementally verify the inserted code lines in the proper code regions by testing against the test cases
+
+10 minutes
+
+Once all cases have passed the required functionality...
+Validate the complete implemented solution myself against the principal desired function, to check whether the understood and desired functionality was met
+Once validated, share the workable solution with the requester or manager to independently validate that the solution functions as intended
+
+
+Revise solution (10 minutes)
+
+- If not, then get clarification and revise solution starting from step 6
+
+Commit # (5 minutes)
+
+- If yes, then begin commit process to appropriate branches
+
+Move to Next Support Request
+
+- Move to next support request
+
+Total planned time for uniform task (1HR 45 MIN up to 2 HR)
+",207922,,207922,,1/4/2021 17:12,1/4/2021 17:12,Productivity - Pushing towards 3 (at max 4) completed requests followed by code commits in a span of 8 hour interval on novel support requests,,2,2,,,,CC BY-SA 4.0
+420643,1,420729,,1/4/2021 16:27,,1,144,"I wrote some software as a student. All functionality was in the software and it was a 30-day fully functional version. After some years there were cracks around and my income was 10% of what it had been. This was when I decided to switch the paradigm to a full version vs. a demo version with limited functionality (the functions weren't even in the EXE, thanks to compiler switches). This in my case ended all piracy and worked very well.
+Now you could ask what if a customer uploads his full version to a crack website? For this case I compiled the customer's full address visibly and invisibly into the EXE file so that I could see which customer was a bad boy...
+After some more years I had a new problem: antivirus software. Since my software can set keyboard shortcuts, the heuristic algorithms of some antivirus apps started complaining. So I sent the demo version EXE to the antivirus companies to mark it as "safe". This worked very well. But only for the demo version, which is fixed in byte size. When I compile the customer's personal data into the EXE file, the file size varies a bit, so the checksum differs and the EXE file isn't marked as "safe" by the antivirus software anymore, and the complaining starts again for the customers.
+Does anyone have an idea how I could solve this? I can't add a separate file because this could be deleted by the customer, of course.
+Thanks in advance.
+",382430,,,,,1/7/2021 5:55,"Copy protection for Windows software for the case ""demo version vs. full version""",,3,10,,,,CC BY-SA 4.0
+420647,1,420705,,1/4/2021 17:45,,1,52,"To simplify, I'll say I'm working with Lists (each containing an array of Movie entries) and with Movies themselves.
+Problem
+I have two options to update both the note in the list, and the global note of the Movie.
+Single Calls
+POST www.example.com/api/lists/:id/ // Creates a new movie entry in the list, updates Movie's note
+
+PUT www.example.com/api/lists/:id/movies/:movieId // Updates entry, updates the note in Movie
+
+DELETE www.example.com/api/lists/:id/movies/:movieId // Delete the entry, remove the note from Movie
+
+Multiple Calls
+POST www.example.com/api/lists/:id // Create a new list
++
+PUT www.example.com/api/movies/:id // Updates the note (total =+ note, nbNotes++)
+
+
+PUT www.example.com/api/lists/:id/movies/:movieId // Updates the Movie's note in the list
++
+PUT www.example.com/api/movies/:id // Updates the Movie's notes (total =+ (new-old))
+
+DELETE www.example.com/api/lists/:id/movies/:movieId // Remove the entry
++
+PUT www.example.com/api/movies/:id // Updates the Movie's notes (total =- note, nbNotes--)
+
+I would tend to use the Single Calls but have no idea if modifying another resource is correct. (Considering that the Movie's note will probably never be modified by its PUT API anywhere else.)
+I tried to take into account other StackExchange's answers but they seem more focused on the retrieval of information in the frontend whereas mine is more focused on updating data.
+
+It feels like the Single Calls introduced some concurrency problems in my implementation (or I coded it very poorly), as sometimes the note updates negatively (an update with note: null decreases the total note, because an entry in a list may have no note).
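+For what it's worth, the behaviour I expected from the Single Calls option is roughly the following (C#/EF Core used only as an illustration, every name is invented, and the nbNotes bookkeeping is omitted): both writes happen in one transaction, so the entry's note and the Movie's global note cannot drift apart.
+using System;
+using System.Threading.Tasks;
+using Microsoft.AspNetCore.Mvc;
+using Microsoft.EntityFrameworkCore;
+
+[ApiController]
+public class ListEntriesController : ControllerBase
+{
+    private readonly AppDbContext _db; // hypothetical DbContext with Movies and ListEntries sets
+
+    public ListEntriesController(AppDbContext db) => _db = db;
+
+    [HttpPut("api/lists/{listId}/movies/{movieId}")]
+    public async Task<IActionResult> UpdateEntry(Guid listId, Guid movieId, [FromBody] NoteDto body)
+    {
+        await using var tx = await _db.Database.BeginTransactionAsync();
+
+        var entry = await _db.ListEntries.SingleAsync(e => e.ListId == listId && e.MovieId == movieId);
+        var movie = await _db.Movies.SingleAsync(m => m.Id == movieId);
+
+        movie.TotalNote += (body.Note ?? 0) - (entry.Note ?? 0); // apply only the difference
+        entry.Note = body.Note;
+
+        await _db.SaveChangesAsync();
+        await tx.CommitAsync();
+        return NoContent();
+    }
+}
+
+public class NoteDto { public int? Note { get; set; } }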
+
+",382433,,342873,,1/5/2021 21:24,1/5/2021 21:24,Use multiple or a single endpoint to modify different related ressources,,1,0,,,,CC BY-SA 4.0
+420650,1,,,1/4/2021 19:05,,1,82,"I'm designing a new language and the package-management system for it (something like NPM, Cargo, Pip, Gem, Cpan, Cabal, NuGet or the like).
+I'm trying to decide what's a good way to handle the versioning of a package when only part of it is updated. I'm also curious to learn what's the best way to solve this problem with existing package managers.
+Example of the problem
+Let's suppose a package exports some types and functions that many other packages use:
+# package: string 1.0
+
+type String {
+ length: Number,
+ data: byte*
+}
+
+function from_NUL_terminated( data: byte[] ) -> String { ... }
+function to_NUL_terminated( string: String ) -> byte[] { ... }
+function from_repeated_char( length: Number, char: byte ) -> String { ... }
+function get_UTF8_length( string: String ) -> Number { ... }
+# ...
+
+At some point, something in the "string" package needs to be changed. For instance a bug in from_NUL_terminated() gets fixed, or a new function get_UTF16_length() is added.
+Because of this change, a new version of the "string" package is released: string 1.1.
+This makes everything incompatible between the two versions: the language considers String from "string 1.0" a different type from String from "string 1.1". Even though the String type didn't change: only other stuff in that package did.
+A consequence is that packages that depend on a different version of "string" cannot pass Strings to each other. A little function that is only used internally by a few packages causes a major split in the ecosystem of the language.
+Obviously this problem must be avoided. How?
+Further thoughts about the problem
+From the example above it sounds like only types need to be protected from this issue. That's practical in most cases, but it's not technically correct. Ideally functions that don't change shouldn't be re-released in the new version either.
+It sounds like every item in a package should have its own versioning, rather than the package itself: every type, every function, every piece of data or metadata.
+What's the solution?
+How do existing languages and their package managers handle this situation?
+And how could I handle it for my new language and its new package manager? Can the package manager fix this problem on its own for any language, or would it need support from the language to do things properly?
+Addendum: a minimal example showing the problem in Node.js
+I created a project in Rust where two different versions of the crate "mystr" are used by the "main" crate and the "printerlib" crate: https://github.com/BlueNebulaDev/rust-version-test . (a crate is a package in Rust terminology)
+The "mystr" crate exposes a type MyStr. This type is defined identically in both versions. "mystr 2.0.0" also exposes a function that wasn't available in the previous version: mystr::from_slice().
+The program can't compile, because the "main" package creates a mystr(1.0.0)::MyStr object and tries to pass it to a function that expects a mystr(2.0.0)::MyStr object. However, MyStr never changes between the two versions: it's only other functions in the same crate that change.
+I also created a Node.js project showing the same issue in that environment: https://github.com/BlueNebulaDev/node-version-test/ .
+",382435,,382435,,1/4/2021 22:48,1/4/2021 22:48,How to manage versioning for changes that only affect some pieces of a package?,,1,2,,,,CC BY-SA 4.0
+420655,1,,,1/4/2021 20:26,,1,407,"I want to develop an end-to-end machine learning application where data will be in GPU memory and computations will run on the GPU. A stateless RESTful service with a database is not desirable, since the traffic between GPU memory and the database will destroy the "purpose" of it being fast.
+The way I see it is that I need a way to "serve" the class (let's call it the experiment class) which has the data and the methods, and then call them using REST APIs.
+Right now I am using FastAPI and initialize the experiment class in it, which I believe is not optimal. My class (as well as the data) lives in the FastAPI runtime. Kinda like:
+from experiment_class import experiment_class
+from fastapi import FastAPI
+
+app = FastAPI()
+my_experiment = experiment_class()
+
+@app.get("/load_csv")
+def load_csv():
+    # the experiment object (and its data on the GPU) lives inside the FastAPI process
+    my_experiment.load_csv("some_file_path")
+
+# do some more on the data
+...
+
+There are two problems I am having a hard time with,
+One of them is the terminology:
+
+- Is this really a stateful application?
+- Is there a word to describe what I am doing? Is this a "Model, View, Controller" design, can it be a simple "Server-Client" or is it something completely different?
+- Do I need a "Web-server", a "Web-framework" or a "Web-service" for this?
+
+Another one is what technology I can use for this :
+
+- Is it okay to use FastAPI like this?
+- Do I set up an RPC server (Remote Procedure Call) and call it using Rest API?
+- Is WSGI or an ASGI server suitable for this task?
+- Are Django, Flask, Tornado like web frameworks only used for stateless apps? Because nearly all of the examples are.
+- Do I stick to bare bone Python where I use threads or BaseManager servers?
+
+P.S. What I mean by end-to-end machine learning is that I should be able to load data, process it, and give it to the model for training, all without leaving GPU memory. You can think of a Jupyter notebook, but we call the cells via a REST API.
+",382445,,,,,1/4/2021 22:44,Best approach for developing a stateful computation-heavy application with a rest-api interface using python?,,1,0,,,,CC BY-SA 4.0
+420662,1,,,1/5/2021 0:17,,2,599,"I've read a number of conflicting articles as to whether microservices should share a database. How and when should microservices communicate?
+Someone posed the example of 2 microservices:
+
+- Employee
+- Department
+
+Suppose the Employee microservice needs information about a department.
+
+- Should these microservices share a database?
+- Should they communicate over REST?
+- Should they duplicate data in each of their own databases?
+
+",380999,,319783,,1/5/2021 4:07,1/5/2021 12:24,"In a micro service architecture, how should two services communicate with each other? Shared database? REST calls?",,3,4,,,,CC BY-SA 4.0
+420663,1,420683,,1/5/2021 1:25,,2,162,"Let's say a process (P1) is asking for 100 MB of memory, and the RAM looks like this:
+[[50 MB free] [USED] [60 MB free] [USED]]
+
+Since there is technically enough memory available (110 MB free), what would happen? According to some sources I saw online, the OS will just refuse to allocate the memory, but then again, isn't Linux only supposed to throw a memory error when there isn't enough memory?
+Thanks
+",375680,,,,,1/6/2021 19:50,"Can the operating system ""break up"" a memory allocation (Linux)?",,2,3,1,,,CC BY-SA 4.0
+420665,1,420667,,1/5/2021 1:41,,5,822,"I wrote this valid piece of code, which made me wonder if there was a name for it:
+public class GenericObject<T> {
+ public T Obj { get; set; }
+}
+public class DerivedClass: GenericObject<DerivedClass> { }
+
+This leads to the capability of:
+var x = new DerivedClass();
+x.Obj = x;
+x.Obj.Obj = x;
+x.Obj.Obj.Obj = x;
+// ...
+x.Obj.Obj.Obj.Obj.Obj.Obj.Obj.Obj.Obj.Obj...Obj = x;
+
+Which is sure to raise a lot of eyebrows depending on the use case.
+
+Is there a name for this? If so, what is it called, and what is a practical application?
+",319749,,319749,,1/5/2021 18:56,1/5/2021 18:56,Is there a name for this construct with generics?,,2,5,,,,CC BY-SA 4.0
+420666,1,,,1/5/2021 1:49,,1,147,"I am working on a project which connects to different data sources and fetches data. The problem is that each of these data sources needs different parameters to fetch the data:
+s3 = S3(ACCESS_KEY, SECRET_KEY, BUCKET, HOST)
+db = DB(HOST, USERNAME, PASSWORD, DB_NAME, SCHEMA)
+sftp = SFTP(HOST, USERNAME, PASSWORD)
+
+
+The fetch data function also has a different signature for each:
+s3.fetch_data(folder_path, filename)
+db.fetch_data(table_name, filter_args)
+sftp.fetch_data(file_path)
+
+How do I design a common interface that can stream data from and to any of the above data sources (defined dynamically via a config)? Is there a design pattern that addresses this problem?
+I have looked into the strategy pattern, but I assume that it applies to cases where the behavior changes while the is-a relationship prevails.
+In the case of the repository pattern, there needs to be a common object across multiple storages.
+Neither case applies here.
+",285945,,285945,,1/5/2021 2:29,1/6/2021 14:49,Creating an interface that connects to different data sources,,2,3,,,,CC BY-SA 4.0
+420669,1,,,1/5/2021 3:13,,2,1145,"We have a service (let's say FileService) that provides an API to store/download files into various stores (S3, local file system, etc.).
+We have a scenario where one microservice (Service A) writes a file to S3 via FileService and the same file needs to be read by another microservice (Service B). In an ideal scenario, Service A will have an API exposed to read/download this file; Service B can leverage this API and be able to read it.
+However, due to the file(s) being huge, we wanted to see if it's OK to have Service B read/download the file directly via FileService. (Given that Service A agrees that it's fine to provide read access to Service B.)
+In this case, as the data being shared across the microservices is files, is this an acceptable pattern? Do we foresee any issues with this approach?
+",382462,,,,,1/16/2021 13:57,Share files between microservices,,2,0,1,,,CC BY-SA 4.0
+420673,1,,,1/5/2021 6:22,,2,40,"Tech Stack: I am using MySQL 8 with InnoDB engine. Application is built using Java with Spring & Hibernate. Database is hosted in AWS RDS and web apps in EC2.
+I have two primary tables for handling orders:
+
+- Orders (Rows Count = 1,294,361)
+- Orders_Item (Rows Count = 2,028,424)
+
+On peak days, my storefront generates orders at a rate of 30 orders per minute, and each order's information is written primarily to the above-mentioned tables.
+I have a separate project for the OMS (Order Manager System) which looks up the same tables to provide a list of pending orders, change their status, handle fulfillment, etc. With this order generation rate, it usually causes a slowdown of the OMS order list page. I also have a CMS (Customer Management System) which likewise looks up the same tables for handling customer queries related to their orders.
+Hence, these two tables are used at a very high rate, which causes slowdowns in one place or another. I am using the best possible indexes on these tables.
+I am thinking of the solution mentioned below:
+
+- Maintain duplicate order data: one copy will serve the CMS and new order creation as a master, and the duplicate will serve the OMS
+
+But I am not sure if this is the right approach. Please share your input.
+",52534,,52534,,1/5/2021 6:34,1/5/2021 6:34,Scaling Order Management System for ecommerce,,0,1,,,,CC BY-SA 4.0
+420676,1,420687,,1/5/2021 9:30,,-4,663,"Command Query Responsibility Segregation and Model–view–controller patterns look pretty similar to me.
+Are they comparable? Do they act at the same layer of abstraction? How do they differ? Can they be used together or one replaces the other? What am I missing?
+",382469,,,,,1/5/2021 13:47,Comparison of CQRS and MVC,,1,4,,,,CC BY-SA 4.0
+420682,1,,,1/5/2021 12:25,,-1,45,"Assume that I am implementing a method that takes a data source in a system. Assume that it's a multi-tenant system, so a data source belongs to an organization (as other relevant entities, like users/datasets/etc). There are two ways to go for it:
+
+getDataSource(dataSourceName, organizationId)
+getDataSource(organizationId, dataSourceName)
+
+Which one is better, and why?
+",22047,,,,,1/5/2021 12:38,Method parameter ordering,,1,0,,1/5/2021 13:02,,CC BY-SA 4.0
+420685,1,,,1/5/2021 13:32,,0,35,"I have been searching for an answer in this topic but I haven’t been able to find a satisfactory one like in other topics, where the consensus is solid.
+The situation
+To keep things simple: I am implementing a custom Dependency Injection Container in one of my current projects (I know, I should use an already built one, but I'm doing it for learning purposes; so answers like 'use this feature of that container…' are not useful) and I've stumbled upon a problem with the instantiation of new elements inside a collection.
+The problem
+Imagine that I have a complex object, for example a car. This car has several dependencies (engine, axles, seats, airbags…) that have, at the same time, their own dependencies, and so on. It is not a big issue to make the DiC (via autowiring or using a config file) build the object graph and inject all the dependencies with a simple line of code like:
+$car = $container->get('car');
+
+The problem arrives when I build a CarCollection, which is a simple class that wraps an array of cars. The issue comes when I try to use a method that populates the collection with all the cars that exist in the database. It's obvious that the collection should be able to create the Car objects on the fly when we call the "getAll" method from the database. The code would be something like this:
+public function populate(array $filters) {
+ $all_data = $this->dao->getAll($filters); // Call the data access object to query all cars.
+ foreach($all_data as $data) {
+ $new_object = $this->container('car'); // create a template object
+ $new_object->setData($data); // set the info.
+ $this->items[] = $new_object; // Add to the collection.
+ }
+}
+
+If Car was not such a complex object it would be easier, because I could pass the car FQCN as a parameter to CarCollection and use it in every iteration. But that's not possible for a very complex object (or if I want to instantiate different subtypes of the object - for example: lorry, pick-up, van… - depending on information from the database).
+The question.
+Regarding the collection being aware of the container: doesn't it break the purpose of the DIC philosophy?
+I guess not on one side, because I am using PSR\Container to type hint the container I pass to the collection (which loosens the coupling). But it breaks the idea that the container should not be coupled with the domain model at all.
+The only alternative that I have thought about is substituting the creation of one new object on each iteration with cloning from a prototype object that lives in the collection as a property. But we all know cloning in PHP can get really tricky and very difficult to debug (or worse: very difficult to even know that there is a problem going on).
+Similar issue.
+PS: I have the same problem when I try to do lazy loading using Proxy objects: I need the proxy objects to have access to the container if I want to instantiate the full object later, which also breaks the principles of a DiC.
+Thank you all.
+",382485,,,,,1/5/2021 13:32,Dependency Injector and Collections,,0,3,,,,CC BY-SA 4.0
+420686,1,,,1/5/2021 13:44,,2,1874,"In a typical Java Spring Web APP:
+we have the following layers:
+Model [DB Models]
+Repositories [where you have queries to DB]
+Services [Business service where you have the @Transactional annotation]
+Controllers [Rest endpoints]
+So for a simple model, e.g. Car:
+@Entity
+Car {
+ Long id;
+ String name;
+ @ManyToOne(fetch = FetchType.LAZY) // Notice lazy here
+ Engine engine;
+}
+
+CarRepo extends JpaRepository {....}
+
+@Transactional
+CarService {
+ ....
+}
+
+@RestController
+CarController{
+ @GET
+ public CarDto getCar(Long id) {
+ ???
+ }
+}
+
+??? : Here is the big dilemma. I use MapStruct to convert objects to other formats; whenever I use it as in the first scenario below, I get LazyInitializationException:
+Scenario #1: Get the model in the controller (which is not so good to do, especially since models should be encapsulated from the view layer) and convert it to CarDto
+CarController{
+ @GET
+ public CarDto getCar(Long id) {
+ Car car= carService.getCar(id);
+ return carMapper.toCarDto(car); // BAM `LazyInitializationException`, on `Engine` field!!!
+ }
+ }
+
+But here is the problem: when the mapper starts to convert Engine, it will get a LazyInitializationException, since the transaction was already committed and closed in the service and Engine is lazily initialized.
+That moves us to Scenario #2.
+OK, so do the conversions in the service then (ta-da!), while you still have the transaction open in the service; so update the getCar method to return a CarDto instead:
+ @Transactional
+ CarService {
+ CarDto getCar(Long id) {.... return mapper.toCarDto(car);} // Hurrah!!, no lazy exceptions since there is a transactional context wrapping that method
+ }
+
+But here is another problem: for other services that use Car, suppose we have FactoryService and we want to get a car by ID so that we can assign it to a factory model; then we will definitely need the Car model, not the DTO.
+FactoryService {
+ void createFactory() {
+ Factory factory = ....;
+ Car car = carService.getCarModel...
+ factory.addCar(car);
+ }
+}
+
+So a simple solution to this is to add another method with a different name that will return the model this time in the CarService:
+@Transactional
+ CarService {
+ CarDto getCar(Long id) {.... return mapper.toCarDto(car);}
+ Car getCarModel(Long id) {.... return car;}
+ }
+
+But as you can see, it is now ugly to have the same function twice with the same logic, only with 2 different return types; that will also lead to a lot of same-type logic methods across the services.
+Eventually we have Scenario #3, which is simply to use Scenario #1 but move the @Transactional annotation to the controller; now we won't get the lazy exception when we use MapStruct in the controller (but this is not a very recommended thing to do, since we are taking control of transactions out of the service (business) layer).
+@Transactional
+CarController{
+ @GET
+ public CarDto getCar(Long id) {
+ ???
+ }
+
+ }
+
+So what would be the best approach to follow here?
+",92003,,92003,,1/8/2021 14:48,1/11/2021 14:16,Best way to handle lazy models with mapstruct and spring transnational scope,,1,17,2,,,CC BY-SA 4.0
+420689,1,,,1/5/2021 13:58,,1,506,"In my application, users can perform actions on a few thousand aggregate root instances with a single click. The problem is that the UI is blocked for several seconds (~3), which feels too slow. So I'm looking for a way to improve the database operation.
+The respective entity class looks (simplified) like this:
+class InspectionPoint {
+ val id: InspectionPointId
+ val version: Short
+ val description: String
+ val maintenanceLevels: Set<MaintenanceLevelId>
+}
+
+The application uses JPA/Hibernate for persistence. The current behavior is that all aggregate roots are fetched from the database, updated by the application and then written back to the database with lots of update, delete, and insert statements. The flow is as follows:
+
+- Fetch all entities (aggregate roots) from the DB
+- n * update entity (increment version in this case)
+- n * delete maintenanceLevels from collection table
+- n * insert maintenanceLevels into collection table
+
+As you can see, there are lots of database statements.
+The question is how to speed it up. Since every aggregate root carries a version attribute for optimistic concurrency control, it wouldn't be possible to just manipulate the collection table. But maybe this flow would work:
+Performing updates without loading entities
+
+- update all InspectionPoint rows with the given IDs directly in the database (increment version)
+- insert or delete rows in the collection table for maintenanceLevels, which would require distinguishing both operations in the client, the public (HTTP) API, and the application service.
+
+The main disadvantages are:
+
+- client, HTTP service, and application service need to be modified
+- domain logic in the entities gets completely bypassed
+- custom SQL is required, which takes some work and makes maintenance harder
+
+Although the performance should be pretty good, there are some severe disadvantages, too.
+Do you have any other suggestions for how to solve aggregate root bulk updates?
+",63946,,,,,3/6/2021 21:01,Bulk Update of DDD Aggregate Roots,,1,2,1,,,CC BY-SA 4.0
+420692,1,420741,,1/5/2021 14:20,,7,322,"I have implemented idempotent order placement (mostly to avoid accidental double submissions) but I am not sure how to handle incomplete operations. Example scenario:
+
+- User tries to place an order.
+- An order instance with status PENDING_PAYMENT is created in the DB.
+- Order payment succeeds (3rd party processor, supporting idempotence keys, e.g. Stripe).
+- My DB fails to update order status to PAID (e.g. it suddenly went down for a minute) and user receives some error.
+
+Since the whole operation is idempotent, it is safe to retry the operation, and some (most?) users would choose to do that.
+But what if the user abandons the operation?
+
+- I could implement a Completer process, which would push all incomplete operations through to completion. However, this might come as a surprise to the user.
+- I could combine the Completer with the assumption that it will eventually be able to successfully place the order, in which case I wouldn't even have to alert the user. However, in an odd case of a failure, I'd have an even more surprising outcome - the once successful order would now appear to be failed.
+
+Questions:
+
+- What are some ways of dealing with this situation?
+- What would the user typically expect?
+
+
+- 2.1. Should I let the user know exactly what happened (i.e. payment ok, status not ok), inform the user of a generic failure (something went wrong, please retry), or let them know nothing at all?
+- 2.2. If I inform the user of a generic error, they might decide to update their basket and then resubmit the order. I was thinking that the way to deal with this is to simply generate a fresh idempotence key and create a second order. What are the alternatives?
+
+Additional details:
+
+- I don't expect a high rate of failures, but I want to be prepared.
+- I am not dealing with big money or sensitive data - consider this a simple e-shop.
+
+Update
+I actually followed this article from Brandur Leach whilst implementing my idempotent operations, in case you're interested: https://brandur.org/idempotency-keys.
+I contacted Brandur directly regarding my problem and you can see what he had to say for yourselves: https://github.com/brandur/sorg/issues/268. The gist is that I should always push all operations to completion, which agrees with the answers here. I can then decide what to do with the result. There may be multiple ways of informing the user too.
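+In case it clarifies the first option, the Completer I have in mind is roughly this (a C# sketch; IOrderStore and IPaymentGateway are invented stand-ins for my persistence layer and the Stripe-like processor, not existing APIs):
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using System.Threading.Tasks;
+
+public interface IOrderStore
+{
+    Task<IReadOnlyList<AbandonedOrder>> FindPendingPaymentOlderThanAsync(TimeSpan age, CancellationToken ct);
+    Task MarkPaidAsync(string orderId, CancellationToken ct);
+    Task MarkFailedAsync(string orderId, string reason, CancellationToken ct);
+}
+
+public interface IPaymentGateway
+{
+    Task<PaymentResult> GetOrChargeAsync(string idempotenceKey, decimal amount, CancellationToken ct);
+}
+
+public record AbandonedOrder(string Id, string IdempotenceKey, decimal Amount);
+public record PaymentResult(bool Succeeded, string Error);
+
+public class OrderCompleter
+{
+    private readonly IOrderStore _orders;
+    private readonly IPaymentGateway _payments;
+
+    public OrderCompleter(IOrderStore orders, IPaymentGateway payments)
+    {
+        _orders = orders;
+        _payments = payments;
+    }
+
+    // Runs periodically; anything still PENDING_PAYMENT after a grace period counts as abandoned.
+    public async Task RunOnceAsync(CancellationToken ct)
+    {
+        var abandoned = await _orders.FindPendingPaymentOlderThanAsync(TimeSpan.FromMinutes(10), ct);
+
+        foreach (var order in abandoned)
+        {
+            // The idempotence key makes it safe to ask the processor again after a crash.
+            var payment = await _payments.GetOrChargeAsync(order.IdempotenceKey, order.Amount, ct);
+
+            if (payment.Succeeded)
+                await _orders.MarkPaidAsync(order.Id, ct);                  // then decide how/whether to notify the user
+            else
+                await _orders.MarkFailedAsync(order.Id, payment.Error, ct); // terminal failure, surfaced to the user
+        }
+    }
+}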
+",315196,,315196,,1/7/2021 15:44,1/8/2021 3:50,How to deal with abandoned idempotent operations?,,3,3,1,,,CC BY-SA 4.0
+420701,1,420702,,1/5/2021 18:28,,0,69,"I'm practicing a use case exercise but it's hard for me to know when it's OK or acceptable. The exercise is about a system for the sacraments of Catholic churches, and one part says:
+The archbishopric requires that:
+1) All baptisms and weddings performed in Catholic churches in the province be registered in a central database which can be accessed from any Catholic church (to perform the online documentation checks at the moment of giving a turn)
+2) The PCs of each church have to have an internet connection. Each parish priest needs to have a user account with a password to access the system
+3) Baptismal certificates and proofs of marriage must be able to be printed according to the official format of each type of certificate and handed over independently of the church where the sacrament was celebrated
+4) Know if there was a previous marriage and its current state
+Clarification: if the bride or groom has a previous marriage but it was annulled, or they are widowed, then a new marriage is authorized
+5) Know the reasons why a marriage was annulled
+From these points I derived these use cases:
+
+- Register Baptism, Consult Baptism, Register Marriages, Consult Marriages, Register Turn, Consult Turns, Modify Turns
+- Generate Sacrament Certificate
+- and 5) Consult Marriages
+
+and the actor Parish Priest, so my diagram is:
+[use case diagram]
+But I'm not sure how to model the users for the parish priests from point 2) or the printing of the certificates from point 3), and I would like to know your opinion about the use cases derived so far. Any advice would be greatly appreciated.
+",382089,,382089,,1/5/2021 19:20,1/6/2021 6:16,Doubts for modeling with use cases,,2,0,,,,CC BY-SA 4.0
+420707,1,,,1/5/2021 19:58,,7,358,"I started at this new company a few weeks ago; this is the CTO's CI strategy:
+[diagram of the current CI/CD flow]
+Current: Developer team has the repo prod/master and they merge everything into master (no branching strategy).
+Once the code is ready in prod/master they'll ask the Infrastructure team to start the deployment process, which uses Jenkins.
+The Infrastructure team executes a job in Jenkins that performs these actions:
+
+- Clone the whole prod/master into build/master (so they don't mess with the developers)
+- Execute scripts to build the binary(ies)
+- Generate a .txt file with the version of the build
+- Commit and push these changes into build/master (reason: prepare the deployment)
+- Apply environment specific settings and push, configurations, binaries and code to distro/master
+
+
+We end up with three repos at the end of the day for each application; that means, if we have 10 applications, we would have 30 repositories.
+
+The CTO's reasons for this:
+
+- prod/master: For developers and their code (no branching, only master)
+- build/master: For Infra team to generate versions (to prepare the deployment)
+- distro/master: Binaries + code + specific environment configurations (to perform rollbacks, provide traceability and have a backup)
+
+
+Cons:
+
+
+- Really complex process
+- Unnecessarily large amounts of data in the repositories and slower processing when performing deployments
+- Only works for file system deployments (databases are not considered in this scenario and those kinds of changes are performed manually)
+- No instant feedback for developers
+- Complexity when patches/fixes and deployments cross each other
+- Developers are involved in the production deployment (quite often, in order to test and apply changes hot)
+- Most of the deployments are performed directly into production
+
+
+Pros:
+
+
+- There's a backup and the possibility to roll back
+- Easy traceability (for rollbacks, not for development)
+- Specific configurations per environment are stored in the repos with the code and binaries
+
+And this is my approach:
+[diagram of the proposed CI/CD flow]
+
+- Developers create a JIRA ticket, which will be used as tag for the build and to create the branch
+- Developers will deploy and test in a Q.A/PRE-PROD environment
+- Once the code works, it will be integrated to master
+- Once integrated with master, the binary goes to a "binary repo like artifactory or other"
+
+
+Pros:
+
+
+- Traceability: The code deployed is easy to find through the tag (JIRA-XXX) for a specific build.
+- Rollback: Taking the binary from the repo (Artifactory)
+- One repository per project; that means 10 projects are 10 repos, not 30.
+- Instant feedback to developers: if the deployment is not successful they can change their code
+- This design contemplates db scripts as hooks
+- The configurations per environment will be handled with Ansible + GIT, generating templates with placeholders and a backup of each configuration.
+
+
+Cons:
+
+
+- Re-educate developers to work in branches
+- Force developers to integrate code only when it really works
+- Changing the CTO's mindset will only happen through examples (working on it)
+- We must create new infra (new environments to create deployments instead of going to production directly)
+- Lots of hours automating through hooks and REST APIs
+- Need to implement new technologies
+
+I'd like to know the opinion of people with expertise in these Git strategies and the balance between development and operations.
+Regards.
+H.
+",382505,,382505,,1/6/2021 17:16,1/6/2021 17:16,"The ""real and effective"" GIT CI/CD strategy",,2,2,2,,,CC BY-SA 4.0
+420710,1,,,1/5/2021 20:40,,1,167,"I recently found out about Domain-Driven Design and I liked it. However, it is quite overwhelming and requires quite a lot of expertise to get right. For this reason, I wanted to try to model a simple domain using DDD and event storming.
+The domain is the following:
+The application allows publishers to publish small articles or books. Both publishers and users can browse, read, "pin" articles and "follow" more than one publisher to get notifications about their new articles. Let's say that publishers are the same as users, with the additional functionality that they can publish.
+Users have free access. Publishers have only subscription-based access. Users have a hard limit on the number of articles they can pin. However, this limit can be increased by buying a subscription. Publishers have a hard limit on the number of articles they can publish. However, this limit can be increased by buying an advanced subscription.
+This is what I have modelled so far, and it is only a small part of it:
+[domain model diagram]
+The Article Bounded Context contains a single aggregate Portfolio. The Portfolio holds the owner of the Portfolio, the created Articles entities and the ArticleQuotas ValueObject. To create an Article the Publisher has to go to the Portfolio, so that we can regulate the creation of new Articles. The Publisher can publish an Article of its Portfolio and the published Article will be visible in the PublishedArticle ReadModel. Finally, the PortfolioQuotas are regulated via events generated by the Subscription BoundedContext, by incrementing the PortfolioQuotas.
+At first I was tempted to separate the concepts of Article and Quotas, but then there is the problem of eventual consistency between the creation of an Article and the exceeding of the Quotas.
+What I'm asking here is whether I'm going in the right direction and, if not, whether you have some suggestions on modeling with Domain-Driven Design.
+Thank you very much
+",382476,,,,,1/5/2021 20:40,Domain Driven Design Exercise,,0,0,,,,CC BY-SA 4.0
+420716,1,420773,,1/6/2021 2:02,,5,171,"I'm designing a microservices structure, and I'm facing some problems on how to place data on different microservices.
+For example, I have users that subscribe to plans and the plans have different features. For example, a user can download 10 items per month.
+So I'm thinking of building the following microservices:
+
+- User microservice: Maintain users data and the downloads
+- Plans microservice: Maintain plans data and the features each plan enables
+- Other microservices: Other microservices that may use the previous two to check permissions
+So when a user requests a new download, I need to get the user's actual amount of downloads and the user plan. And then check if the actual amount of downloads allows a new download based on the plan amount of download limit. If it's allowed, then I need to reach the users microservice to update the amount of downloads.
+
+When I'm thinking about this design, I'm not sure of the following:
+Where should I store what plan a user has (Users vs Plans microservices)?
+Should the microservices communicate with HTTP?
+Thanks
+",382480,,379622,,1/6/2021 2:29,1/7/2021 22:49,Microservices shared data,,2,0,1,,,CC BY-SA 4.0
+420717,1,,,1/6/2021 2:54,,4,235,"I've been finding that for a lot of code I've been writing recently, it naively might look like this:
+Array approach:
+const options = [
+ {
+ id: 'red',
+ label: 'Red',
+ data: '#f00'
+ },
+ {
+ id: 'blue',
+ label: 'Blue',
+ data: '#00f'
+ }
+];
+
+
+And then something like, in say React context (but my question relates to programming generally):
+return <select>
+ {options.map((v) => <option key = {v.id} value = {v.id}>{v.label}</option>}
+</select>;
+
+Now the problem with storing the list of options as an array is that if you ever need to look up an options object by just an id, you have to do a full scan over the array like:
+function findOptionById(id) {
+ return options.find((v) => v.id === id);
+}
+
+Which doesn't seem particularly efficient (if this function is being called every render for example) and becomes particularly problematic when you have nested objects.
+So the alternative:
+Map approach:
+const options = {
+ red: {
+ id: 'red',
+ label: 'Red',
+ data: '#f00'
+ },
+ blue: {
+ id: 'blue',
+ label: 'Blue',
+ data: '#00f'
+ }
+};
+
+Mapping over it:
+Object.values(options).map((v) => <option key={v.id} value={v.id}>{v.label}</option>)
+
+Finding an item in the list:
+function findOptionById(id) {
+ return options[id]
+}
+
+Faster lookup (I believe? or am I wrong in the context of JavaScript specifically?), and it has the added advantage of enforcing some kind of ID uniqueness, which in my scenarios is always necessary.
+My question(s)
+It seems to me that in a scenario where 'You have a list of items, and they have some kind of unique key' then a map is always (or usually) advantageous to use.
+However, from a code readability and 'the data structures make sense' POV, using arrays seems more intuitive.
+That is, for example, if I am creating a RadioList component and I'm saying 'it has an options property which is a list of items containing id, label, and data', then by declaring this type as an array it's a lot more obvious to the user what the meaning of this property is.
+Is there some kind of term or concept in software engineering that considers when an array should be used vs a map?
+Edit: Although I've mentioned performance, it's not really my main concern. My main concern is around the ease of use of this list object, inserting, removing, looking up items etc.
+",109776,,54480,,1/10/2021 19:22,1/10/2021 19:22,Arrays vs Maps for listing items that have a unique id,