Dataset columns:
image_url: string (lengths 113-131)
tags: sequence
discussion: list
title: string (lengths 8-254)
created_at: string (lengths 24-24)
fancy_title: string (lengths 8-396)
views: int64 (73-422k)
https://www.mongodb.com/…1_2_1024x567.png
[ "node-js", "mongoose-odm" ]
[ { "code": "const express = require(\"express\");\nconst bodyParser = require(\"body-parser\");\nconst crypto = require(\"crypto\");\nconst nodemailer = require(\"nodemailer\");\n\nconst app = express();\nconst port = 8000;\nconst cors = require(\"cors\");\napp.use(cors());\n\napp.use(bodyParser.urlencoded({ extended: false }));\napp.use(bodyParser.json());\n\nconst jwt = require(\"jsonwebtoken\");\n\nconst mongoose = require(\"mongoose\");\nmongoose\n .connect(\"mongodb://192.168.29.166/ecommerce-project\", {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n })\n .then(() => {\n console.log(\"connected to mongodb\");\n })\n .catch((err) => {\n console.log(\"error connecting to mongodb\", err);\n });\n\napp.listen(port, \"192.168.29.166\", () => {\n console.log(\"server is running on port 8000\");\n});\n\nconst User = require(\"./models/user\");\nconst Order = require(\"./models/order\");\n\n//function to send verification email to the user\n\nconst sendVerificationEmail = async (email, verificationToken) => {\n // create a nodemailer trasnsport\n\n const trasnsporter = nodemailer.createTransport({\n //configure the email service\n service: \"gmail\",\n auth: {\n user: \"test@gmaill.com\",\n pass: \"ktyhihvqrwmqcpex\",\n },\n });\n\n //compose the email message\n const mailOptions = {\n from: \"amazon.com\",\n to: email,\n subject: \"Email Verification\",\n text: `Please click the following link to verify your email : http://localhost:8000/verify/${verificationToken}`,\n };\n\n // send the email\n try {\n await trasnsporter.sendMail(mailOptions);\n } catch (error) {\n console.log(\"Error sending verification email\", error);\n }\n};\n\n//endpoint to register in the app\n\napp.post(\"/register\", async (req, res) => {\n try {\n const { name, email, password } = req.body;\n\n //Chech if The email is already registered\n\n const existingUser = await User.findOne({ email });\n\n if (existingUser) {\n return res.status(400).json({ message: \"Email already Registered\" });\n }\n\n //create a new User\n const newUser = new User({ name, email, password });\n\n //generate and store the verification token\n newUser.verificationToken = crypto.randomBytes(20).toString(\"hex\");\n\n //save the user to the database\n await newUser.save();\n\n //send verification email to the user\n sendVerificationEmail(newUser.email, newUser.verificationToken);\n } catch (error) {\n console.log(\"error registering user\", error);\n res.status(500).json({ message: \"Registration failed\" });\n }\n});\n\n//endpoint tp verify the email\n\napp.get(\"/verify/token\", async (req, res) => {\n try {\n const token = req.params.token;\n\n //FInd the user with the given verification token\n const user = await User.findOne({ verificationToken: token });\n\n if (!user) {\n return res.status(404).json({ message: \"Invalid verification token\" });\n }\n\n //Mark the user as verified\n user, (verified = true);\n user.verificationToken = undefined;\n\n await user.save();\n\n res.status(200).json({ message: \"Email verified\" });\n } catch (error) {\n res.status(500).json({ message: \"Email veridication failed\" });\n }\n});\nconst mongoose = require(\"mongoose\");\n\nconst userSchema = new mongoose.Schema({\n name: {\n type: String,\n required: true,\n },\n email: {\n type: String,\n required: true,\n unique: true,\n },\n password: {\n type: String,\n required: true,\n },\n verified: {\n type: Boolean,\n default: false,\n },\n verificationToken: String,\n addresses: [\n {\n name: String,\n mobileNo: String,\n houseNo: String,\n 
street: String,\n landmark: String,\n city: String,\n country: String,\n postalCode: String,\n },\n ],\n orders: [\n {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"Order\",\n },\n ],\n createdAt: {\n type: Date,\n default: Date.now,\n },\n});\n\nconst User = mongoose.model(\"User\", userSchema);\n\nmodule.exports = User;\n\n{\n \"name\": \"api\",\n \"version\": \"1.0.0\",\n \"description\": \"backend\",\n \"main\": \"index.js\",\n \"scripts\": {\n \"start\": \"nodemon index.js\",\n \"test\": \"echo \\\"Error: no test specified\\\" && exit 1\"\n },\n \"author\": \"\",\n \"license\": \"ISC\",\n \"dependencies\": {\n \"body-parser\": \"^1.20.2\",\n \"cors\": \"^2.8.5\",\n \"express\": \"^4.18.2\",\n \"jsonwebtoken\": \"^9.0.1\",\n \"mongoose\": \"^7.4.3\",\n \"nodemailer\": \"^6.9.4\",\n \"nodemon\": \"^3.0.1\"\n }\n}\n\n", "text": "I’m new to MERN stack. while I was trying to do a simple register endpoint, I’m getting the below error in the command prompt, can someone try to help me out of this please, Thanks in advanceScreenshot of my terminal\ncmd1070×593 35.3 KB\nindex.jsuser.jspackage.json", "username": "Vinothagan_J" }, { "code": "network access", "text": "Hi @Vinothagan_J , welcome to the community.You are having a problem connecting with your database.If you are using the Atlas to create a cluster, probably you forgot to configure the network access from your project.If you are using another server, you need to ensure that it’s able to connect from your machine.", "username": "Jennysson_Junior" }, { "code": "", "text": "@Jennysson_Junior Thanks for you reply. but as I said I totally new to this.I’m still not sure, whether I’m connected or not.I have attached my Screenshots, if that helps, correct If I’m wrong at anything.\nMicrosoftTeams-image (6)1920×924 53 KB\n\nconnecttab1920×939 95.3 KB\nKindly help, Thanks in advance.", "username": "Vinothagan_J" }, { "code": "mongoose\n .connect(YOUR_CONNECTION_STRING, {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n })\n", "text": "connecttab1920×939 95.3 KBHere you can see your connection string in step 3. Look that you must complete the connection string with the password of your user db.You will use this connection on your code to connect with your cluster. Something like", "username": "Jennysson_Junior" }, { "code": "", "text": "@Jennysson_Junior Thanks for your reply,As you can see I have added my connection string as like you have mentioned.\nimage897×344 9.62 KB\nStruglging with this connection issue for a week now ", "username": "Vinothagan_J" }, { "code": "", "text": "Have solved the issues please, I’m having the same issue.", "username": "Anas_Backend_Engineer" }, { "code": "", "text": "@Vinothagan_J Have solved the issues please, I’m having the same issue.@Vinothagan_J Have solved the issues please, I’m having the same issue.", "username": "Anas_Backend_Engineer" }, { "code": "", "text": "No, I didn’t find the solution either.", "username": "Vinothagan_J" }, { "code": "", "text": "I am also having this issue i can’t find a fix it. another here got a solution?", "username": "Its_Aqua" } ]
Error registering user MongooseError: Operation `users.findOne()` buffering timed out after 10000ms at Timeout
2023-08-20T08:08:18.026Z
Error registering user MongooseError: Operation `users.findOne()` buffering timed out after 10000ms at Timeout
1,129
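For anyone hitting the same buffering timeout as the thread above: it generally means queries were issued before Mongoose ever managed to connect, so the real connection error (bad URI, wrong credentials, or an IP that is not on the Atlas access list) never surfaces. A minimal sketch that waits for the connection and fails fast instead; the connection string, credentials and database name are placeholders, not the poster's values:

```javascript
// Minimal sketch, not the poster's code: start the server only after Mongoose
// has actually connected, so connection failures are reported immediately
// instead of showing up later as "buffering timed out after 10000ms".
const express = require("express");
const mongoose = require("mongoose");

const app = express();
app.use(express.json());

// Placeholder Atlas-style URI: <username>, <password>, <cluster-url> are assumptions.
const uri =
  "mongodb+srv://<username>:<password>@<cluster-url>/ecommerce-project?retryWrites=true&w=majority";

async function start() {
  try {
    await mongoose.connect(uri); // throws if the cluster is unreachable or auth fails
    console.log("connected to mongodb");
    app.listen(8000, () => console.log("server is running on port 8000"));
  } catch (err) {
    // Typical causes: IP not in the Atlas network access list, wrong password, bad URI.
    console.error("error connecting to mongodb", err);
    process.exit(1);
  }
}

start();
```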
null
[ "python" ]
[ { "code": "my_cursor = my_collection.find_one()\nprint (json.dumps(my_cursor))\nTypeError: Object of type datetime is not JSON serializable\nl_cursor = json.loads(bson.json_util.dumps(my_cursor))\nprint (json.dumps(l_cursor, indent=2))\n\"date\": { \"$date\": \"2019-01-20T05:00:00Z\"}\n\"key\": \"value\"\"key\": {\"$value type\": value}", "text": "Hi all,sorry for my ignorance, I’m quite new to MongoDB and making my first steps.\nI have a task to read all documents from a MongoDB collection as straight-forward JSONs, using python.Assuming my connection works (it does), if i simply read the documents as-is and try to print them, python is throwing this type of exception:…What I have found is that I need to use the bson module to dump the data:It works, but the representation is a little bit unusual:I have found different workarounds, suggesting custom encoders, lambda functions and so on.My question: how do I get a representation \"key\": \"value\" instead of \"key\": {\"$value type\": value} in a more generic way? Without writing an encoder for all possible BSON data types, hopefully.Thank you and apologies for my ignorance once again.Best,\nMichael", "username": "lynyrds" }, { "code": "my_document = my_collection.find_one()\nprint( my_document )\n", "text": "From an example I found I gather that you might simply have toI renamed my_cursor from your code as my_document because find_one() returns a single document rather than a cursor. The find() method would return a cursor.", "username": "steevej" }, { "code": "'date': datetime.datetime(2018, 12, 27, 5, 0)json.loads()find_one()", "text": "You’re right, but then the BSON types are returned like this: 'date': datetime.datetime(2018, 12, 27, 5, 0)\nThat’s why json.loads() can’t serialise that.\nAnd it’s not really human-readable.I was using find_one() to simplify the example.", "username": "lynyrds" }, { "code": "", "text": "In this SO thread they mention to use default=str as an option.", "username": "steevej" }, { "code": "datetimeDecimal128mongoexport", "text": "That, i found too.\nIt looks better, of course.\nBut still the date should be ISO conform, an integer should remain an integer and a float should be a float, not a string.\nI wrote a basic encoder for the datetime and Decimal128, but it’s 20 BSON data types, give or take.\nI mean, there’s mongoexport tool which seems to do the job correctly, so I know it’s possible.", "username": "lynyrds" }, { "code": "\"date\": { \"$date\": \"2019-01-20T05:00:00Z\"}\n", "text": "This is above my little knowledge of python.But as for normal JSON, there is no Date format, no Decimal128 and no difference between integer and float. The official JSON types are array, object, string, number, boolean and null. BSON has a richer data type system and this is why EJSON was brought to life, that is why you get:", "username": "steevej" }, { "code": "\"key\": \"value\"\"key\": {\"$value type\": value}docs = list(coll.find())\n\n# To encode as JSON:\ndocs_as_extended_json = bson.json_util.dumps(docs)\n\n# To decode the JSON back to python/pymongo objects:\ndocs_decoded = bson.json_util.loads(docs_as_extended_json)\nassert docs_decoded == docs\njson.loadsbson.json_util.loads", "text": "My question: how do I get a representation \"key\": \"value\" instead of \"key\": {\"$value type\": value} in a more generic way? Without writing an encoder for all possible BSON data types, hopefully.My suggestion is to commit using MongoDB Extended JSON wherever possible. 
For example:Using MongoDB Extended JSON should make your life easier because it is crossplatform and supports encoding/decoding all the BSON types. The problem with your initial attempt is that you used json.loads to decode the JSON instead of bson.json_util.loads.", "username": "Shane" } ]
Reading "normal" JSON with pymongo from a collection
2023-11-10T10:46:43.535Z
Reading “normal” JSON with pymongo from a collection
104
null
[ "ops-manager" ]
[ { "code": "", "text": "Hello,I just installed ops manager 6 and deployed a new mongodb instance running in a Rocky Linux 9 distro. But my deployment never finishes. The error log show the following issue:Error loading desired cluster configs : [01:16:29.245] Error retrieving cluster config from ‘http://:8080/agents/api/automation/conf/v1/654a780e890175651ead6d6d?av=12.0.28.7763&aos=linux&aa=x86_64&ab=64&ad=&ah=l&ahs=&at=1699387830756’ : [01:16:29.245] Cluster config did not pass validation for pre-expansion semantics : MongoDB Tools download URL for this host was not found in the list of available URLS : [ {100.9.0 map[linux:map[amazon2:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-amazon2-x86_64-100.9.0.tgz amzn64:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-amazon-x86_64-100.9.0.tgz arm64_amazon2:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-amazon2-aarch64-100.9.0.tgz arm64_rhel82:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel82-aarch64-100.9.0.tgz arm64_ubuntu2204:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu2204-arm64-100.9.0.tgz debian10:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian10-x86_64-100.9.0.tgz debian11:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian11-x86_64-100.9.0.tgz debian81:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian81-x86_64-100.9.0.tgz debian92:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian92-x86_64-100.9.0.tgz ppc64le_rhel7:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel71-ppc64le-100.9.0.tgz ppc64le_rhel8:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel81-ppc64le-100.9.0.tgz rhel57:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel62-x86_64-100.9.0.tgz rhel62:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel62-x86_64-100.9.0.tgz rhel7:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel70-x86_64-100.9.0.tgz rhel80:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel80-x86_64-100.9.0.tgz rhel90:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel90-x86_64-100.9.0.tgz s390x_rhel7:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel72-s390x-100.9.0.tgz suse11:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-suse12-x86_64-100.9.0.tgz suse12:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-suse12-x86_64-100.9.0.tgz suse15:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-suse15-x86_64-100.9.0.tgz ubuntu1604:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu1604-x86_64-100.9.0.tgz ubuntu1804:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu1804-x86_64-100.9.0.tgz ubuntu2004:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu2004-x86_64-100.9.0.tgz ubuntu2204:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu2204-x86_64-100.9.0.tgz] osx:map[default:http://:8080/automation/mongodb-releases/hybrid/macos/mongodb-database-tools-macos-x86_64-100.9.0.zip] 
windows:map[default:http://:8080/automation/mongodb-releases/hybrid/windows/mongodb-database-tools-windows-x86_64-100.9.0.zip]]} ]I installed the package redhat-lsb-release which mongodb documentation mentionsWhat could be ?", "username": "Jose_Felipe_Goncalves_Rocha" }, { "code": "", "text": "Hi @Jose_Felipe_Goncalves_Rocha,\nIt appears that the agent cannot determine the correct operating system to download the database tools, however, it is sincerely not clear to me on how the situation can be solved. do you have any suggestions @chris @Kushagra_Kesav?Best Regards", "username": "Fabio_Ramohitaj" }, { "code": "", "text": "Is that log verbatim or has it been redacted? Because the host portion of the URLs is missing.", "username": "chris" }, { "code": "", "text": "I removed the hostnames for security reasons.An update: I had this issue in a Rocky Linux 9. Then I downgraded the SO to version 8 and everything works fine.", "username": "Jose_Felipe_Goncalves_Rocha" }, { "code": "", "text": "Good to hear, Ops Manager 6.0.20 (Nov 03 2023) adds support for RHEL 9 for both Ops Manager and Managed deployments.", "username": "chris" } ]
MongoDB Tools and Rocky Linux
2023-11-09T04:37:58.121Z
MongoDB Tools and Rocky Linux
114
null
[ "dot-net" ]
[ { "code": "BsonSerializer.RegisterSerializer(new GuidSerializer(GuidRepresentation.Standard));return await (await _collection.FindAsync(x => x.id == id)).FirstOrDefaultAsync();BsonDefaults.GuidRepresentation = GuidRepresentation.Standard;", "text": "Supposedly this topic was beaten to death but no.\nHave this, first thing in my code,\nBsonSerializer.RegisterSerializer(new GuidSerializer(GuidRepresentation.Standard));Insert happens fine, i see in Atlas _id: UUID(‘9ad10b5b-50b4-4e35-bd26-393be728215c’)\nThe LINQ search is not finding it. It comes back with null when id is Guid(‘9ad10b5b-50b4-4e35-bd26-393be728215c’)\nreturn await (await _collection.FindAsync(x => x.id == id)).FirstOrDefaultAsync();Everything works as soon as i add\nBsonDefaults.GuidRepresentation = GuidRepresentation.Standard;But it says it’s obsolete.", "username": "George_Ter-Saakov" }, { "code": "", "text": "I also have this issue", "username": "pierre-luc_des" } ]
C#, GUID and MongoDB
2023-07-29T22:03:33.987Z
C#, GUID and MongoDB
504
null
[]
[ { "code": "", "text": "Hello,We are trying to setup mongodb atlas monitoring integration provided through new relic and want to understand the volume of metrics being collected and feed to new relic. This will help us understand the potential cost from new relic as they charge based on the amount of data being sent. Looking for thoughts from the community if anyone has implemented the same in their environment.", "username": "Ammara_Sheikh" }, { "code": "", "text": "Hi @Ammara_SheikhThank you for your post and for being a part of the MongoDB community! The New Relic integration with MongoDB Atlas uses a Prometheus exporter that scrapes the MongoDB process.The exact payload size being sent to New Relic will depend on your environment. However, assuming you have a project with a single 3-node replica set, we would be sending around 110 KB every 10 seconds by default. This would equate to around 29GB per month if you wanted 10 second metrics resolution.You can further reduce the amount of data being sent by modifying the scrape_interval in the prometheus config file.I hope this helps answer your question! Please let me know if you have any other questions.Thanks!\nFrank", "username": "Frank_Sun" } ]
New relic integration for mongodb atlas metrics volume estimate
2023-11-03T13:50:03.215Z
New relic integration for mongodb atlas metrics volume estimate
164
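For rough sizing, the ~29 GB figure above follows directly from the stated payload: 110 KB every 10 seconds is 11 KB/s, and 11 KB/s x 86,400 s/day x 30 days comes to roughly 28.5 GB per month for a single 3-node replica set. Increasing the scrape_interval (for example from 10 s to 20 s) should roughly halve that volume; actual numbers will vary with cluster size and the metrics enabled.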
null
[ "node-js" ]
[ { "code": "", "text": "I want to use Mongo Atlas as my DB on my application, but I am facing an issue:I am using a Cloudflare Proxy. So anytime there is a new request to my website, the IP changes. So, how can I secure the access to my DB without having a unique IP address to access from?I’ve tried to add to the whitelist all the Cloudflare IPs, but Mongo does not accept IpV6 ranges. Is there any way to restrict the access by domain? Any other solution to have secure connection to MongoDB in my case?", "username": "Javier_Alvarez1" }, { "code": "", "text": "Note that Atlas maintains a posture of security in depth, requiring database authentication and TLS network encryption on top of the firewall: so you may consider opening up a wider public IP block", "username": "Andrew_Davidson" } ]
How to secure access to Mongo DB Atlas
2023-11-10T08:00:53.888Z
How to secure access to Mongo DB Atlas
75
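To illustrate the defence-in-depth point above (database authentication and TLS still apply even when the IP access list is opened wide), a minimal Node.js connection sketch; the user, password and cluster host are placeholders:

```javascript
// Sketch only: even with a broad IP access list (e.g. the Cloudflare ranges or 0.0.0.0/0),
// every connection must still present valid database credentials over TLS.
const { MongoClient } = require("mongodb");

const client = new MongoClient(
  // mongodb+srv URIs enable TLS by default; unauthenticated clients are rejected.
  "mongodb+srv://<appUser>:<strongPassword>@<cluster-url>/?retryWrites=true&w=majority"
);

async function ping() {
  await client.connect();
  await client.db("admin").command({ ping: 1 });
  await client.close();
}

ping().catch(console.error);
```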
null
[]
[ { "code": "", "text": "Hello,Has anyone been successful is sending a message to AWS SQS using the JavaScript SDK v3? This is inside of an Atlas function.It simply will not work for me with a fifo queue.The same function has no issues interacting with S3.The error when trying to send an SQS message is that data must be a string or a buffer. (The data is a string)I know the code works though because identical code, package, etc. works in node 18.Thank you in advance!", "username": "Dima" }, { "code": "", "text": "Hi Dima,\nCan you share your code snippet and the full error message you’re seeing? Thanks!", "username": "mpobrien" }, { "code": "exports = async function () {\n const AWS_CONFIG = {\n credentials: {\n accessKeyId: context.environment.values['AWS_ACCESS_KEY_ID'],\n secretAccessKey: context.values.get('AWS_SECRET_KEY_VALUE'),\n },\n region: context.values.get(\"AWS_REGION\"),\n }\n return AWS_CONFIG\n }\nexports = async function(changeEvent) {\n\n const AWS_CONFIG = await context.functions.execute('aws_getConfig')\n\n const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3')\n const { SQSClient, SendMessageCommand } = require('@aws-sdk/client-sqs')\n\n const { S3_BUCKET_ETL_ENV, SQS_QUEUE_ETL_URLS } = context.environment.values\n const S3Bucket = context.values.get(\"S3_ETL_BUCKET\")\n \n if (changeEvent && (changeEvent.operationType == 'insert' || changeEvent.operationType == 'delete' || changeEvent.operationType == 'update')) {\n const s3 = new S3Client(AWS_CONFIG)\n const sqs = new SQSClient(AWS_CONFIG)\n \n const collection = changeEvent.ns.coll\n const SQSQueueUrl = SQS_QUEUE_ETL_URLS[collection.toLowerCase()]\n \n let objectsToPutInS3 = [], objectsToPutInS3ForSQS = []\n \n const fullStringBody = changeEvent.fullDocument ? JSON.stringify(changeEvent.fullDocument) : JSON.stringify(changeEvent.documentKey)\n \n const baseKeyName = S3_BUCKET_ETL_ENV + \"/\" + collection + \"/\" + changeEvent.documentKey._id + \"-\" + Date.now()\n \n if(fullStringBody) {\n const anObj = {\n Bucket: S3Bucket,\n Key: baseKeyName,\n Body: fullStringBody\n }\n objectsToPutInS3ForSQS.push(anObj)\n objectsToPutInS3.push(new PutObjectCommand(anObj))\n }\n \n const s3Promises = objectsToPutInS3.map(object => s3.send(object).then(data => {\n console.log('S3 put object result: ' + JSON.stringify(data))\n return data\n }))\n await Promise.all(s3Promises)\n\n const sqsMsgBody = JSON.stringify({\n operation: changeEvent.operationType,\n S3FilePartsOfJSONDocument: objectsToPutInS3ForSQS.map(object => {return {Bucket: object.Bucket, Key: object.Key}})\n })\n\n console.log(`SQSQueueUrl: ${SQSQueueUrl} and is of type ${typeof SQSQueueUrl}`)\n //\"https://sqs.us-east-1.amazonaws.com/1234567890/MongoAtlasETLUsersDev.fifo\" and is of type string\n\n console.log(`collection: ${collection} and is of type ${typeof collection}`)\n //\"users\" and is of type string\n\n console.log(`sqsMsgBody: ${sqsMsgBody} and is of type ${typeof sqsMsgBody}`)\n //something like: \"{\\\"operation\\\":\\\"update\\\",\\\"S3FilePartsOfJSONDocument\\\":[{\\\"Bucket\\\":\\\"bucket-name\\\",\\\"Key\\\":\\\"dev/users/5bb2377885432223fg-1690592434481\\\"}]}\" and is of type string\n\n console.log(`baseKeyName: ${baseKeyName} and is of type ${typeof baseKeyName}`)\n //something like: \"dev/users/5bb2377885432223fg-1690592434481\" and is of type string\n \n //Everything is fine up to here\n try {\n const sqsResult = await sqs.send(new SendMessageCommand({\n QueueUrl: SQSQueueUrl,\n MessageGroupId: collection,\n MessageBody: 
sqsMsgBody,\n MessageDeduplicationId: baseKeyName\n }))\n } catch (e) {\n console.log('Error writing to SQS: ' + e)\n //error is: \"TypeError: Data must be a string or a buffer\"\n }\n }\n }\n{\"dependencies\":{\"@aws-sdk/client-s3\":\"~3.378.0\",\"@aws-sdk/client-sqs\":\"~3.378.0\"}}\n", "text": "Hello Michael,Absolutely and thank you for responding.One function is for the aws config (credentials and region) in aws_getConfig.js:The function that tries to make the SQS call is triggered by a database trigger (it actually does both S3 and SQS - S3 works, SQS does not) - in Atlas_Triggers_ETL_Function:So, the full error message is \"“TypeError: Data must be a string or a buffer”The package.json looks like this:Thank you!", "username": "Dima" }, { "code": "", "text": "We arrived at this post as we are also facing the exact same problem with the V3 aws-sdk and SendMessageCommand.", "username": "Mark_Johnson" }, { "code": "", "text": "Hi Mark,Glad to know that I am not alone in this. Thanks for the reply.", "username": "Dima" }, { "code": "", "text": "As a result we are looking at EventBridge triggers instead, though looks like it will complicate the setup a little, particularly for your scenario.", "username": "Mark_Johnson" }, { "code": "", "text": "Hey @Dima @mpobrien any updates on this", "username": "Todd_Stephenson" } ]
AWS SDK v3 SQS SendMessage
2023-07-29T02:53:11.891Z
AWS SDK v3 SQS SendMessage
786
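For comparison with the Atlas Function above, here is the same SendMessageCommand shape in a plain Node.js 18 process, where the original poster reports it works; the queue URL, region, group ID and credentials are placeholders. If this runs cleanly while the Function still fails, that points at the App Services runtime rather than the call itself, which is consistent with the EventBridge workaround mentioned in the thread:

```javascript
// Standalone sketch, assuming AWS credentials are provided via the environment.
// FIFO queues require MessageGroupId and, without content-based deduplication,
// a MessageDeduplicationId, as in the trigger code above.
const { SQSClient, SendMessageCommand } = require("@aws-sdk/client-sqs");

const sqs = new SQSClient({ region: "us-east-1" });

async function send() {
  const result = await sqs.send(
    new SendMessageCommand({
      QueueUrl: "https://sqs.us-east-1.amazonaws.com/<account-id>/MyQueue.fifo", // placeholder
      MessageGroupId: "users",
      MessageDeduplicationId: `users-${Date.now()}`,
      MessageBody: JSON.stringify({ operation: "insert" }),
    })
  );
  console.log("MessageId:", result.MessageId);
}

send().catch(console.error);
```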
null
[ "aggregation", "compass" ]
[ { "code": "db.getCollection('dlsComponents').aggregate([\n { $match: { library: 'Library1', collection: 'Collection1', media: 'Images', object: 'Image3' } }\n ])\n{ _id: ObjectId(\"61fc458b46d7874a3a97ef79\"),\n library: 'Library1',\n collection: 'Collection1',\n media: 'Images',\n object: 'Image3',\n info: 'Image: 1/1/Images/Image3 Info', …\ndb.getCollection('dlsComponents').aggregate([\n { $match: { _id: { $eq: { $toObjectId: \"61fc458b46d7874a3a97ef79\" } } } }\n ])\n", "text": "Using Compass 1.30.1, I was testing an aggregation and getting unexpected results. A $match was not working as expected. The simplified aggregation is:And this gives the expected result by finding a document:try to get the same document by _id:does not find a document. Why does the second $match not find a document?", "username": "David_Camps" }, { "code": "", "text": "I found that:{ $match: { $expr: { $eq: [ ‘$_id’, ‘$$imageId’ ] } } }does work ($$imageId is an ObjectId used in the non-simplified aggregate). Maybe the { $eq: ‘$value’ } format does not work in pipelines.", "username": "David_Camps" }, { "code": "$eq$eq: ObjectId(\"...\")", "text": "Hi David,The $eq used in find()/$match (without $expr) must specify an exact value: https://docs.mongodb.com/manual/reference/operator/query/eq/You can use $eq: ObjectId(\"...\")Jess", "username": "jbalint" }, { "code": "{ $match: { $expr : { $eq: [ '$_id' , { $toObjectId: \"61fc458b46d7874a3a97ef79\" } ] } } }\n", "text": "Your last post made me think that may be $toObjectId works only inside $expr. I triedand it works.", "username": "steevej" }, { "code": "", "text": "See on a related topicand", "username": "steevej" }, { "code": "", "text": "even gpt4 doesn’t solve my problem, thank", "username": "Crown_International_Technology_Pvt_Ltd_CIT" }, { "code": "", "text": "Did you told GPT4 what was your problem? Or did you do like you did here., just saying that you have a problem.", "username": "steevej" } ]
Aggregate $match _id $eq $toObjectId not working
2022-02-06T16:10:18.497Z
Aggregate $match _id $eq $toObjectId not working
24,789
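To summarise the resolution reached above, both working forms side by side in mongosh syntax: a plain $match comparison needs a literal ObjectId, while $toObjectId is an aggregation expression and only evaluates inside $expr:

```javascript
// Plain query operators take literal values, so construct the ObjectId directly:
db.getCollection("dlsComponents").aggregate([
  { $match: { _id: ObjectId("61fc458b46d7874a3a97ef79") } },
]);

// Aggregation expressions such as $toObjectId are only evaluated inside $expr:
db.getCollection("dlsComponents").aggregate([
  {
    $match: {
      $expr: { $eq: ["$_id", { $toObjectId: "61fc458b46d7874a3a97ef79" }] },
    },
  },
]);
```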
null
[ "atlas-device-sync", "android", "kotlin" ]
[ { "code": "override fun configureTheRealm() {\n if (user != null) {\n val config = SyncConfiguration.Builder(user, setOf(EmailAccount::class, ApplicationAccount::class))\n .initialSubscriptions { sub ->\n add(\n query = sub.query<EmailAccount>(\"ownerId == $0\", user.id),\n name = \"User's EmailAccounts\"\n )\n add(\n query = sub.query<ApplicationAccount>(\"ownerId == $0\", user.id),\n name = \"User's Application Account\"\n )\n }\n .log(LogLevel.ALL)\n .build()\n realm = Realm.open(config)\n Log.d(\"USERID\", \"$user\")\n }\n }\n", "text": "I am trying to configure the realm with the help of the below function. Initially, there was only EmailAccount class after adding ApplicationAccount class while writing to EmailAccount cluster it is working but for ApplicationAccount it is saying Cannot write to class ApplicationAccount when no flexible sync subscription has been created.How to solve this ??", "username": "Meet_Soni1" }, { "code": "realm.subscriptions.errorMessage", "text": "Hmm, that is a bit surprising. It could point to a problem with the subscriptions somehow.What is the output of realm.subscriptions.errorMessage?", "username": "ChristanMelchior" }, { "code": "", "text": "image1668×497 67.5 KB", "username": "Meet_Soni1" }, { "code": "", "text": "Is there anything I am missing? please help me", "username": "Meet_Soni1" } ]
Realm Error No Subscription for WRITE: When doing multiple subscriptions of different type
2023-11-07T14:26:49.413Z
Realm Error No Subscription for WRITE: When doing multiple subscriptions of different type
133
null
[ "aggregation", "compass" ]
[ { "code": "", "text": "Last week, we announced a new, intelligent query generation experience in MongoDB Compass. With the help of AI, you can generate queries and aggregations from natural language. This feature is now available in public preview.Please be sure to download the latest version of Compass to try it out. If you have any feedback, we’d love to hear it! Let us know what you think by posting in our feedback portal.You can learn more about this feature by reading our docs. To also learn about the other intelligent experiences for MongoDB announced last week, check out our blog.", "username": "Julia_Oppenheim" }, { "code": "", "text": "Hello! I successfully installed and utilized this feature on Windows. However, to my surprise, after downloading the 64-bit version for Mac, the option (feature preview) doesn’t appear in the settings menu.P.S.: I’m currently using Compass 1.40.4. Could you please assist me with this issue?", "username": "ricardohsmello" }, { "code": "", "text": "Hi @ricardohsmello - thanks for posting here. I can’t reproduce this. Would you mind sharing a screenshot of what you see? (Feel free to message me privately if you’d prefer). Also, do you see “Generate Query” in the query bar?", "username": "Julia_Oppenheim" }, { "code": "", "text": "Hi Julia,\nFirstly, thank you for responding to my message.\nThe \"Generate Query\"option does not show.Attached are the screenshots for your reference.FE0015E9-3A8B-46BE-9C3A-938AF2F1B65F2116×1686 247 KB\n9ACC4478-F0D2-4A58-BB57-0047334AEC452326×406 75.8 KB\n919929C4-BDCC-4781-807E-300E60D4C3001126×640 114 KB", "username": "ricardohsmello" }, { "code": "", "text": "Thank you, this is helpful! Two things may be happening:Please let me know if either of those points help.", "username": "Julia_Oppenheim" } ]
Announcement: Intelligent Query Generation in MongoDB Compass
2023-10-05T13:05:08.468Z
Announcement: Intelligent Query Generation in MongoDB Compass
326
null
[ "dot-net" ]
[ { "code": "", "text": "We are currently in a c# environment and wondering if we can implement the uuidv7 format in mongodb and also get the benefit from it so not just have the same behavior as uuidv4.There seem to be some way to do this:What would be the best way to implement this? If it’s actually possible as today…\nThank you!", "username": "pierre-luc_des" }, { "code": "IIdGeneratorBsonSerializer.RegisterIdGenerator(typeof(Guid), new UuidV7Generator());UuidV7Generator", "text": "Hi, @pierre-luc_des,Welcome to the MongoDB Community Forums. I understand that you want to use UUIDv7 with the .NET/C# Driver.Let’s start by talking about how UUID formats currently supported by the driver and MongoDB - UUIDv3 and UUIDv4. The main difference between UUIDv3 and UUIDv4 is that UUIDv4 specifies a consistent serialized byte ordering regardless of the language or endianness of your CPU whereas UUIDv3 does not. The main difference between UUIDv4 and UUIDv7 is the generation mechanism. Thus you could use UUIDv4 GUIDs in your application and register a new IIdGenerator that uses the UUIDv7 generation mechanism. You could then register it via BsonSerializer.RegisterIdGenerator(typeof(Guid), new UuidV7Generator()); wher UuidV7Generator is your generator.Hope that helps!Sincerely,\nJames", "username": "James_Kovacs" } ]
Is it possible to correctly use the UUIDv7 format in mongodb and all his benefit today? (c# environment)
2023-11-01T20:52:17.357Z
Is it possible to correctly use the UUIDv7 format in mongodb and all his benefit today? (c# environment)
180
null
[ "containers" ]
[ { "code": "", "text": "Hello,After configuring mongodb as StatefulSets with tls mode = requireTLS I get this NETWORK error:\n“c”:“NETWORK”, “id”:22588, “ctx”:“conn527”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“100.62.8.86:44503”,“connectionId”:527}}\nThe error seems to come from internal communications between services and StatefulSets.\nI don’t make any request from specific client.\nWith tls mode= preferedTLS it works.\nPlease advice!", "username": "Myq" }, { "code": "", "text": "Seems something in your K8S (assume you use k8s as you mentioned stateful set) is using non-tls connection to your mongodb server.You need to figure what that something is and then try configuring it to use tls. (i’m not familiar with stateful set)", "username": "Kobe_W" }, { "code": "", "text": "Thank you Kobe.\nIndeed I use K8S. MongoDB server is started on TLS config.\nThe complete message is below:\n“c”:“NETWORK”, “id”:22942, “ctx”:“listener”,“msg”:“Connection accepted”,“attr”:{“remote”:“100.62.8.86:44503”,“connectionId”:13,“connectionCount”:1}}\n“c”:“NETWORK”, “id”:22986, “ctx”:“conn13”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“100.62.8.86:44503”,“connectionId”:13}}\n“c”:“NETWORK”, “id”:22942, “ctx”:“conn13”,“msg”:“Connection ended”,“attr”:{“remote”:“100.62.8.86:44503” ,“connectionId”:13,“connectionCount”:0}}\nI have a service for each replica and a headless service(plus loopback 127.0.0.1). These services communicate (internally) with statefulset and throws these errors.\nBR", "username": "Myq" }, { "code": "tls=true", "text": "100.62.8.86:44503Is this one of the replicas , a service or another connection.Any connections other then the replica set members will need to transition to tls by adding tls=true to the connection string.", "username": "chris" }, { "code": "", "text": "Thank you Chris. I knew it. But where should I mention this tls=true? Is there any magic flag? Or should I make a new kind of chart?\nMy service looks like this:\nkind: Service\nmetadata:\nname: myname - internal\nspec:\nclusterIP: None\nports:\n- name: mongodb\nport: 27017\nselector:\nname: myname", "username": "Myq" }, { "code": "mongodb://username:password@host1,host2,host3/?tls=true", "text": "Hi @MyqSpecifying connection options may be specific to the application, refer to its documentation .The majority of applications will accept a connections string in the form:\nmongodb://username:password@host1,host2,host3/?tls=trueref:\nhttps://www.mongodb.com/docs/manual/reference/connection-string", "username": "chris" }, { "code": "", "text": "Yes, but this is available when I make a connection from a specific client (another app as you mentioned) to my mongodb replicaset. In my case the problem occurs when I start mongodb replicaset as a server , because of the internal services communications. Along with these errors, the PODs crashed (scales down).", "username": "Myq" }, { "code": "", "text": "Are you rolling this yourself or using the MongoDB K8S Operator?Along with these errors, the PODs crashed (scales down).The mongod logs / container logs are going to tell you exactly what is going wrong.", "username": "chris" }, { "code": "", "text": "oc logs mymongo-0 shows the above errors. 
Is there any logs more precisely?", "username": "Myq" }, { "code": "", "text": "That’s not an error, thats an info level log.You have stated that it is crashing, there will be logs to indicate why that is the case.", "username": "chris" }, { "code": "", "text": "These are the only errors I see. Maybe should I switch to debug error level. However seems to be a NETWORK error:\n“c”:“NETWORK”, “id”:22986, “ctx”:“conn13”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“100.62.8.86:44503”,“connectionId”:13}}", "username": "Myq" }, { "code": "\"s\":\"I\"", "text": "Again that is not an error. This is logged at \"s\":\"I\" an info level log. It is logging the fact a client tried to connect without TLS. It is NOT going to crash a mongod.You will need to look deeper into your log, if there is a ‘crash’ logs of severity Error or Fatal are common.Otherwise the container logs may tell you what is going on.Are you rolling this yourself or using the MongoDB K8S Operator?If you’re rolling this yourself what does the healthcheck look like?ref:\nhttps://www.mongodb.com/docs/manual/reference/log-messages/#structured-logging", "username": "chris" }, { "code": "", "text": "I rolling by myself. Indeed 100.62.8.86:44503 was another service. I manage to scale down. However now I have the same error from loopback= 127.0.0.1 and the PODs crash after a while.\n“s”:“I”, “c”:“NETWORK”, “id”:22988, “ctx”:“conn37”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“127.0.0.1:54800”\nHowever for a short period I can connect to the database (from a specific client). The TLS/Certificate seems to work fine.\nAlso I tried to replace bind_ip = 0.0.0.0 with everything else , except localhost/127.0.0.1. The error message disappear but the StatefulSet doesn’t work. I guess localhost/127.0.0.1 is mandatory.", "username": "Myq" }, { "code": "", "text": "I rolling by myselfIf you’re rolling this yourself what does the healthcheck look like?What have you configured for your probes?Are you aware of the MongoDB Community Kubernetes Operator ?", "username": "chris" }, { "code": "", "text": "Basically I use this readiness probe:\nports:\n- containerPort: 27017\nreadinessProbe:\nexec:\ncommand:\n- mongo\n- --eval\n- “db.runCommand({ ping: 1})”\ninitialDelaySeconds: 10\nperiodSeconds: 10\nsuccessThreshold: 1\nfailureThreshold: 3but only for “preferTLS” where everything goes well. For “requireTLS” I have this error:\nReadiness probe failed: MongoDB shell version v5 connecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb Error: network error while attempting to run command ‘isMaster’ on host ‘127.0.0.1:27017’ : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1\nIn this case mongo-1 and mongo-2 do not get to generate.\n(I know that MongoDB Community Kubernetes Operator is related only to the cloud. I’m working on prem)", "username": "Myq" }, { "code": "", "text": "(I know that MongoDB Community Kubernetes Operator is related only to the cloud. I’m working on prem)What leads you to this conclusion?Basically I use this readiness probe:If you update to TLS then this would also be required to use TLS. 
This is why your pod is being stopped(“crashing”), the healthcheck cannot connect.", "username": "chris" }, { "code": "", "text": "ld also be rIf I use this readiness probe:\nreadinessProbe:\nexec:\ncommand:\n- mongo\n- --tls\n- -u admin\n- -p MONGO_ADMIN_PASSWORD\n- --tlsCertificateKeyFile=/tlsFolder/mongodb.pem\n- --eval\n- “db.runCommand({ ping: 1})”\ninitialDelaySeconds: 10\nperiodSeconds: 10\nsuccessThreshold: 1\nfailureThreshold: 3\nI have this error:\nReadiness probe failed: MongoDB shell version connecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb {“t”:{“$date”:“2023-11-06T07:22:43.869Z”},“s”:“I”, “c”:“NETWORK”, “id”:5490002, “ctx”:“thread1”,“msg”:“Started a new thread for the timer service”} {“t”:{“$date”:“2023-11-06T07:22:43.879Z”},“s”:“E”, “c”:“NETWORK”, “id”:23256, “ctx”:“js”,“msg”:“SSL peer certificate validation failed”,“attr”:{“error”:“SSL peer certificate validation failed: self signed certificate”}} Error: couldn’t connect to server 127.0.0.1:27017, connection attempt failed: SSLHandshakeFailed: SSL peer certificate validation failed: self signed certificate : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1\nCommon Name (CN) is: localhost.\nMy .pem file is not located in /etc/mongo/ but in other path. Maybe this could be a problem.", "username": "Myq" }, { "code": "", "text": "SSL peer certificate validation failed: self signed certificateWhere is the CA file for this certificate? You’ll need to specify that on the command line too.Are you using client certificates in this deployment too or are you confusing options ?", "username": "chris" }, { "code": "", "text": "Yes seems to be a bug in the certificate.", "username": "Myq" } ]
TLS NETWORK error in mongo stateful sets
2023-10-23T17:08:23.922Z
TLS NETWORK error in mongo stateful sets
382
null
[]
[ { "code": "{\n \"$or\": [\n {\n \"operationType\": \"insert\"\n },\n {\n \"operationType\": \"update\",\n \"updateDescription.updatedFields\": {\n \"$not\": {\n \"$eq\": [\n {\n \"updatedAt\": \"$updateDescription.updatedFields.updatedAt\"\n }\n ]\n }\n }\n }\n ]\n}\n", "text": "I am setting up a database trigger for insert and update. The function this will trigger will either add createdAt & updatedAt or just update the updatedAt.ISSUE: This match expression is not filtering out the document.updates where updatedAt was the only field updated. This is causing an endless loop of triggers.expression ==", "username": "Todd_Stephenson" }, { "code": "{\n \"$or\": [\n {\n \"operationType\": \"insert\"\n },\n {\n \"operationType\": \"update\",\n \"updateDescription.updatedFields.updatedAt\": {\n $exists: false,\n }\n }\n ]\n}\n", "text": "Hi, I believe there is a bit of a misunderstanding in how the match expression works (also, I am not convinced it is valid synctax with the $eq). I think this should work, but let me know if it is not what you are looking for:", "username": "Tyler_Kaye" }, { "code": "exports = function(changeEvent) {\n const dbName = changeEvent.ns.db;\n const collectionName = changeEvent.ns.coll;\n const collection = context.services.get('mongodb-atlas').db(dbName).collection(collectionName);\n const currentDate = new Date();\n\n // Check the operation type\n if (changeEvent.operationType === 'insert') {\n // If it's an insert, set both createdAt and updatedAt\n collection.updateOne(\n { _id: changeEvent.documentKey._id },\n {\n $set: {\n createdAt: currentDate,\n updatedAt: currentDate\n }\n }\n )\n .then(result => {\n console.log(`Inserted document in ${collectionName} collection with createdAt and updatedAt fields.`);\n })\n .catch(error => {\n console.error('Error setting createdAt and updatedAt on insert:', error);\n });\n } else if (changeEvent.operationType === 'update') {\n // If it's an update, only set updatedAt\n collection.updateOne(\n { _id: changeEvent.documentKey._id },\n {\n $set: {\n updatedAt: currentDate\n }\n }\n )\n .then(result => {\n console.log(`Updated document in ${collectionName} collection with updatedAt field.`);\n })\n .catch(error => {\n console.error('Error setting updatedAt on update:', error);\n });\n }\n\n return;\n};\n", "text": "Hey @Tyler_Kaye , Thank you for the quick reply.edited: I didn’t have the trigger active, oops. Sorry!issue:\nOn insert it triggers the function 3 times. Here is my function:Screenshot 2023-11-09 at 1.08.29 PM2552×1508 262 KB", "username": "Todd_Stephenson" }, { "code": "", "text": "Sorry, having a hard time parsing if you are saying it worked or if it did not work?", "username": "Tyler_Kaye" }, { "code": "", "text": "Hey @Tyler_Kaye , I just edited my comment above. I didn’t have the trigger enabled lol", "username": "Todd_Stephenson" }, { "code": "", "text": "The update filter works perfectly! @Tyler_Kaye", "username": "Todd_Stephenson" }, { "code": "", "text": "It happens to us all sometimes Best,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "I just need help with no having 3 triggers on document insert. @Tyler_Kaye", "username": "Todd_Stephenson" }, { "code": "", "text": "I have to step out, but I am not sure what you mean?", "username": "Tyler_Kaye" }, { "code": "", "text": "When I insert a document the triggers gets executed 3 times then it stops. 
It should only get triggered once @Tyler_Kaye", "username": "Todd_Stephenson" }, { "code": "", "text": "Can you send a link to your trigger?", "username": "Tyler_Kaye" }, { "code": "", "text": "@Tyler_Kaye Here you go: App Services", "username": "Todd_Stephenson" }, { "code": "", "text": "Can you add to the printing to add the document’s _id to the statement (and possibly the updateDescription using EJSON.stringify()). I cant tell if something is going wrong or if there is just something inserting an object and then something updating an object", "username": "Tyler_Kaye" }, { "code": "exports = function(changeEvent) {\n const dbName = changeEvent.ns.db;\n const collectionName = changeEvent.ns.coll;\n const collection = context.services.get('mongodb-atlas').db(dbName).collection(collectionName);\n const currentDate = new Date();\n const documentId = changeEvent.documentKey._id; // Get the _id of the document\n\n // Import the EJSON module\n const EJSON = require('mongodb-extended-json');\n\n // Check the operation type\n if (changeEvent.operationType === 'insert') {\n const updateDescription = {\n $set: {\n createdAt: currentDate,\n updatedAt: currentDate\n }\n };\n\n // If it's an insert, set both createdAt and updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Inserted document with _id: ${documentId} in ${collectionName} collection with createdAt and updatedAt fields. Update Description: ${EJSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting createdAt and updatedAt on insert:', error);\n });\n } else if (changeEvent.operationType === 'update') {\n const updateDescription = {\n $set: {\n updatedAt: currentDate\n }\n };\n\n // If it's an update, only set updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Updated document with _id: ${documentId} in ${collectionName} collection with updatedAt field. Update Description: ${EJSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting updatedAt on update:', error);\n });\n }\n\n return;\n};\nexports = function(changeEvent) {\n const dbName = changeEvent.ns.db;\n const collectionName = changeEvent.ns.coll;\n const collection = context.services.get('mongodb-atlas').db(dbName).collection(collectionName);\n const currentDate = new Date();\n const documentId = changeEvent.documentKey._id; // Get the _id of the document\n\n // Check the operation type\n if (changeEvent.operationType === 'insert') {\n const updateDescription = {\n $set: {\n createdAt: currentDate,\n updatedAt: currentDate\n }\n };\n\n // If it's an insert, set both createdAt and updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Inserted document with _id: ${documentId} in ${collectionName} collection with createdAt and updatedAt fields. Update Description: ${JSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting createdAt and updatedAt on insert:', error);\n });\n } else if (changeEvent.operationType === 'update') {\n const updateDescription = {\n $set: {\n updatedAt: currentDate\n }\n };\n\n // If it's an update, only set updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Updated document with _id: ${documentId} in ${collectionName} collection with updatedAt field. 
Update Description: ${JSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting updatedAt on update:', error);\n });\n }\n\n return;\n};\n", "text": "@Tyler_Kaye Does this work? I couldn’t use EJSON , I got an error “Cannot find module ‘mongodb-extended-json’”. This is the code for the function I wrote with EJSONHere is the code that actually worked", "username": "Todd_Stephenson" } ]
Trigger Match Expression for updatedAt
2023-11-09T17:56:24.238Z
Trigger Match Expression for updatedAt
110
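Pulling the thread's pieces together, a condensed sketch of the setup that ended up working: the match expression that stops the trigger re-firing on its own updatedAt-only writes, plus a trimmed version of the function. The remaining question about the trigger firing three times on a single insert is not addressed here:

```javascript
// Trigger match expression (from the suggestion accepted above): fire on inserts,
// and on updates only when something other than updatedAt changed, so the
// function's own $set of updatedAt does not re-trigger it.
//
// {
//   "$or": [
//     { "operationType": "insert" },
//     { "operationType": "update",
//       "updateDescription.updatedFields.updatedAt": { "$exists": false } }
//   ]
// }

// Trigger function, trimmed to the essentials of the thread's working version:
exports = function (changeEvent) {
  const { db, coll } = changeEvent.ns;
  const collection = context.services.get("mongodb-atlas").db(db).collection(coll);
  const now = new Date();

  const update =
    changeEvent.operationType === "insert"
      ? { $set: { createdAt: now, updatedAt: now } }
      : { $set: { updatedAt: now } };

  return collection.updateOne({ _id: changeEvent.documentKey._id }, update);
};
```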
null
[]
[ { "code": "", "text": "I would like to make sure that db.CollectionX.fieldY is constrained to values in db.CollectionY. Essentially this is a schema enum, but the enum has to be drawn from another collection, not typed directly in the schema. (This would violate DRY in our application, since we also need these values elsewhere in the code.)", "username": "Eugene_Callahan1" }, { "code": "db.CollectionYdb.CollectionXdb.CollectionYdb.CollectionYdb.CollectionXraise NoEnum unless CollectionWhy.where({ value: value_to_test }).exists?\n", "text": "What will happen if the values in db.CollectionY get deleted?If the answer is along the lines of nothing should happen to db.CollectionX or the values in db.CollectionY don’t ever get deleted, the solution is to validate the expected value in db.CollectionY in the application layer (probably in controllers if the project structure mentioning has one) before writing to db.CollectionX.That is what I would do, given only the info you mentioned.", "username": "3Ji" }, { "code": "", "text": "We ALREADY validate the values in the application layer. The issue is, let’s say someone enters ‘Otters’ (in collections ‘Animals’) as ‘Reptiles’ (from collection ‘Classes’). Now we’d like someone using mongosh to be able to correct that to ‘Mammals’. They go to do that, but they mistype the Class as ‘Mamals’. (Humans will do such things!) It would be nice if the DB itself could block such a typo, since ‘Mamals’ is not a valid member of ‘Classes’.", "username": "Eugene_Callahan1" }, { "code": "", "text": "My message is packed in my question. You can’t just take the second half and ignore the first half.If the validations you are looking for do exist, working the way you wanted it to be, what should happen in the scenario described?Different situations call for different opinions; it is hard to satisfy everyone. Some want it the foreign-key-constrain way, some don’t. The foreign-key way has some more issues of its own. (The ones who don’t include me.)Like a lot of things in MongoDB, the database puts the decision power in the hands of applications. Which, IMO, is a way better design than RDBMS.I don’t see your whole situation, but for me, if my application doesn’t support the correction without proper validations, it is incomplete —no mongosh editing production data.\nHowever, I understand that your situation can be different. I just happened to use the language that solved what you mentioned. It is not really my accomplishment, but people in the community.Just to be clear, I use Ruby. In production, we REPL on the actual models with validations intact. We even wrote troubleshooting scripts for common scenarios and will add UI if the issue is frequent.I don’t know every nook and cranny of MongoDB, but I ran production applications with millions of documents and proper business logic for years. I am not aware of such functionality of MongoDB; it might exist, but I don’t know and never heard of it. They provide building blocks to add such functionality when the situation calls for.", "username": "3Ji" }, { "code": "", "text": "I really just needed to know if this could be done. I guess the answer is “no.” But I do appreciate the additional essay on software engineering that accompanied the answer.", "username": "Eugene_Callahan1" } ]
Can I validate fields in one collection against values in another?
2023-11-10T05:41:58.092Z
Can I validate fields in one collection against values in another?
73
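A minimal sketch of the application-layer approach described above, reusing the thread's Animals/Classes example; the collection and field names are illustrative only, since MongoDB has no built-in cross-collection constraint:

```javascript
// Write-path guard: check the allowed values collection before updating.
// "db" is assumed to be a connected driver Db instance; names are hypothetical.
async function setAnimalClass(db, animalId, className) {
  const exists = await db.collection("Classes").findOne({ name: className });
  if (!exists) {
    throw new Error(`"${className}" is not a valid class`); // blocks typos like "Mamals"
  }
  return db
    .collection("Animals")
    .updateOne({ _id: animalId }, { $set: { class: className } });
}

// Optional audit to find documents that already hold an invalid value:
// db.Animals.aggregate([
//   { $lookup: { from: "Classes", localField: "class", foreignField: "name", as: "m" } },
//   { $match: { m: { $size: 0 } } },
// ]);
```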
https://www.mongodb.com/…_2_1024x626.jpeg
[ "compass", "mongodb-shell", "containers" ]
[ { "code": "version: '3'\nservices:\n mongo1:\n hostname: mongo1\n image: mongo\n env_file: .env\n expose:\n - 27017\n environment:\n - MONGO_INITDB_DATABASE=${MONGO_INITDB_DATABASE}\n - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}\n - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}\n networks:\n - mongo-network\n ports:\n - 172.16.50.15:27017:27017 \n volumes:\n - ./db-data1:/data/db\n - ./replica.key:/etc/replica.key\n restart: always\n command: mongod --replSet my-mongo-set --keyFile /etc/replica.key --bind_ip_all\n mongo2:\n hostname: mongo2\n image: mongo\n env_file: .env\n expose:\n - 27017\n environment:\n - MONGO_INITDB_DATABASE=${MONGO_INITDB_DATABASE}\n - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}\n - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}\n networks:\n - mongo-network\n ports:\n - 172.16.50.16:27017:27017\n volumes:\n - ./db-data2:/data/db\n - ./replica.key:/etc/replica.key\n restart: always\n command: mongod --replSet my-mongo-set --keyFile /etc/replica.key --bind_ip_all\n mongo3:\n hostname: mongo3\n image: mongo\n env_file: .env\n expose:\n - 27017\n environment:\n - MONGO_INITDB_DATABASE=${MONGO_INITDB_DATABASE}\n - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}\n - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}\n networks:\n - mongo-network\n ports:\n - 172.16.50.17:27017:27017\n volumes:\n - ./db-data3:/data/db\n - ./replica.key:/etc/replica.key\n restart: always\n command: mongod --replSet my-mongo-set --keyFile /etc/replica.key --bind_ip_all\n\n mongoinit:\n image: mongo\n hostname: mongo\n env_file: .env\n networks:\n - mongo-network\n restart: \"no\"\n depends_on:\n - mongo1\n - mongo2\n - mongo3\n # command: tail -F anything\n command: >\n mongosh --host 172.16.50.15:27017 --username ${MONGO_INITDB_ROOT_USERNAME} --password ${MONGO_INITDB_ROOT_PASSWORD} --eval \n '\n config = {\n \"_id\" : \"my-mongo-set\",\n \"members\" : [\n {\n \"_id\" : 0,\n \"host\" : \"172.16.50.15:27017\",\n \"priority\": 3\n },\n {\n \"_id\" : 1,\n \"host\" : \"172.16.50.16:27017\",\n \"priority\": 2\n },\n {\n \"_id\" : 2,\n \"host\" : \"172.16.50.17:27017\",\n \"priority\": 1\n }\n ]\n };\n rs.initiate(config, { force: true });\n rs.status();\n '\n\n\nvolumes:\n db-data1:\n db-data2:\n db-data3:\n\nnetworks:\n mongo-network:\n driver: bridge\n{\n set: 'my-mongo-set',\n date: 2023-10-26T09:29:47.471Z,\n myState: 1,\n term: Long(\"1\"),\n syncSourceHost: '',\n syncSourceId: -1,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n lastCommittedWallTime: 2023-10-26T09:29:47.126Z,\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n appliedOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n durableOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1698312557, i: 1 }),\n electionCandidateMetrics: {\n lastElectionReason: 'electionTimeout',\n lastElectionDate: 2023-10-26T09:22:36.919Z,\n electionTerm: Long(\"1\"),\n lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n numVotesNeeded: 2,\n 
priorityAtElection: 3,\n electionTimeoutMillis: Long(\"10000\"),\n numCatchUpOps: Long(\"0\"),\n newTermStartDate: 2023-10-26T09:22:37.059Z,\n wMajorityWriteAvailabilityDate: 2023-10-26T09:22:37.674Z\n },\n members: [\n {\n _id: 0,\n name: '172.16.50.15:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 445,\n optime: [Object],\n optimeDate: 2023-10-26T09:29:47.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z,\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1698312156, i: 1 }),\n electionDate: 2023-10-26T09:22:36.000Z,\n configVersion: 1,\n configTerm: 1,\n self: true,\n lastHeartbeatMessage: ''\n },\n {\n _id: 1,\n name: '172.16.50.16:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 441,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:37.000Z,\n optimeDurableDate: 2023-10-26T09:29:37.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z,\n lastHeartbeat: 2023-10-26T09:29:45.731Z,\n lastHeartbeatRecv: 2023-10-26T09:29:46.705Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 2,\n name: '172.16.50.17:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 442,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:37.000Z,\n optimeDurableDate: 2023-10-26T09:29:37.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z,\n lastHeartbeat: 2023-10-26T09:29:45.731Z,\n lastHeartbeatRecv: 2023-10-26T09:29:46.706Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1698312587, i: 1 }),\n signature: {\n hash: Binary.createFromBase64(\"ib1dE3JOZYGqKad6qnbOxj2NWXo=\", 0),\n keyId: Long(\"7294195172714217475\")\n }\n },\n operationTime: Timestamp({ t: 1698312587, i: 1 })\n}\n{\n set: 'my-mongo-set',\n date: 2023-10-26T09:30:03.394Z,\n myState: 2,\n term: Long(\"1\"),\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n lastCommittedWallTime: 2023-10-26T09:29:57.126Z,\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n appliedOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n durableOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1698312577, i: 1 }),\n electionParticipantMetrics: {\n votedForCandidate: true,\n electionTerm: Long(\"1\"),\n lastVoteDate: 2023-10-26T09:22:36.925Z,\n electionCandidateMemberId: 0,\n voteReason: '',\n lastAppliedOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n maxAppliedOpTimeInSet: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n priorityAtElection: 1,\n newTermStartDate: 2023-10-26T09:22:37.059Z,\n newTermAppliedDate: 2023-10-26T09:22:37.675Z\n },\n members: [\n 
{\n _id: 0,\n name: '172.16.50.15:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 457,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:57.000Z,\n optimeDurableDate: 2023-10-26T09:29:57.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z,\n lastHeartbeat: 2023-10-26T09:30:02.732Z,\n lastHeartbeatRecv: 2023-10-26T09:30:01.754Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1698312156, i: 1 }),\n electionDate: 2023-10-26T09:22:36.000Z,\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 1,\n name: '172.16.50.16:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 457,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:57.000Z,\n optimeDurableDate: 2023-10-26T09:29:57.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z,\n lastHeartbeat: 2023-10-26T09:30:02.265Z,\n lastHeartbeatRecv: 2023-10-26T09:30:02.731Z,\n pingMs: Long(\"2\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 2,\n name: '172.16.50.17:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 461,\n optime: [Object],\n optimeDate: 2023-10-26T09:29:57.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z,\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1,\n self: true,\n lastHeartbeatMessage: ''\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1698312597, i: 1 }),\n signature: {\n hash: Binary.createFromBase64(\"IVAPZTToW+9cNtRIV/PiQSvQQaM=\", 0),\n keyId: Long(\"7294195172714217475\")\n }\n },\n operationTime: Timestamp({ t: 1698312597, i: 1 })\n}\n", "text": "Hello all,\nIve set docker env for create MongoDB replication with this docker compose:My env is running but i not understand why my doc is only on primary and not copy to the secondary db.this is my rs.status() on primaryinside secondary rs.status()and capture screen to mongo compass :\nreplicate\nsecondary 1\nsecondary 2TestMongoDB1639×1003 210 KBwhat did i forget ?", "username": "Jeremy_Kermes" }, { "code": "", "text": "Nobody has an idea ?", "username": "Jeremy_Kermes" }, { "code": "use testMongo\nshow collections\ndb.testMongoCollection.find()\n", "text": "Have you refreshed the Compass database views on the secondary? 
Compass does not automatically refresh the list of database or collections.In the mongosh, of Compass connected to a secondary run the commands:Personally, I would use the host names mongo1, mongo2 and mongo3 in the list of replica set members rather than the IP address.", "username": "steevej" }, { "code": "{\n set: 'my-mongo-set',\n date: 2023-11-07T07:32:31.942Z,\n myState: 2,\n term: Long(\"1\"),\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n lastCommittedWallTime: 2023-11-07T07:32:23.683Z,\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n appliedOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n durableOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1699342332, i: 1 }),\n electionParticipantMetrics: {\n votedForCandidate: true,\n electionTerm: Long(\"1\"),\n lastVoteDate: 2023-10-26T09:22:36.925Z,\n electionCandidateMemberId: 0,\n voteReason: '',\n lastAppliedOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n maxAppliedOpTimeInSet: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n priorityAtElection: 2,\n newTermStartDate: 2023-10-26T09:22:37.059Z,\n newTermAppliedDate: 2023-10-26T09:22:37.668Z\n },\n members: [\n {\n _id: 0,\n name: '172.16.50.15:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 1030205,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-11-07T07:32:23.000Z,\n optimeDurableDate: 2023-11-07T07:32:23.000Z,\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z,\n lastHeartbeat: 2023-11-07T07:32:31.289Z,\n lastHeartbeatRecv: 2023-11-07T07:32:31.284Z,\n pingMs: Long(\"2\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1698312156, i: 1 }),\n electionDate: 2023-10-26T09:22:36.000Z,\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 1,\n name: '172.16.50.16:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 1030209,\n optime: [Object],\n optimeDate: 2023-11-07T07:32:23.000Z,\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z,\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1,\n self: true,\n lastHeartbeatMessage: ''\n },\n {\n _id: 2,\n name: '172.16.50.17:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 1030205,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-11-07T07:32:23.000Z,\n optimeDurableDate: 2023-11-07T07:32:23.000Z,\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z,\n lastHeartbeat: 2023-11-07T07:32:31.287Z,\n lastHeartbeatRecv: 2023-11-07T07:32:31.288Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1699342343, i: 3 }),\n signature: {\n hash: Binary.createFromBase64(\"SWPKqfLLavFMGccTgAR2u67eHTk=\", 0),\n 
keyId: Long(\"7294195172714217475\")\n }\n },\n operationTime: Timestamp({ t: 1699342343, i: 3 })\n}\n\n", "text": "I try but ive got this error message.ProblémeMongo1631×972 73.7 KBI don’t understand i have Not primary and secondaryOK=False\nwith my rs.status() OK = 1 .I use 3 IP address because in the futur, where my replica work fine, i move docker instance of mongo on 3 PC on 3 localstorages.", "username": "Jeremy_Kermes" }, { "code": "mongodb://user:password@172.16.50.16:27017/?authMechanism=DEFAULT&replicaSet=my-mongo-set&directConnection=true\nmongodb://user:password@172.16.50.16:27017/?authMechanism=DEFAULT&replicaSet=my-mongo-set&readPreference=secondaryPreferred\n", "text": "With this new error, after search on google i edit my string connection to my secondary by :Before:Now:Now with this new string connection i see my replicate collection, but ive a new question.In my MONGOSH prompt i see my-mongo-set [primary] test >Why i see [primary] ? My string connection content IP to the secondary, not for the primary ?Why if i use directConnection on primary i can see this data but not on secondary ?ProblémeMongo21633×822 52.6 KB", "username": "Jeremy_Kermes" }, { "code": "directConnection=truereadPreference=secondaryPreferreddirectConnection=truereadPreference=secondaryPreferred", "text": "Why i see [primary] ? My string connection content IP to the secondary, not for the primary ?Unless directConnection=true the client(Compass) will perform cluster discover and connect to the primary.\nreadPreference=secondaryPreferred willread from a secondary. The connections you have made could be reading from the exact same secondary.To read from a specific secondary both directConnection=true and readPreference=secondaryPreferred need to be specified.", "username": "chris" }, { "code": "", "text": "Ok if i understand, i need use directConnection for verify if my documents is copy correctly on the secondary ?", "username": "Jeremy_Kermes" }, { "code": "", "text": "On a specific secondary, yes.", "username": "chris" }, { "code": "", "text": "Ok a set my string connection with directConnection and i don’t see my document on secondary 1 and secondary 2.mongodb://user:password@172.16.50.16:27017/?authMechanism=DEFAULT&replicaSet=my-mongo-set&directConnection=trueI don’t understand why my primary is not copied to the secondary.ProblémeMongo21296×583 24.8 KB", "username": "Jeremy_Kermes" }, { "code": "directConnection=truereadPreference=secondaryPreferred", "text": "We already covered this.To read from a specific secondary both directConnection=true and readPreference=secondaryPreferred need to be specified.Here is an example:image1959×957 135 KB", "username": "chris" }, { "code": "", "text": "The answer from chris came at the same time and was covering the same. Please ignore.", "username": "steevej" }, { "code": "", "text": "Indeed, withdirectConnection=true&readPreference=secondaryPreferredI can find all my documents.\nThank you all for your help and i’m sorry to have made you repeat yourself ", "username": "Jeremy_Kermes" } ]
Docker replica does not copy to secondary
2023-10-26T09:47:04.591Z
Docker replica does not copy to secondary
255
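A minimal mongosh sketch of the verification step this thread converges on: write through the replica set, then read from one specific secondary with both directConnection and readPreference set. The hosts, credentials, and collection name are placeholders modelled on the thread, not an exact reproduction of the poster's setup.

// 1. Write through the replica set; the driver routes the insert to the primary.
//    mongosh "mongodb://user:password@172.16.50.15:27017,172.16.50.16:27017,172.16.50.17:27017/?replicaSet=my-mongo-set"
db.testMongoCollection.insertOne({ checkedAt: new Date() });

// 2. Read from one specific secondary; both options are required, as noted in the replies.
//    mongosh "mongodb://user:password@172.16.50.16:27017/?directConnection=true&readPreference=secondaryPreferred"
db.testMongoCollection.find().sort({ checkedAt: -1 }).limit(1);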
https://www.mongodb.com/…_2_1024x559.jpeg
[ "boston-mug" ]
[ { "code": "Senior Product Manager, Developer EducationAssociate Developer Advocate, MongoDBDirector of Developer Advocacy", "text": "Screenshot 2023-10-25 1312531216×664 76.5 KB To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button once you RSVP. You need to be signed in to access the button. Have meetup.com? You can also register for this event on our Boston MongoDB User Group Meetup Page.Event Details:We are excited to welcome you to our first Boston MongoDB User Group (MUG). This evening is filled with:Agenda:Event Type: In-Person\nLocation: PTC Headquarters 121 Seaport Blvd Boston, MASenior Product Manager, Developer EducationAssociate Developer Advocate, MongoDBDirector of Developer Advocacy", "username": "Veronica_Cooley-Perry" }, { "code": "", "text": "We are excited to see you on Monday, November 13th at the MongoDB User Group (MUG) Meet-up!We are still accepting RSVPs so please consider inviting someone from your network.Location: PTC Headquarters 121 Seaport Blvd Boston, MA 2Event Schedule:Parking:There is a parking garage at 121 Seaport Blvd.Office Access:When you arrive at PTC Headquarters, MongoDB associates will help guide you into the event.This email address has been shared with PTC security. You’ll soon receive an email via the envoy system, guiding you to create an account. Afterward, you’ll get another email containing a QR code for office area access. You will need to show this QR code as well as a photo ID to security upon arrival. Failing to complete this ahead of time will require on-site completion, potentially causing entry delays.We ask that you kindly stay within the designated event location and maintain a respectful and professional atmosphere throughout the office.Please let me know if you have any questions!Best,Veronica and Chuck your MongoDB User Group Leader", "username": "Veronica_Cooley-Perry" } ]
Boston MongoDB User Group Kickoff
2023-10-03T17:18:27.094Z
Boston MongoDB User Group Kickoff
1,391
null
[]
[ { "code": "", "text": "Hi dear friends \nI am working on a extension (js) which copies a JSON from the browser in to clipboard ready to be pasted.\nFor days now I have searched to find how to convert the id type which are string and convert them to UUID type.\ne.g:\n“id” : \" 0b2484c8-c069-4fa3-a23d-b40010ab59da\" >to> “id”: UUDI(“0b2484c8-c069-4fa3-a23d-b40010ab59da”)As well as for “LastUpdated”: “2023-10-23T08:46:13.213Z” >to> “LastUpdated”: ISODate(2023-10-23T08:46:13.213Z),I appreciate any idea or solution.", "username": "Behzad_Pashaie2" }, { "code": "", "text": "Hi @Behzad_Pashaie2,Welcome to the MongoDB Community forum I am working on a extension (js) which copies a JSON from the browser in to clipboard ready to be pasted.\nFor days now I have searched to find how to convert the id type which are string and convert them to UUID type.Are you looking for any specific conversion method? Also, may I ask the purpose of this conversion? Are you planning to store the data in a MongoDB Collection? If so, you might consider using a JS script to do the conversion process.Looking forward to your response.Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "KushagraHi @Kushagra_Kesav_1\nThanks for you kind response. Yes I am looking for a way (in javaScript function\\code ) to parse some values to different type (If I am not wrong BSON types!?)Let me explain it this way I have javascript code which copies the response of an API call as a :json1 :{\n“Id”: “b52918ea-3d32-4b5c-ac2c-114ac940c47d”,\n“RegDate”: “2023-09-26T00:00:00Z”\n}I need convert\\parse json1 tojson2 :{\n“Id”: UUDI(“b52918ea-3d32-4b5c-ac2c-114ac940c47d”),\n“RegDate”: ISODate(“2023-09-26T00:00:00Z”)\n}The reason to require this is when inserting json1 the “Id” and “RegDate” are inserted as string types while i need them to be UUID and ISODate type.\nI have searched a lot. 
I be so gratefull if help me with a solution\\practical approach o this.", "username": "Behzad_Pashaie2" }, { "code": "document.addEventListener('DOMContentLoaded', function () {\n const copyButton = document.getElementById('copyButton');\n const msg = document.getElementById('msg');\n\n copyButton.addEventListener('click', function () {\n // Clear the existing URL list\n msg.innerHTML = '';\n\n // Get the current active tab's URL\n chrome.tabs.query({ active: true, currentWindow: true }, function (tabs) {\n const currentTab = tabs[0];\n let url = new URL(currentTab.url);\n //const url = currentTab.url;\n const baseUrl = url.protocol + '//' + url.hostname;\n\n console.log(url.hash);\n url= new URL(url.hash.replace(/^#/,\"\"),currentTab.url.split(\"#\")[0]);\n console.log(\"URL >>>\"+url);\n // Extract parameters from the URL (adjust as needed)\n const id = url.searchParams.get('id');\n \n const code = url.searchParams.get('code');\n \n const type = url.searchParams.get('type');\n // Construct the new URL\n const newUrl = `${baseUrl}/api/entity/${id}?companyCode=${code}&module=${type}`;\n console.log(newUrl);\n\n // Make the API call using the current URL\n fetch(newUrl)\n .then(response => response.json())\n .then(data => {\n data._id = data.Id;\n delete data.Id;\n const readyMongoData = changeMongoType(data);\n // Copy the API response to the clipboard\n copyToClipboard(JSON.stringify(readyMongoData)); // JSON.stringify(data)\n \n }).then(()=>{\n const newMsg = document.createElement('h3');\n newMsg.textContent = new DOMParser().parseFromString(`Copied to cliboard V4 <span>&#10003;</span>`, 'text/html').body.textContent;\n msg.appendChild(newMsg);\n })\n .catch(error => {\n console.error('API Call Error:', error);\n });\n\n \n });\n });\n});\n\n\nfunction copyToClipboard(text) {\n navigator.clipboard.writeText(text)\n .then(() => {\n console.log('URL copied to clipboard:', text);\n // You can add a success message here\n })\n .catch(error => {\n console.error('Error copying to clipboard:', error);\n // You can add an error message here\n });\n}\n\nfunction parseDateToISODate(dateString) {\n const date = new Date(dateString);\n if (isNaN(date)) {\n // Handle invalid date input if necessary\n return null;\n }\n return date.toISOString();\n}\n\n\n\n///////////////////////////////////////////////////////////////////\nfunction changeMongoType(jsonObj) {\n \n for (const key in jsonObj) {\n if (jsonObj.hasOwnProperty(key)) {\n const value = jsonObj[key];\n \n if (typeof value === 'string' && /^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/.test(jsonObj[key])) {\n jsonObj[key] = convertStringToUUIDFormat(value);\n \n }else if (typeof value === 'string' && /^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d{3})?Z?$/.test(jsonObj[key]) && isStringADate(jsonObj[key])) {\n jsonObj[key] = `ISODate(\"${jsonObj[key]}\")`.replace(/\\\"/g, \"\");\n // jsonObj[key] = `ISODate(\"${jsonObj[key]}\")`.replace(/\\\"/g, \"\");\n } else if (typeof value === 'object') {\n changeMongoType(value); // Recursively process nested objects\n }\n }\n }\n \n return jsonObj;\n}\n\nfunction convertStringToUUIDFormat(input) {\n const regex = /^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;\n if (regex.test(input)) {\n console.log(`UUID('${input}')`.replace(/['\"]+/g, ''));\n //return `UUID('${input}')`.replace(/['\"]+/g, '');\n console.log(\"input> \"+input);\n return UUID(input);\n \n } \n return input;\n} \n\nfunction isStringADate(dateString) {\n // Try to parse 
the string into a Date object\n const date = new Date(dateString);\n // Check if the parsed date is a valid date and the original string is not \"Invalid Date\"\n return !isNaN(date) && date.toString() !== 'Invalid Date';\n}\n\nfunction preservId(dateString) {\n // Try to parse the string into a Date object\n const date = new Date(dateString);\n // Check if the parsed date is a valid date and the original string is not \"Invalid Date\"\n return !isNaN(date) && date.toString() !== 'Invalid Date';\n}```", "text": "BTW here my code in the case you wonder:\nAs you see the changeMongoType function will do the job as now it only parsers\n“Id”: “b52918ea-3d32-4b5c-ac2c-114ac940c47d” to “Id”: “UUID(‘b52918ea-3d32-4b5c-ac2c-114ac940c47d’)\" which again is a string and dose not fulfill what I am looking for.", "username": "Behzad_Pashaie2" }, { "code": "", "text": "Hi @Kushagra_Kesav_1Hope you are doing well. \nAny updates on this issue?\nThanks", "username": "Behzad_Pashaie2" }, { "code": "", "text": "@Kushagra_Kesav_1Hi @Kushagra_Kesav_1. Any updates. Looking to hear from you.", "username": "Behzad_Pashaie2" } ]
Convert common JSON types to Mongo types: "string-id" to UUID("string-id")
2023-10-23T13:42:20.328Z
Convert common JSON types to Mongo types: "string-id" to UUID("string-id")
289
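The question above never gets a concrete answer in the thread, so the following is only a sketch of one common approach, assuming the official MongoDB Node.js driver performs the insert: build real BSON values (UUID and Date) before calling insertOne, instead of emitting the mongosh literals UUID(...) and ISODate(...) as strings. The URI, namespace, and the UUID re-export from the mongodb package are assumptions (UUID can also be imported from the bson package).

const { MongoClient, UUID } = require("mongodb"); // UUID re-export assumed; otherwise require("bson").UUID

// Convert the plain JSON payload into BSON-typed values before inserting.
function toBsonTypes(doc) {
  return {
    ...doc,
    Id: new UUID(doc.Id),           // "b52918ea-..." becomes Binary subtype 4, shown as UUID(...) in mongosh
    RegDate: new Date(doc.RegDate), // ISO-8601 string becomes a BSON Date, shown as ISODate(...) in mongosh
  };
}

async function main() {
  const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
  const coll = client.db("test").collection("docs");           // placeholder namespace
  const json1 = { Id: "b52918ea-3d32-4b5c-ac2c-114ac940c47d", RegDate: "2023-09-26T00:00:00Z" };
  await coll.insertOne(toBsonTypes(json1));
  await client.close();
}

main().catch(console.error);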
null
[ "aggregation" ]
[ { "code": "```I have 2 collections\n\n1. external_S_P_FLAT_main_api \n2. external_S_C_FLAT_main_api\n\nThe collection has data as below.\n\n\"external_S_P_FLAT_main_api\": [\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef588674d\"\n },\n \"data.pricing.material\": \"TG11\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000001\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef588674e\"\n },\n \"data.pricing.material\": \"TG12\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000002\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef588674f\"\n },\n \"data.pricing.material\": \"TG14\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000003\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886750\"\n },\n \"data.pricing.material\": \"TG2341\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000004\",\n }\n]\n\"external_S_C_FLAT_main_api\": [\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886751\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886752\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886753\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886754\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n }\n]\n\nBelow is the query I am executing: \n\ndb.external_S_P_FLAT_main_api.aggregate([\n {\n \"$addFields\": {\n \"external_S_P_FLAT_main_api_data.pricing.controlling_area\": \"$data.pricing.controlling_area\"\n }\n },\n {\n \"$addFields\": {\n \"external_S_P_FLAT_main_api_data.pricing.cost_center\": \"$data.pricing.cost_center\"\n }\n },\n {\n \"$lookup\": {\n from: \"external_S_C_FLAT_main_api\",\n let: {\n let_data__pricing__controlling_area: \"$external_S_P_FLAT_main_api_data.pricing.controlling_area\",\n let_data__pricing__cost_center: \"$external_S_P_FLAT_main_api_data.pricing.cost_center\"\n },\n pipeline: [\n {\n \"$match\": {\n \"$expr\": {\n \"$and\": [\n {\n \"$eq\": [\n \"$data.costcenter.controlling_area\",\n \"$$let_data__pricing__controlling_area\"\n ]\n },\n {\n \"$eq\": [\n \"$data.costcenter.cost_center\",\n \"$$let_data__pricing__cost_center\"\n ]\n }\n ]\n }\n }\n }\n ],\n as: \"from_external_S_C_FLAT_main_api\"\n }\n },\n {\n \"$project\": {\n _id: 0,\n \"external_S_P_FLAT_main_api_data.pricing.controlling_area\": 0,\n \"external_S_P_FLAT_main_api_data.pricing.cost_center\": 0,\n // from_external_S_C_FLAT_main_api: 0,\n }\n }\n])\n\nBelow is the output comes: \n\n[\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000001\",\n \"data.pricing.material\": \"TG11\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": 
{}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000002\",\n \"data.pricing.material\": \"TG12\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000003\",\n \"data.pricing.material\": \"TG14\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n 
\"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000004\",\n \"data.pricing.material\": \"TG2341\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n }\n]\n\nI want the output in such a way that it has single element in the array from_external_S_C_FLAT_main_api which is matching with the condition as\nvalue of data.pricing.cost_center matches with from_external_S_C_FLAT_main_api.data.costcenter.cost_center and \nvalue of data.pricing.controlling_area matches with from_external_S_C_FLAT_main_api.data.costcenter.controlling_area\n\nSo the expected output should be as below:\n[\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000001\",\n \"data.pricing.material\": \"TG11\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000002\",\n \"data.pricing.material\": \"TG12\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000003\",\n \"data.pricing.material\": \"TG14\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n 
\"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000004\",\n \"data.pricing.material\": \"TG2341\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n }\n]\n\nWhat am I missing? How shall I get the expected result?```\n", "text": "", "username": "Hemant_Joshi" }, { "code": " \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data\" : {\n \"costcenter\" : {\n \"controlling_area\": \"AJ00\",\n \"cost_center\": \"DE000002\",\n \"valid_from_date\": \"2022-03-02\",\n \"long_description\": \"CC DE000002 - 1st\",\n }\n }\n", "text": "To filter the array within the document you $project, $addFields or $set the array with $filter.Is there any reasons why your documents look like:rather thanI would be worry that the former structure takes more space. I would also be worry that you would need to project each field of data.costcenter individually.", "username": "steevej" }, { "code": "", "text": "@steevej,My documents look like this because I am storing the collection fields as dot notation.\nI have a Mongo version of v4.4.3Can you help me with the built query with the suggestions as below?\nfilter the array within the document you $project, $addFields or $set the array with $filter.", "username": "Hemant_Joshi" }, { "code": "{ \"$set\" : {\n \"from_external_S_C_FLAT_main_api\" : { \"$filter\" : {\n \"input\" : \"$from_external_S_C_FLAT_main_api\" ,\n \"cond\" : { /* An expression that resolves to a boolean value used to determine if an element should be included in the output array. */ }\n } }\n} }\n", "text": "It looks like you have not clicked on the $filter link I provided. Usually, there is a counter that indicates the number of time the link is clicked and as this morning there is no counter, so no one has clicked on the link. So it means you have not looked at the examples shown in the documentation.Example of $set with $filterWhy are youstoring the collection fields as dot notation", "username": "steevej" } ]
After some aggregation stages, I want to filter the array within the document that matches with the conditions
2023-11-09T13:15:32.869Z
After some aggregation stages, I want to filter the array within the document that matches with the conditions
116
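A sketch of the $set + $filter stage suggested in the last reply, with the cond filled in for this case. It assumes the nested document layout that reply recommends (data.pricing and data.costcenter as real subdocuments rather than literal dotted field names); with literal dotted keys the expressions below would not resolve as written.

// Keep only the looked-up cost-center elements that match the current document.
const filterStage = {
  $set: {
    from_external_S_C_FLAT_main_api: {
      $filter: {
        input: "$from_external_S_C_FLAT_main_api",
        as: "cc",
        cond: {
          $and: [
            { $eq: ["$$cc.data.costcenter.controlling_area", "$data.pricing.controlling_area"] },
            { $eq: ["$$cc.data.costcenter.cost_center", "$data.pricing.cost_center"] },
          ],
        },
      },
    },
  },
};

// Appended after the existing $lookup stage:
// db.external_S_P_FLAT_main_api.aggregate([ /* $match, $lookup ... */, filterStage ]);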
null
[ "swift" ]
[ { "code": "", "text": "I have a macOS app built with the Realm Swift SDK. The dataset is about 150mb and is running on an M2.When a new user runs the app for the first time, the initial download of of the database takes FOREVER. It pushes 5 minutes, even on a 1000Mbps connection. Sync operations after the initial download are instant and perfect.Restructuring/segmenting the database is not an option; the app requires all of the data that is being downloaded.The initial-download performance is becoming untenable and I need advice on how to fix it. I found an old thread about compaction, here: MongoDB Realm syncing size - #13 by Brock_GLIt’s unclear if this advice is still relevant. Do I need to contact support to enable this, still? Or is the option now exposed in settings somewhere?", "username": "Bryan_Jones" }, { "code": "", "text": "Hi Bryan,\nWould you be willing to share your app ID? This will allow us to take a closer look and possibly determine if this is due to cluster limitations, network, or something else.", "username": "mpobrien" }, { "code": "", "text": "Sure. What’s the best way for me to get that to you in a non-public forum?", "username": "Bryan_Jones" }, { "code": "", "text": "I sent you a DM, we can coordinate there.", "username": "mpobrien" }, { "code": "", "text": "Hi,\nI have exactly the same issueDo you have any solution for it please ? We are obliged to uninstall every day the appThank you", "username": "Laurent_RIPOCHE" }, { "code": "", "text": "Hi, this is often very dependent on your application (object size, bootstrap size, cluster size, etc). Can you send a link to your application (realm cloud URL) and we can poke in and see if anything stands out?Best,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "Hi Tyler,\nThank you for your reply et sorry for my late replyHow can I send you the link ?", "username": "Famille_Ripoche" }, { "code": "", "text": "Sorry I was with the wrong account … Can you give me DM again please\nMy bad ", "username": "Famille_Ripoche" }, { "code": "", "text": "Hi,\nI need the DM on this account Laurent_RIPOCHE, the other one is an account created by error Thank you a lot !", "username": "Laurent_RIPOCHE" }, { "code": "", "text": "Hi, you should be able to just DM me directly. Also, the URL in the realm URL should not be considered sensitive information as only you and MongoDB employees are able to view it", "username": "Tyler_Kaye" }, { "code": "", "text": "Hi, I took a look at your app, and unfortunately I might need some more information from you. From my perspective, things look pretty good. In the past 10 days there have only been 27 bootstraps (clients connecting for the first time)Minimum Time: 315 ms\nMaximum Time: 564 ms\nAverage Time: 429 ms\nP95 Time: 545 msThis is the time it takes between receiving a connection from the client and sending the last of the download messages to the client. It is worth noting that the bootstraps themselves are not very large, so I wouldn’t expect there to be much time taken by the client to receive these changes and integrate them, but that part is not information collected by the server.Can you elaborate on what exactly is slow? 
And do you have a specific user / request_id from the logs that you are looking at?Best,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "Hi,\nThank you for your answerThe apps stuck like 5 minutes or more at the loading screenThe loading screen say that 100 % is downloaded and stay stuck for a long time and then go to the new view\n(I’ve made a screenshot)IMG_70371170×2532 110 KBIt’s so long that we preferer to uninstall it and resintall. But it’s not a long term solution I’ ve used the starter template for SwiftUIThe second weird part is that my database is only like 20MB and take 18 GB of disk spaceThank you a lot !\nHave a nice day", "username": "Laurent_RIPOCHE" }, { "code": "", "text": "Hi, this seems like something that is most likely an issue with the code used to open the realm and use it (and most likely blocking the main thread on something that is not happening). Can you share the code you are using for this?Best,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "Hi Tyler,\nHope you are well\nI’ve sent you the sample of my code, did you receive it ?Have a nice day", "username": "Laurent_RIPOCHE" }, { "code": "", "text": "Do you have an estimate for how much data is being synced down here? We do not have logging for this (though we should, and I can add that soon)", "username": "Tyler_Kaye" }, { "code": "", "text": "I don’t know,\nHow can i find this information ?Any clue for thie second issue :The second weird part is that my database is only like 20MB and take 18 GB of disk spacethank you", "username": "Laurent_RIPOCHE" }, { "code": "", "text": "You client device logs should show how much data is being sent to the client. You can post those logs here if possible?As for the second, are you talking about realm or MongoDB?", "username": "Tyler_Kaye" }, { "code": "", "text": "Thank you for your fast reply,\nI’ve sent you a dm with the log", "username": "Laurent_RIPOCHE" }, { "code": "", "text": "As for the second, are you talking about realm or MongoDB?\nMongoDB but we think that his realm who take the place", "username": "Laurent_RIPOCHE" } ]
Realm Sync - Long Initial Download Times
2023-05-02T19:39:08.320Z
Realm Sync - Long Initial Download Times
1,036
null
[]
[ { "code": "title: {\n multi: {\n de: {\n searchAnalyzer: 'lucene.german',\n type: 'string',\n },\n en: {\n searchAnalyzer: 'lucene.english',\n type: 'string',\n },\n es: {\n searchAnalyzer: 'lucene.spanish',\n type: 'string',\n },\n fr: {\n searchAnalyzer: 'lucene.french',\n type: 'string',\n },\n it: {\n searchAnalyzer: 'lucene.italian',\n type: 'string',\n },\n pt: {\n searchAnalyzer: 'lucene.portuguese',\n type: 'string',\n },\n },\n type: 'string',\n },\n edition: {\n multi: {\n de: {\n searchAnalyzer: 'lucene.german',\n type: 'string',\n },\n en: {\n searchAnalyzer: 'lucene.english',\n type: 'string',\n },\n es: {\n searchAnalyzer: 'lucene.spanish',\n type: 'string',\n },\n fr: {\n searchAnalyzer: 'lucene.french',\n type: 'string',\n },\n it: {\n searchAnalyzer: 'lucene.italian',\n type: 'string',\n },\n pt: {\n searchAnalyzer: 'lucene.portuguese',\n type: 'string',\n },\n },\n type: 'string',\n },\n[\n{\n analyzer: 'lucene.english',\n name: 'book-english-synonyms',\n source: {\n collection: 'synonyms',\n },\n},\n{\n analyzer: 'lucene.spanish',\n name: 'book-spanish-synonyms',\n source: {\n collection: 'synonyms',\n },\n},\n]\n{ value: 'title', multi: 'en' },\n{ value: 'edition', multi: 'en' },\n", "text": "Guys, some idea of what is this?, I have been looking through the documentation without success nor able to find a reference to such limit in the limits documentation.It happens if I try to add a second mapping to a search index.\nThe concrete case is that I have 2 fields title and edition that are analyzed differently depending on the language:And here is how I’m trying to create the mapping:And just to complete the idea I query like:And the problem that I’m trying to resolve is that I want to use a synonym for ‘1st’ to be ‘first’ and viceversa.\nConcrete example (and what I’m trying to resolve), If I search for “Optimize B2 first” or “Optimize B2 1st”, I should get the same results. Currently the result with larger score is the one that matches the way “first” is being written.Thanks in advance", "username": "Ignacio_Larranaga" }, { "code": "{ value: 'title', multi: 'en' },\n{ value: 'edition', multi: 'en' },\n$search$search", "text": "Thanks for providing that index definition and the context for the searches @Ignacio_Larranaga.And just to complete the idea I query like:And the problem that I’m trying to resolve is that I want to use a synonym for ‘1st’ to be ‘first’ and viceversa.\nConcrete example (and what I’m trying to resolve), If I search for “Optimize B2 first” or “Optimize B2 1st”, I should get the same results. Currently the result with larger score is the one that matches the way “first” is being written.I would like to get some further clarification here, is the 2 “query”'s you’ve mentioned in the above quote two separate $search queries? 
If so, could you provide the following:Look forward to hearing your response.Regards,\nJason", "username": "Jason_Tran" }, { "code": "{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"activeCopyCounts\": {\n \"dynamic\": true,\n \"type\": \"document\"\n },\n \"authors\": {\n \"type\": \"string\"\n },\n \"edition\": {\n \"multi\": {\n \"de\": {\n \"analyzer\": \"lucene.german\",\n \"searchAnalyzer\": \"lucene.german\",\n \"type\": \"string\"\n },\n \"en\": {\n \"analyzer\": \"lucene.english\",\n \"searchAnalyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"es\": {\n \"analyzer\": \"lucene.spanish\",\n \"searchAnalyzer\": \"lucene.spanish\",\n \"type\": \"string\"\n },\n \"fr\": {\n \"analyzer\": \"lucene.french\",\n \"searchAnalyzer\": \"lucene.french\",\n \"type\": \"string\"\n },\n \"it\": {\n \"analyzer\": \"lucene.italian\",\n \"searchAnalyzer\": \"lucene.italian\",\n \"type\": \"string\"\n },\n \"pt\": {\n \"analyzer\": \"lucene.portuguese\",\n \"searchAnalyzer\": \"lucene.portuguese\",\n \"type\": \"string\"\n }\n },\n \"type\": \"string\"\n },\n \"isbn\": {\n \"type\": \"string\"\n },\n \"labelsDe\": {\n \"analyzer\": \"lucene.german\",\n \"searchAnalyzer\": \"lucene.german\",\n \"type\": \"string\"\n },\n \"labelsEn\": {\n \"analyzer\": \"lucene.english\",\n \"searchAnalyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"labelsEs\": {\n \"analyzer\": \"lucene.spanish\",\n \"searchAnalyzer\": \"lucene.spanish\",\n \"type\": \"string\"\n },\n \"labelsFr\": {\n \"analyzer\": \"lucene.french\",\n \"searchAnalyzer\": \"lucene.french\",\n \"type\": \"string\"\n },\n \"labelsIt\": {\n \"analyzer\": \"lucene.italian\",\n \"searchAnalyzer\": \"lucene.italian\",\n \"type\": \"string\"\n },\n \"labelsPt\": {\n \"analyzer\": \"lucene.portuguese\",\n \"searchAnalyzer\": \"lucene.portuguese\",\n \"type\": \"string\"\n },\n \"prices\": {\n \"fields\": {\n \"countryCode\": {\n \"analyzer\": \"lucene.keyword\",\n \"searchAnalyzer\": \"lucene.keyword\",\n \"type\": \"string\"\n }\n },\n \"type\": \"document\"\n },\n \"publisher\": {\n \"type\": \"string\"\n },\n \"title\": {\n \"multi\": {\n \"de\": {\n \"analyzer\": \"lucene.german\",\n \"searchAnalyzer\": \"lucene.german\",\n \"type\": \"string\"\n },\n \"en\": {\n \"analyzer\": \"lucene.english\",\n \"searchAnalyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"es\": {\n \"analyzer\": \"lucene.spanish\",\n \"searchAnalyzer\": \"lucene.spanish\",\n \"type\": \"string\"\n },\n \"fr\": {\n \"analyzer\": \"lucene.french\",\n \"searchAnalyzer\": \"lucene.french\",\n \"type\": \"string\"\n },\n \"it\": {\n \"analyzer\": \"lucene.italian\",\n \"searchAnalyzer\": \"lucene.italian\",\n \"type\": \"string\"\n },\n \"pt\": {\n \"analyzer\": \"lucene.portuguese\",\n \"searchAnalyzer\": \"lucene.portuguese\",\n \"type\": \"string\"\n }\n },\n \"type\": \"string\"\n }\n }\n },\n \"synonyms\": [\n {\n \"analyzer\": \"lucene.standard\",\n \"name\": \"book-synonyms\",\n \"source\": {\n \"collection\": \"synonyms\"\n }\n }\n ]\n}\n{\n ...,\n \"synonyms\": [\n {\n \"analyzer\": \"lucene.standard\",\n \"name\": \"book-synonyms\",\n \"source\": {\n \"collection\": \"synonyms\"\n }\n },\n {\n \"analyzer\": \"lucene.spanish\",\n \"name\": \"book-synonyms-es\",\n \"source\": {\n \"collection\": \"synonyms-es\"\n }\n }\n ]\n}\n", "text": "Thanks @Jason_Tran , just to clarify I’m not really having an issue with the query but when creating the mapping. 
The “Synonym mappings limit exceeded” appears when I try to add a second synonym to the same index.Specifically here are my current mappings:If I try for example to do:The error appears, right in the admin console.\nI don’t think it is related to the document or the mapping itselft but to my cluster tier (M0 & M2).\nMight it be?I’ll provide a sample data-set, query and mapping for the 1st/first issue I described above but the root of the problem is this inability to add a second synonym.", "username": "Ignacio_Larranaga" }, { "code": "", "text": "Thanks for clarifying Ignacio.I don’t think it is related to the document or the mapping itselft but to my cluster tier (M0 & M2).As of now there are some limits related to Atlas Search and M0, M2/M5 tier clusters. I’m just checking if the below is related or if there’s other limits that may be causing this error:Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "Just for clarification, in my case the synonyms collections has 32 docs.", "username": "Ignacio_Larranaga" } ]
Synonym mappings limit exceeded?
2023-11-09T02:23:39.364Z
Synonym mappings limit exceeded?
114
null
[]
[ { "code": "", "text": "Heya, I’m Emmanuel Katto, African Minerals and Mining Engineer expert from Uganda. I like to travel, do adventure sports, and try new cuisines when I’m not working. I find technology very intriguing and would love to share ideas to make this globe a better place to live. I am new to this community, hope will learn alot about the latest technology and development techniques.Thanks & RegardsEmmanuel Katto from Africa", "username": "Emmanuel_Katto" }, { "code": "", "text": "Welcome to the community! We’re so glad you’re here. Please be sure to check out our MongoDB User Groups and join one near you!", "username": "Karissa_Fuller" }, { "code": "", "text": "Thank you so much for your warm welcome…", "username": "Emmanuel_Katto" } ]
Emmanuel Katto African Introduction
2023-11-08T07:11:53.358Z
Emmanuel Katto African Introduction
109
null
[]
[ { "code": "", "text": "I’m just starting to experiment with RealmCpp and Atlas Sync.I’ve verified everything works great with anonymous users.Now I want to be able to create a new user using their email as their ID. I don’t want to store a password for them. How would I go about this?It looks I could use Custom Function Authentication, and write a simple function that internally identifies users based on their email / username only.Thank you!", "username": "Adam_Wilson" }, { "code": "", "text": "I’d expect custom functions to work in the way you describe! Another alternative could be to hardcode a password in your app, and only use user input for the email field, though that is admittedly a bit hacky.", "username": "Sudarshan_Muralidhar" } ]
Create a new user based on email only?
2023-11-04T13:48:53.402Z
Create a new user based on email only?
157
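Neither post shows what such a function looks like, so here is a hedged sketch of an Atlas App Services custom-function authentication handler that identifies users by email alone. The linked data source name ("mongodb-atlas"), database, and collection are placeholders; the essential contract is that the function returns a stable, unique string ID for the user.

// App Services custom-function authentication handler (server-side function).
exports = async function (loginPayload) {
  const { email } = loginPayload;
  if (!email) {
    throw new Error("email is required");
  }

  const users = context.services
    .get("mongodb-atlas")          // placeholder linked data source name
    .db("myapp")                   // placeholder database
    .collection("auth_users");     // placeholder collection

  // Reuse the existing identity for this email, or create one; no password is stored.
  const existing = await users.findOne({ email });
  if (existing) {
    return existing._id.toString();
  }
  const { insertedId } = await users.insertOne({ email, createdAt: new Date() });
  return insertedId.toString();
};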
https://www.mongodb.com/…_2_1024x576.jpeg
[ "losangeles-mug" ]
[ { "code": "Solution Architect", "text": "LA_MUG1920×1080 161 KBThe LA MUG will host regular meetings to bring the MongoDB community together. It fosters sharing knowledge and meeting people who use and love MongoDB. Meetups will include interesting talks, networking, and discussions with and our community.This meetup will include a talk titled “MongoDB First Steps”. The presentation is a light introduction to MongoDB covering the unique aspects of reading and writing data with MongoDB. It is a gentle introduction with lots of demos to highlight the document database paradigm and how to work with it. No special background is required, and questions are encouraged!If you are a developer, DBA, analyst, or new to MongoDB - this talk is for you! To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button if you are going. You need to be signed in to access the button.Event Type: In-Person\nLocation: 2219 Main Street, Santa Monica CA 90405Solution ArchitectNuri wrangles code, data, and random bytes for a living. He’s been using MongoDB since version 1.8 to this very day.image726×746 66.2 KB", "username": "Nuri_Halperin" }, { "code": "", "text": "Hey All,Gentle Reminder: The event is tomorrow and we are excited to have you all join us tomorrowThe event is scheduled to begin at 18:15 at the Beach House CoWork.We want to make sure everyone has a fantastic time, so please arrive on time at 18:15 to ensure you don’t miss the session, and we can all have some time to chat before the talk begins.If you have any questions, please don’t hesitate to ask by replying to this thread Looking forward to seeing you all at the event!", "username": "Harshit" }, { "code": "", "text": "Hey All,Gentle Reminder: The event is tomorrow and we are excited to have you all join us tomorrowThe event is scheduled to begin at 18:15 at the Beach House CoWork.We want to make sure everyone has a fantastic time, so please arrive on time at 18:15 to ensure you don’t miss the session, and we can all have some time to chat before the talk begins.If you have any questions, please don’t hesitate to ask by replying to this thread.Looking forward to seeing you all at the event!\nimage928×707 158 KB", "username": "Nuri_Halperin" } ]
Los Angeles MongoDB User Group (LA:MUG)
2023-10-11T22:32:55.133Z
Los Angeles MongoDB User Group (LA:MUG)
877
null
[ "aggregation" ]
[ { "code": "", "text": "help to get optimized query , taking long time, any alternate methods", "username": "P.S_Shilpa" }, { "code": "", "text": "Please update the title of your post to something more comprehensible.Please provide the complete pipeline.Please provide the explain plan of your query.Please provide sample documents.", "username": "steevej" } ]
db.colectionName1.aggregate([ { $match: { "filed1": { $in: ["x", "y"] }, } }, { $lookup: { from: "colectionName2", localField: "_id", foreignField: "xy", as: "matchedDate" } }, { $match: {
2023-11-09T08:06:07.916Z
db.colectionName1.aggregate([ { $match: { "filed1": { $in: ["x", "y"] }, } }, { $lookup: { from: "colectionName2", localField: "_id", foreignField: "xy", as: "matchedDate" } }, { $match: {
88
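The reply above asks for an explain plan; for completeness, here is a mongosh sketch of how to produce one for this pipeline, plus a typical supporting index to test, using the placeholder collection and field names from the post.

const pipeline = [
  { $match: { filed1: { $in: ["x", "y"] } } },
  { $lookup: { from: "colectionName2", localField: "_id", foreignField: "xy", as: "matchedDate" } },
];

// Per-stage execution statistics for the aggregation:
db.colectionName1.explain("executionStats").aggregate(pipeline);

// A common first thing to verify: an index on the $lookup foreign field.
db.colectionName2.createIndex({ xy: 1 });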
null
[ "aggregation" ]
[ { "code": "", "text": "I am working with backups. At the moment I have two collections: A and A2.\ndocuments in both locations look like this\n{_id:ObjectId(“6456456456”), “name”:“a_unique_name”}The documents in A don’t share the same _id, they only share the “name” attribute.So my question is how can I use an aggregation pipeline to compare the two collections and find the difference between the sets of “name” attributes.Someone gave me a solution that required the creation of an additional attribute. I don’t think that sounds like an efficient idea. I image MongoDB is very powerful and should be able to not resort to setting and unsetting attributes per document when I have almost a billion of them.", "username": "Leslie_Solorzano" }, { "code": "pipeline_A = [ ] ;\n\nlookup = { \"$lookup\" : {\n \"from\" : \"A2\" ,\n \"localField\" : \"name\" ,\n \"foreignField\" : \"name\" ,\n \"as\" : \"A2\" ,\n \"pipeline\" : [\n { \"$project\" : { \"_id\" : 1 } ,\n { \"$limit\" : 1 }\n ]\n} }\n\npipeline_A.push( lookup ) ;\n\nmatch = { \"$match\" : {\n \"A2.0\" : { \"$exists\" : false }\n} } ;\n\npipeline_A.push( match ) ;\n\nproject = { \"$project\" : {\n \"name\" : 1\n} }\n\n/* The following should produce names in A that are not in A2 */\n\ndb.A.aggregate( pipeline_A ) ;\n", "text": "My approach would use $lookup.I would aggregate first on A to $lookup in A2, then a second aggregation on A2 to $lookup in A.You then do the same with pipeline_A2.Yes, there are 2 database accesses but with $unionWith you can do it in 1.Yes, matching names are processed twice.Yes, you need an index with name:1 prefix to have some kind of performance.When doing that sort of things, I like to $out the result in a temporary collection to explore and process the results later.", "username": "steevej" } ]
Using setDifference with multiple collections
2023-11-08T17:03:32.454Z
Using setDifference with multiple collections
118
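The pipeline quoted in the reply drops a closing brace in the inner $project stage; below is a slightly simplified but complete sketch of the same anti-join idea, with collection and field names from the thread, that finds name values present in A but missing from A2.

// Names in A that have no counterpart in A2. An index on { name: 1 } in A2 is assumed
// for reasonable performance, as the reply points out.
db.A.aggregate([
  {
    $lookup: {
      from: "A2",
      localField: "name",
      foreignField: "name",
      as: "A2",
    },
  },
  { $match: { "A2.0": { $exists: false } } }, // keep only documents with no match
  { $project: { _id: 0, name: 1 } },
  // Optionally add { $out: "A_minus_A2" } to persist the result, as suggested in the thread.
]);
// Swap A and A2 (or combine with $unionWith) to get names in A2 that are missing from A.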
null
[ "mongoose-odm", "compass", "mongodb-shell", "field-encryption", "schema-validation" ]
[ { "code": "mongocryptdmongoshmongooseMongoDB Enterprise 7.0.2\n\"mongodb\": \"^6.2.0\",\n\"mongodb-client-encryption\": \"^6.0.0\",\n\"mongoose\": \"^8.0.0\",\ninitializeEncryptionautoEncryptionexport default class Encryption implements IEncryption {\n // ... There are several private and public variables not shown here\n\n // private constructor to enforce calling `initialize` method below, which calls this constructor internally\n private constructor(opts?: EncryptionConfigConstructorOpts) {\n this.tenantId = opts?.tenantId;\n this.keyVaultDbName = opts?.keyVaultDbName;\n this.keyVaultCollectionName = opts?.keyVaultCollectionName;\n this.DEKAlias = opts?.DEKAlias;\n\n // Detect a local development environment\n if (process.env?.ENVIRONMENT === LOCAL_DEV_ENV) {\n const keyBase64 = process.env?.LOCAL_MASTER_KEY;\n const key = Buffer.from(keyBase64, 'base64');\n\n // For testing, I'm manually switching between a local key and remote KMS\n // I'll leave out the production-detection code\n if (_debug) {\n this.provider = KMS_PROVIDER;\n this.kmsProviders = {\n aws: {\n accessKeyId: process.env.AWS_ACCESS_KEY_ID,\n secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,\n },\n };\n this.masterKey = {\n key: process.env.KMS_MASTER_ARN,\n region: opts?.masterRegion,\n };\n } else {\n this.kmsProviders = {\n local: {\n key,\n },\n };\n }\n }\n\n const keyVaultNamespace = `${this.keyVaultDbName}.${this.keyVaultCollectionName}`;\n\n const encryptionOptions: ClientEncryptionOptions = {\n keyVaultNamespace,\n kmsProviders: this.kmsProviders,\n };\n\n this.encryptionOptions = encryptionOptions;\n }\n\n public static async initialize(\n url: string,\n opts?: EncryptionConfigConstructorOpts\n ): Promise<Encryption> {\n // Set internal attributes\n const encryption = new Encryption(opts);\n\n // Create key vault collection (this is idempotent, afaik)\n const client = new MongoClient(url);\n const keyVaultDB = client.db(encryption.keyVaultDbName);\n const keyVaultColl = keyVaultDB.collection(encryption.keyVaultCollectionName);\n await keyVaultColl.createIndex(\n { keyAltNames: 1 },\n {\n unique: true,\n partialFilterExpression: { keyAltNames: { $exists: true } },\n }\n );\n\n let dek: UUID | undefined = undefined;\n\n // This checks for an existing DEK, then creates/assigns or just assigns when necessary\n try {\n // Initialize client encryption\n const clientEncryption = new ClientEncryption(client, encryption.encryptionOptions!);\n const keyOptions = {\n masterKey: encryption.masterKey,\n keyAltNames: [encryption.DEKAlias],\n };\n dek = await clientEncryption.createDataKey(encryption.provider, keyOptions);\n } catch (err: any) {\n // Duplicate key error is expected if the key already exists, so we fetch the key if that happens\n if (String(err?.code) !== '11000') {\n throw err;\n } else {\n // Check if a DEK with the keyAltName in the env var DEK_ALIAS already exists\n const existingKey = await client\n .db(encryption.keyVaultDbName)\n .collection(encryption.keyVaultCollectionName)\n .findOne({ keyAltNames: encryption.DEKAlias });\n\n if (existingKey?._id) {\n dek = UUID.createFromHexString(existingKey._id.toHexString());\n } else {\n throw new Error('DEK could not be found or created');\n }\n }\n } finally {\n await client.close();\n }\n\n encryption.dek = dek;\n encryption.isReady = !!encryption.dek;\n return encryption;\n }\n\n // Defined as an arrow function to preserve the `this` context, since it is called as a callback elsewhere\n // This gets called after the `initialize` method from within each 
micro-service\n public getSchemaMap = (\n jsonSchema: Record<string, unknown>,\n encryptionMetadata?: Record<string, unknown>\n ): Record<string, unknown> => {\n if (!this?.isReady) {\n throw new Error('Encryption class cannot get schema map until it is initialized');\n }\n\n const schemaMapWithEncryption = {\n encryptMetadata: {\n keyId: [this.dek],\n algorithm: process.env.ALG_DETERMINISTIC,\n ...encryptionMetadata,\n },\n ...jsonSchema,\n };\n return schemaMapWithEncryption;\n };\n}\n\n// ... Start up code\n const encryption = await Encryption.initialize(process.env.DB_CONN_STRING);\n const opts = {\n autoEncryption: {\n ...encryption.encryptionOptions\n },\n };\n\n await Service1Models.initialize(process.env.DB_CONN_STRING, opts, encryption.getSchemaMap);\n await Service2Models.initialize(process.env.DB_CONN_STRING, opts, encryption.getSchemaMap);\n\n// ... More start up code and API route config\ninitialize// ... Init code and then assigning the schema-generated model (which does not contain any encryption syntax)\nService1Model.service1DataModel = model<IService1Document>('Service1Doc', Service1Schema, 'Service1Docs');\n\n// Finally, connecting to the DB with a schema map generated for this service, specifically\nmongoose.connect(url, {\n ...opts,\n autoEncryption: opts?.autoEncryption\n ? {\n ...opts?.autoEncryption,\n schemaMap: getSchemaMap(importedSchemaJson),\n }\n : undefined,\n } as ConnectOptions);\n{\n \"MyCollection1\": {\n \"properties\": {\n \"myDataString\": {\n \"encrypt\": {\n \"bsonType\": \"string\"\n }\n },\n \"myDataArray\": {\n \"encrypt\": {\n \"bsonType\": \"array\"\n }\n },\n \"myDataObject\": {\n \"bsonType\": \"object\",\n \"properties\": {\n \"myNestedProperty1\": {\n \"encrypt\": {\n \"bsonType\": \"string\"\n }\n },\n \"myNestedProperty2\": {\n \"bsonType\": \"string\"\n }\n }\n }\n }\n }\n}\n", "text": "In my Node.JS local development environment, using Compass, connected to a CSFLE-enabled MongoDB Enterprise client, I can see all the fields that are supposed to be encrypted by my JSON schema as clear text.", "username": "Angus_Ryer" }, { "code": "", "text": "@wan I heard you’re the expert around here ", "username": "Angus_Ryer" } ]
Node.JS CSFLE-enabled Enterprise database is not encrypting data (clear text visible in Compass)
2023-11-09T17:37:52.287Z
Node.JS CSFLE-enabled Enterprise database is not encrypting data (clear text visible in Compass)
89
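A detail that is easy to miss when wiring up CSFLE the way the thread above describes, and a plausible cause of fields showing up as clear text in Compass, is that the keys of the autoEncryption schemaMap must be full "<database>.<collection>" namespaces rather than bare collection names; a schema keyed only by the collection name simply never applies, so writes go through unencrypted. The following is a minimal Node.js sketch using assumed names (database mydb, a throwaway local 96-byte master key, and a placeholder data key id), not the original poster's setup:

const { MongoClient } = require("mongodb"); // mongodb-client-encryption must also be installed
const crypto = require("crypto");

// Assumed/placeholder values for illustration only
const keyVaultNamespace = "encryption.__keyVault";
const kmsProviders = { local: { key: crypto.randomBytes(96) } }; // use a persisted key in real code
const dataKeyId = null; // put the DEK UUID (Binary subtype 4) returned by createDataKey() here

// NOTE: keys are "<db>.<collection>" namespaces, not bare collection names
const schemaMap = {
  "mydb.MyCollection1": {
    bsonType: "object",
    encryptMetadata: {
      keyId: [dataKeyId],
      algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic",
    },
    properties: {
      myDataString: { encrypt: { bsonType: "string" } },
    },
  },
};

const client = new MongoClient("mongodb://localhost:27017", {
  autoEncryption: { keyVaultNamespace, kmsProviders, schemaMap },
});

The same namespace rule applies when the map is passed to mongoose.connect() through autoEncryption.schemaMap, as in the start-up code quoted above.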
null
[ "aggregation", "indexes" ]
[ { "code": "\"aggregate\" (\"pipeline\": [{\"$match\": {\"$or\": [{\"Review\": {\"$exists\": false}},{\"Review\": null}]}},{ \"$group\": {\"_id\": {},\"COUNT\": {\"$count\": {}}}}", "text": "Hi, I have a collection with an index on { Review: 1}, and am trying to count the documents without a Review property or where the Review property is null.Query is: \"aggregate\" (\"pipeline\": [{\"$match\": {\"$or\": [{\"Review\": {\"$exists\": false}},{\"Review\": null}]}},{ \"$group\": {\"_id\": {},\"COUNT\": {\"$count\": {}}}}In the execution plan, it seems like the index is scanned, not seeked. Is there a way to get a seek out of this?", "username": "Bruno_Denuit-Wojcik" }, { "code": "", "text": "In the execution plan, it seems like the index is scanned, not seeked.Please provide the plan. What do you mean by scanned vs seeked?", "username": "steevej" } ]
Index is not used on OR(NOT Exists(col), col is NULL)
2023-11-09T15:45:39.469Z
Index is not used on OR(NOT Exists(col), col is NULL)
83
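A note for readers of the thread above: in MongoDB, an equality match on null already matches documents where the field is either null or missing, so the $or with $exists: false is redundant. Collapsing it to a single { Review: null } predicate gives the planner less work against the { Review: 1 } index, though whether the count can be answered from the index alone still depends on the server version, so it is worth checking the explain output. A mongosh sketch (collection name assumed):

// { Review: null } matches both "Review is null" and "Review is missing"
db.myCollection.aggregate([
  { $match: { Review: null } },
  { $count: "COUNT" }
])

// Compare the winning plan and totalKeysExamined / totalDocsExamined:
db.myCollection.explain("executionStats").aggregate([
  { $match: { Review: null } },
  { $count: "COUNT" }
])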
null
[ "queries" ]
[ { "code": "", "text": "Howdy folks,I’m currently using app service auth for my app, which was fine, but thankfully has no gained a bit of traction and I have almost 3k users on it. The issue I’m facing now is that a new user is unable to sign in and i’m unable to see their account to see if they are verified because they’re down near the end of the list.Wondering if we could add a query by user email to the app services app users list which would allow me to find this user and help them get into their account.Thanks in advance\nEvan\ngymbuddy.ai", "username": "gymbuddy_ai" }, { "code": "LOAD MORE", "text": "I second this as a feature request.I end up Command + F searching for the email after tapping LOAD MORE a bunch of times.It’s super frustrating when I have to do it.", "username": "Kurt_Libby1" } ]
Adding search by user email functionality in app services app users
2023-06-23T09:26:11.937Z
Adding search by user email functionality in app services app users
331
null
[ "dot-net" ]
[ { "code": "", "text": "Hello,I realize the Mongo EF Core provider preview was recently released. Does anyone know if EF Core Migrations are supported?", "username": "Herb_Ramos" }, { "code": "", "text": "Hi, @Herb_Ramos,Thank you for your interest in the first public preview of our EF Core Provider. We do not currently support migrations, but are in discussions both internally and with Microsoft about how to support the notion of migrations in the context of document databases. You can find a list of our supported features along with the high-level roadmap on the GitHub project’s README.md.Sincerely,\nJames", "username": "James_Kovacs" }, { "code": "", "text": "Thanks for the quick reply.", "username": "Herb_Ramos" } ]
EF Core Migrations
2023-11-08T22:09:22.029Z
EF Core Migrations
109
null
[]
[ { "code": "", "text": "Hi,Using flexible sync- isn’t that a little weird that users have theoretical access to the entire database- unless filtered by a query on their device? I mean, it’s got advantages I guess for sharing data, but it sounds a bit hack-prone doesn’t it?\nI mean, the entire database is accessible to the client side app, which define rules/queries for accessing the user-specific data. Am I missing something/?", "username": "donuts542" }, { "code": "", "text": "Hi, permissions are defined on the server to define the access rules for the system (and any particular user). Please see here for more details: https://www.mongodb.com/docs/atlas/app-services/sync/app-builder/device-sync-permissions-guide/The view of data being synced down is a function of (a) the user’s permissions and (b) the users’s subscriptionsIf you have any other questions, please let me know.Best,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "Adding to the page Tyler mentioned, we also have a page in some of the SDK docs that goes into more details about how the combination of permissions and the Flexible Sync query determine what data can sync: https://www.mongodb.com/docs/realm/sdk/swift/sync/write-to-synced-realm/#determining-what-data-syncsThis page also has an example of what happens if you try to write data that doesn’t match the server-side permissions in App Services.This page hasn’t made it to all of the SDKs yet, so apologies if you haven’t seen it in an SDK you’re working with.", "username": "Dachary_Carey" } ]
Privacy of flexible sync
2023-11-09T03:13:41.456Z
Privacy of flexible sync
105
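To make the point above concrete: the server-side rules are what bound a client, regardless of the query it subscribes with. An owner-only role typically looks roughly like the sketch below (the owner_id field name is an assumption); a device whose subscription asks for more than these filters allow simply never receives those documents, and writes outside them are reverted.

{
  "name": "owner_only",
  "apply_when": {},
  "document_filters": {
    "read": { "owner_id": "%%user.id" },
    "write": { "owner_id": "%%user.id" }
  },
  "read": true,
  "write": true,
  "insert": true,
  "delete": true
}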
null
[ "flexible-sync" ]
[ { "code": "_partitionKeyuser=user.data.externalUserIduser=fbdc4c82-3c18-4bee-9064-445b75f93cfeuserMatadata=user.data.externalUserIduserMetadata=fbdc4c82-3c18-4bee-9064-445b75f93cfecanReadPartitioncanWritePartitionexports = function (partition) {\n console.log(`Checking if can sync a write for partition = ${partition}`);\n\n const user = context.user;\n\n let partitionKey = \"\";\n\n const splitPartition = partition.split(\"=\");\n let partitionValue;\n if (splitPartition.length == 2) {\n partitionKey = splitPartition[0];\n partitionValue = splitPartition[1];\n console.log(`Partition key = ${partitionKey}; partition value = ${partitionValue}`);\n } else {\n console.log(`Couldn't extract the partition key/value from ${partition}`);\n return false;\n }\n\n switch (partitionKey) {\n case \"user\":\n case \"userMetadata\":\n console.log(`Checking if partitionValue(${partitionValue}) matches user.id(${user.data.externalUserId}) – ${partitionKey === user.data.externalUserId}`);\n return partitionValue === user.data.externalUserId;\n default:\n console.log(`Unexpected partition key: ${partitionKey}`);\n return false;\n }\n};\nReceived: ERROR \"Permission denied (BIND, IDENT, QUERY, REFRESH)\" (error_code=206, is_fatal=false, error_action=ApplicationBug)\npartition_owner{\n \"roles\": [\n {\n \"name\": \"partition_owner\",\n \"apply_when\": {\n \"$or\": [\n {\n \"_partitionKey\": \"user=%%user.data.externalUserId\"\n },\n {\n \"_partitionKey\": \"userMetadata=%%user.data.externalUserId\"\n }\n ]\n },\n \"document_filters\": {\n \"write\": {\n \"$or\": [\n {\n \"_partitionKey\": \"user=%%user.data.externalUserId\"\n },\n {\n \"_partitionKey\": \"userMetadata=%%user.data.externalUserId\"\n }\n ]\n },\n \"read\": {\n \"$or\": [\n {\n \"_partitionKey\": \"user=%%user.data.externalUserId\"\n },\n {\n \"_partitionKey\": \"userMetadata=%%user.data.externalUserId\"\n }\n ]\n }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n }\n ]\n}\n", "text": "We are currently using partition-based sync. We are testing the migration to flexible-sync and experiencing some issues while migrating the permission logic.In the partition-based model, we use “key=value” style _partitionKey. We have 2 basic partitionsThe access rules to both of these partitions are pretty trivial - the user can read and write both of those partitions when he is the owner. And that is governed by canReadPartition and canWritePartition functions that have the exact same body.Now, when migrating to flexible sync I need to define roles that would mimic this behavior. And that’s what I can’t figure out as after migration I get the following errorThe latest version of my role which I’ve named partition_owner looks like this.What am I doing wrong here?", "username": "Gagik_Kyurkchyan" }, { "code": "\"user=%%user.data.externalUserId\"user=%%user.data.externalUserId\"read\": {\n \"$or\": [\n {\n \"user\": \"%%user.data.externalUserId\"\n },\n {\n \"userMetadata\": \"%%user.data.externalUserId\"\n }\n ]\n}\n", "text": "Hi, I suspect what is going wrong here is that \"user=%%user.data.externalUserId\" is being interpreted as a string value and the expansion is not actually being run on the user object, so it is looking for _partitionKey to equal the value user=%%user.data.externalUserId.As a first question, it seems like the partition concept did not quite fit what you wanted, so you had to add a key/value storage into it. Are user and userMetadata fields on the documents themselves? 
If so, I suspect the ideal permissions for you would be to just define permission like this:Additionally, I believe you should remove the apply_when in the statement. For device sync, this field cannot reference fields in a document as it is applied at session start (not on each individual document). See here: https://www.mongodb.com/docs/atlas/app-services/rules/roles/#how-app-services-assigns-rolesBest,\nTyler", "username": "Tyler_Kaye" }, { "code": "_partitionKeyuseruserMetadata\"user=%%user.data.externalUserId\"OwnerIdOwnerId", "text": "Hey TylerAll of the entities that I have the _partitionKey values in the format of “user=EXTERNAL_USER_ID” or “userMetadata=EXTERNAL_USER_ID”. I do not have a user or a userMetadata field.Can we something validate that the \"user=%%user.data.externalUserId\" expression is not being string-interpolated and being used as is?If that’s the case, will I have to create a new field called “OwnerId” for instance, andIf you think that’s the most straightforward approach, I’d go for it.", "username": "Gagik_Kyurkchyan" }, { "code": "OwnerId_partitionKey_partitionKey%%{\n \"name\": \"role\",\n \"apply_when\": {},\n \"document_filters\": {\n \"read\": { \"OwnerId\": \"%%user.data.externalUserId\" },\n \"write\": { \"OwnerId\": \"%%user.data.externalUserId\" }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n}\n", "text": "Hi @Gagik_Kyurkchyan,I imagine creating that new OwnerId field as you described above would be your best bet, assuming you want to avoid breaking changes / modifying the existing data for _partitionKey.The App Services Rules system isn’t really designed for handling the format of the values for _partitionKey unfortunately (it looks for the expansion identifier %% at the beginning of the string in particular). Note that it isn’t possible to call a function here in the same manner as before because the function operator is evaluated at session start – before any documents have been observed.I imagine the role you’ll want to define will look something like this in JSON:Let me know if that works,\nJonathan", "username": "Jonathan_Lee" }, { "code": "{\n \"roles\": [\n {\n \"name\": \"owner\",\n \"apply_when\": {},\n \"document_filters\": {\n \"write\": {\n \"CreatedBy\": \"%%user.data.externalUserId\"\n },\n \"read\": {\n \"CreatedBy\": \"%%user.data.externalUserId\"\n }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n }\n ]\n}\nCreatedByCreatedByending session with error: integrating changesets failed: operation was cancelled after retrying 12 times (ProtocolErrorCode=201)\n", "text": "Hey @Jonathan_LeeThanks for getting back.I was trying several things before replying here. Here’s my journeyFirst thing I tried is the strategy I mentionedOnce I did this, I decided to launch the app from the master branch without changing any code as that’s what I am trying to achieve - seamless activation of flexible sync. Unfortunately, that didn’t go well. I would receive invalid permissions errors.I decided to add the CreatedBy field to the client code and ensure its value is set correctly. Once I did that, I was finally able to sync the data to the device. This means, we won’t be able to migrate to flexible sync unless we release a new version of the app that has CreatedBy field and everybody needs to upgrade to that latest version.However, there are more issues that I am experiencing. When I try to create entities like before, they won’t sync. 
Sync times out and I see the following error in my App Service logsSo, apparently, there are more deeper issues we will have to address.My question is the following, are there any real-world scenarios where partition-based sync was seamlessly migrated to flexible sync?\nPerhaps, a better option would simply be to deprecate the old app and start a new one that has flexible-sync built in? And suggest our customers to switch?", "username": "Gagik_Kyurkchyan" }, { "code": "{\n \"name\": \"role\",\n \"apply_when\": {},\n \"document_filters\": {\n \"read\": { \"_partitionKey\": { \"$regex\": \"%%user.data.externalUserId\" }, \"$or\": [ { \"_partitionKey\": { \"$regex\": \"^user=<externalUserId-regex>$\" } }, { \"_partitionKey\": { \"$regex\": \"^userMetadata=<externalUserId-regex>$\" } } ] }\n \"write\": { \"_partitionKey\": { \"$regex\": \"%%user.data.externalUserId\" }, \"$or\": [ { \"_partitionKey\": { \"$regex\": \"^user=<externalUserId-regex>$\" } }, { \"_partitionKey\": { \"$regex\": \"^userMetadata=<externalUserId-regex>$\" } } ] }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n}\n\"_partitionKey\": { \"$regex\": \"%%user.data.externalUserId\" }\"%%user.data.externalUserId\"\"_partitionKey\"\"$or\": [ { \"_partitionKey\": { \"$regex\": \"^user=<externalUserId-regex>$\" } }, { \"_partitionKey\": { \"$regex\": \"^userMetadata=<externalUserId-regex>$\" }<externalUserId-regex>user.data.externalUserId", "text": "If updating the schema & deploying a new version of the app is unacceptable, then you could consider using the $regex operator on the “_partitionKey” field like this:It’s a bit ugly, but breaking down the rule expression here for the document read/write filters:Using this above configuration should hopefully tightly match the current partition-based sync permissions in a way that is compatible with flexible sync. Another thing worth mentioning here is that there is obviously some amount of performance hit with using the “$regex” operator now, and long term it may be better to continue to try and use a new field for permissions (it will definitely be faster).Let me know what you think,\nJonathan", "username": "Jonathan_Lee" } ]
Permission issues migrating partition-based sync to flexible sync
2023-11-02T11:36:34.002Z
Permission issues migrating partition-based sync to flexible sync
230
null
[ "queries" ]
[ { "code": "$geoIntersectsdb.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })null", "text": "How to make $geoIntersects work for the “legacy coordinates pair”?it is simple to find the user’s current neighborhood with $geoIntersects.Trying to make/adapt the following find by $geoIntersects work with the “legacy coordinates pair” –\ndb.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })but I always get null results, even I surely know there is a match.Suppose the user is located at -73.93414657 longitude and 40.82302903 latitude. To find the current neighborhood, you will specify a point using the special $geometry field in GeoJSON format.I guess, on top of the above requirement, the field used for the $geoIntersects lookup must be in GeoJSON format as well, (unlike $geoWithin or $nearSphere), right?So, if my collection uses the legacy coordinates pair, like the sample_restaurants from sample db, is there any way for me to use the $geometry query please?", "username": "MBee" }, { "code": "db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })sample_restaurants.neighborhoods{\n _id: ObjectId(\"55cb9c666c522cafdb053a68\"),\n geometry: {\n coordinates: [\n [\n [\n -73.93383000695911,\n 40.81949109558767\n ],\n [\n -73.93411701695138,\n 40.81955053491088\n ],\n ... 248 more items\n ]\n ],\n type: 'Polygon'\n },\n name: 'Central Harlem North-Polo Grounds'\n}\n", "text": "Hey @MBee,db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })\nbut I always get null results, even I surely know there is a match.I tried the same query against the sample_restaurants.neighborhoods collection. It worked as expected and returned the following document as output:Please double-check that you are running the query against the correct collection, and ensure that your collection contains the correct set of data for accurate results.Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })", "text": "db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })Ah, you’re right. I made a tiny mistake when trying the above.\nThanks for the confirmation.", "username": "MBee" } ]
How to make $geoIntersects work for the “legacy coordinates pair”
2023-11-08T22:05:35.732Z
How to make $geoIntersects work for the “legacy coordinates pair”
96
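For readers with the same question as the thread above: $geoIntersects evaluates GeoJSON geometry (backed by a 2dsphere index), which is why it works against the neighborhoods polygons, while a field stored as a legacy [longitude, latitude] pair such as address.coord is better served by the legacy-friendly operators. A mongosh sketch against the sample_restaurants data:

// GeoJSON polygons + $geoIntersects: which neighborhood contains this point?
db.neighborhoods.findOne({
  geometry: {
    $geoIntersects: { $geometry: { type: "Point", coordinates: [-73.93414657, 40.82302903] } }
  }
})

// Legacy [lng, lat] pairs: $geoWithin with $centerSphere works directly,
// with the radius expressed in radians (kilometres / 6378.1)
db.restaurants.find({
  "address.coord": {
    $geoWithin: { $centerSphere: [[-73.93414657, 40.82302903], 2 / 6378.1] }
  }
})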
null
[ "objective-c" ]
[ { "code": "RLMRealmConfiguration *configuration = [self.realmApp.currentUser flexibleSyncConfigurationWithInitialSubscriptions:^(RLMSyncSubscriptionSet * _Nonnull subscriptions) {[subscriptions addSubscriptionWithClassName:@\"DealGeneratorSet\" where:@\"owner_id = %@\", self.realmApp.currentUser.identifier];} rerunOnOpen:true];\nTerminating app due to uncaught exception 'NSInvalidArgumentException', reason: 'Unable to parse the format string \"owner_id = 650781f373e92d4da4f51073\"'\nterminating with uncaught exception of type NSException\n\n", "text": "I am having trouble setting up a Realm Subscription using the ObjectId field in the where predicate. I tried all sorts of string combinations, and the error persisted. All my objects have owner_id of type ObjectID, and the user should sync only its objects.\nHere is my code:The error is always along those lines:", "username": "Milen_Milkovski" }, { "code": "[subscriptions addSubscriptionWithClassName:@\"DealGeneratorSet\" where:@\"owner_id == %@\", [[RLMObjectId alloc] initWithString:self.realmApp.currentUser.identifier error:nil]];\n", "text": "It looks like after the update to the latest SDK 10.44, this code works as expected:", "username": "Milen_Milkovski" } ]
Objective-C Subscription predicate on ObjectId field
2023-11-08T22:07:15.696Z
Objective-C Subscription predicate on ObjectId field
104
https://www.mongodb.com/…_2_1024x576.jpeg
[ "jakarta-mug" ]
[ { "code": "Senior Cloud Solution ConsultantTechnical LeadFull Stack EngineerData Warehouse Engineer", "text": "WhatsApp Image 2023-11-02 at 12.08.461600×900 109 KBWe are thrilled to announce our offline community event collaboration with Dkatalis in Jakarta. Are you ready to dive into the world of MongoDB and gain insights from experts in the field? Don’t miss our upcoming MongoDB Community Event where we’ll explore exciting topics.Please fill this form first Pre-Registration Form To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button if you are RSVPed. You need to be signed in to access the buttonEvent Type: In-Person\nLocation: Dkatalis Jakarta Office, Menara BTPN 43rd Floor. Jl. Dr. IDe Anak Agung Gde Agung Kav. 5.5-5,6 CBD Mega Kuningan - SetiabudiWhatsApp Image 2023-10-24 at 14.09.06800×800 61 KBSenior Cloud Solution Consultant at Searce IncWhatsApp Image 2023-10-27 at 11.24.29-removebg-preview408×612 30.2 KBTechnical Lead at DKatalisWhatsApp Image 2023-10-29 at 23.16.35-removebg-preview458×544 49 KBFull Stack Engineer at DKatalis1667705812109600×600 145 KBData Warehouse Engineer at Bank BTPNCollaboration with DKatalisLogo DK Vertical royal purple (1)3150×3150 118 KB", "username": "Fajar_Abdul_Karim" }, { "code": "", "text": "Interesting topics! Unfortunately, I won’t be able to come in person. ", "username": "berviantoleo" }, { "code": "", "text": "sayang banget bro, if you have any feedback please dm me", "username": "Fajar_Abdul_Karim" } ]
Jakarta MUG x DKatalis : Leveraging MongoDB in Modern Enterprises
2023-11-02T04:43:27.564Z
Jakarta MUG x DKatalis : Leveraging MongoDB in Modern Enterprises
494
https://www.mongodb.com/…b_2_1024x252.png
[ "database-tools" ]
[ { "code": "", "text": "MongoParseError: Invalid scheme, expected connection string to start with “mongodb://” or “mongodb+srv://”\nimage1112×274 12.2 KB", "username": "Aum_Shukla" }, { "code": "", "text": "Show your connect string from the env file\nThere could be some syntax error", "username": "Ramachandra_Tummala" }, { "code": "", "text": "I tried all the syntax issues regarding the env file and double quotations the database name mentioned all but still this issue i am fixing it since 1 day but guide me connect with me so that i can resolve this issue.", "username": "Aum_Shukla" }, { "code": "", "text": "Screenshot (1)3286×1080 377 KB", "username": "Aum_Shukla" }, { "code": "", "text": "Remove semicolon( at the end of your connect string and try again", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Also your password has special character @Aum_Shukla You need to escaped or URL encoded or change it to a simple one and try", "username": "Ramachandra_Tummala" } ]
Issue related mongodb parse error
2023-11-08T11:10:37.554Z
Issue related mongodb parse error
110
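Putting the two suggestions above together (no trailing semicolon, and URL-encode any special characters in the password), a working Node.js/Mongoose setup looks roughly like this; the cluster host, user, password and database names are placeholders:

require("dotenv").config();
const mongoose = require("mongoose");

// .env (no surrounding quotes, no trailing semicolon):
// DB_USER=myuser
// DB_PASSWORD=p@ssw0rd

// Characters such as @ : / ? in the password must be percent-encoded:
const password = encodeURIComponent(process.env.DB_PASSWORD); // "p@ssw0rd" -> "p%40ssw0rd"

const uri = `mongodb+srv://${process.env.DB_USER}:${password}@cluster0.example.mongodb.net/mydb?retryWrites=true&w=majority`;

mongoose.connect(uri)
  .then(() => console.log("connected to MongoDB"))
  .catch((err) => console.error("error connecting to MongoDB", err));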
null
[]
[ { "code": "", "text": "Hello,Since a few days the conditionnal formating in table graph is not working anymore when based on comparison operator based on groups fields (example #toto > 0, background of text in red).Is there a bug or did the functionnality change ?Thank you\nSylvain", "username": "Sylvain_Gelfi" }, { "code": "", "text": "Hi Sylvain, sorry to hear for your issue.We have not changed the functionality for the conditional formatting.\nIt might be a bug but I can’t reproduce at the moment, so I will need a bit more details from you in order to investigate.Did you had the conditional formatting on an existing table chart that used to work before but suddenly stopped working last week or were you trying on a new chart? Could you please share a screenshot that shows the table, the rules and the encoding channels, so we can see the types?Like so:\ntable-conditional-formatting-rules1105×1098 74.6 KBtable-conditional-formatting-channels1103×1090 94 KB", "username": "Kristina_Stefanova" }, { "code": "", "text": "Hello, it was working before on existing tables and stopped working last week.\nAlso when I create new tables it is not working.\nTo be more precise it is NOT working for “VALUES” (like count(Countries) in your example)image1488×790 75 KB", "username": "Sylvain_Gelfi" }, { "code": "", "text": "But it is working with dynamic columns\nimage1107×577 21.2 KB", "username": "Sylvain_Gelfi" }, { "code": "", "text": "Encoding channels\nimage1453×793 82.8 KB", "username": "Sylvain_Gelfi" }, { "code": "", "text": "Thank you for the screenshots Sylvain, they are very helpful.It looks like the conditional formatting rules don’t work on decimal fields anymore.The team is working on a fix, but in the meantime a workaround is to convert the field to another numeric format. You can do this in the Chart builder by clicking on the ellipsis menu (…) of the field in the fields panel, and select “Convert Type” and then “Number”. This will remove the conditional formatting rules for this field, so you would need to add the rule again. Apologies for any inconvenience caused.Thank you for reporting the bug.\nWe will update this thread once the fix is rolled out.", "username": "Kristina_Stefanova" }, { "code": "", "text": "Hi Sylvain, we have released the patch that fixed this bug. Sorry for the inconvenience it has caused.", "username": "James_Wang1" }, { "code": "", "text": "Hello, YES! Everything is back to normal!\nThank you very much for the very quick fix Have a good day.", "username": "Sylvain_Gelfi" } ]
Conditional Formatting not working anymore for groups (in Table)
2023-11-03T15:54:22.184Z
Conditional Formatting not working anymore for groups (in Table)
217
null
[ "node-js" ]
[ { "code": "[\n\t\"Hello World\",\n\t\"hello world\",\n\t\"HELLO WORLD\",\n\t\"héllö wörld\"\n]\nthis.collection.findOne({ names: name })\n", "text": "Hi. I’m looking for Mongo functionality to search documents based on a string that will match any variation of that string ignoring capitalization and diacritics. I am using the findOne Mongo command.JavaScript has this functionality with localeCompare: String.prototype.localeCompare() - JavaScript | MDN.For example I might search on “Hello World”, and the documents it should match might contain:To specify my use-case: I have documents with an array of names (1 or more) in each of them. Sometimes the name is written a bit differently, e.g. with an accent on one of the letters or slightly different capitalization. I will not use this functionality for long texts.I am using the Node.js Mongo client.", "username": "Carsten" }, { "code": "$texttext$text$text{\n $text: {\n $search: <string>,\n $language: <string>,\n $caseSensitive: <boolean>,\n $diacriticSensitive: <boolean>\n }\n}\néêe$text", "text": "Hi @Carsten, I think the features you’re looking for can be found within the $text operator. You would need to create a text index on the field you’re attempting to query through.According to the documentation here, you can use the $text operator on the following environments:MongoDB Atlas: The fully managed service for MongoDB deployments in the cloudMongoDB Enterprise: The subscription-based, self-managed version of MongoDBMongoDB Community: The source-available, free-to-use, and self-managed version of MongoDBThe $text operator uses the following syntax:It allows passing arguments for both your ignoring case sensitivity and diacritics.You can find documentation on the diacritic insensitivity option here.\nA caveat:Once the text index is created (at least version 3) on the field, using the $text operator will be diacritic and case insensitive unless otherwise specified.", "username": "Jacob_Latonis" }, { "code": "return await this.collection.find(query, { collation, projection }).toArray();findOne", "text": "For anyone reading this: I ended up using the collation function. I’m using the Node MongoDB client so I only needed to add a small part to my query:return await this.collection.find(query, { collation, projection }).toArray();Note I can no longer use findOne.", "username": "Carsten" } ]
Ignore capitalization and diacritics (e.g. localeCompare in JS)
2023-10-18T13:55:54.308Z
Ignore capitalization and diacritics (e.g. localeCompare in JS)
208
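For readers landing on the thread above: the collation route works because ICU strength 1 compares base characters only, ignoring both case and diacritics (strength 2 would still distinguish diacritics). A short Node.js sketch with assumed collection and field names; building an index with the same collation lets these lookups avoid a full scan, and recent driver versions accept the same options object on findOne as well:

const { MongoClient } = require("mongodb");

async function run() {
  const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
  await client.connect();
  const people = client.db("mydb").collection("people");
  const collation = { locale: "en", strength: 1 }; // strength 1: ignore case and diacritics

  // One-time: an index with the same collation so these queries can use it
  await people.createIndex({ names: 1 }, { collation });

  // "Hello World" matches "hello world", "HELLO WORLD", "héllö wörld", ...
  const docs = await people.find({ names: "Hello World" }, { collation }).toArray();
  console.log(docs);

  await client.close();
}
run().catch(console.error);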
null
[ "dot-net" ]
[ { "code": "", "text": "I am really interested in using MongoDB Analyzer as it is indeed very hard to figure out how to use C# driver.\nSo I installed NugetPackage in both VS2019 ans 2022 Enterprise.\nAnalyser is listed in references but it does not work. There simply aren’t any three grey dots under any expression.\nSomebody got this working? Am I missing something here?", "username": "Kay_Zander" }, { "code": "", "text": "just gladly found that it worked!Write the query code in a form like ‘collection.AsQueryable().Where(l=>l.Name.Contains(“bbb”))’ works. The key is “AsQueryable().Where”", "username": "mx_fan" } ]
MongoDB Analyzer is not working
2022-08-09T12:45:42.824Z
MongoDB Analyzer is not working
1,575
null
[ "python" ]
[ { "code": "", "text": "I have a free, shared tier of a MongoDB Atlas version 6.0.11 cluster an SSL is enabled, so how do I get the SSL cert required to connect to it? the simple connection string is not enough", "username": "Brennan_Ow" }, { "code": "", "text": "Hi @Brennan_Ow,What is the context regarding this question? I’d recommend going over the FAQ: Security documentation for Atlas.Can you also verify what you mean by “the simple connection string is not enough”?Regards,\nJason", "username": "Jason_Tran" }, { "code": "uri = \"mongodb+srv://<username>:<password>@cluster0.w029iod.mongodb.net/?retryWrites=true&w=majority\"\nclient = MongoClient(uri, server_api=ServerApi('1'))\n", "text": "Context: To connect to my free and shared tier cluster, I’m using the python codeand sometimes it works, but sometimes I get the error stating that the SSL handshake failed.I even tried using the X.509 method with and the SSL handshake failure error message still happens 50% of the timeclient = MongoClient(uri,\ntls=True,\ntlsCertificateKeyFile=‘path/to/file.pem’,\nserver_api=ServerApi(‘1’))", "username": "Brennan_Ow" }, { "code": "", "text": "What’s the pymongo driver version you’re using and what’s the full error message being generated?Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "Thank you for continuing to follow up with me, but I have found a fix for it already. It turns out the ISP that I was using had some firewall configuration that did not allow me to connect to the cluster. So I outsourced the job of connecting to the cluster to a GitHub action where it’s ISP had no such issues.I came to the conclusion after reading this → Error: couldn't connect to server 127.0.0.1:27017 - #16 by Stennie_Xspecifically, this partAs for my pymongo version, it is 4.6.0The error message was as followsSSL handshake failed: <shard_id>.mongodb.net:27017: [WinError 10054] An existing connection was forcibly closed by the remote host (configured timeouts: socketTimeoutMS: 20000.0ms, connectTimeoutMS: 20000.0ms)Thank you for your time", "username": "Brennan_Ow" } ]
SSL cert for free tier
2023-11-06T09:03:53.660Z
SSL cert for free tier
182
null
[]
[ { "code": " @Prop({\n type: String,\n required: true,\n default: new Date().toISOString(),\n })\n dateAdded: string;\n @Prop({\n type: Date,\n default: new Date(),\n })\n dateAdded: Date;\n {\n \"_id\": {\n \"$oid\": \"654bfb0fd1c75afd0193f1d6\"\n },\n \"dateAdded\": {\n \"$date\": \"2023-11-07T21:31:30.373Z\"\n },\n \"__v\": 0\n },\n {\n \"_id\": {\n \"$oid\": \"654bfb1246e0213aa264584a\"\n },\n \"dateAdded\": \"2023-11-08T21:17:13.093Z\",\n \"__v\": 0\n },\n", "text": "For context, my document’s latest schema looks like this:the legacy schema was:Now when I try to create the document in a single call, with a single payload for document body (which I checked with debuggers, for the presence of any async call being made or multiple calls being made. Which is not). I see that it is creating 2 different documents likeThe point to be noted is though they are created at the same time(verified by testing), their dateAdded field is different.", "username": "Prasanjit_Dutta" }, { "code": "", "text": "they are created at the same time(verified by testing)Really?It looks like there is 1 day difference between the 2 documents you shared. The one with the old schema is 2023-11-07 and the one with the new schema is 2023-11-08. It is far from being created at the same time. Since both schema call new Date(), I would surprised that there is a bug in new Date() that could generate 2 dates with 1 day difference if called at the same time.And what about __v:0 is both case? I suspect you are using some kind of abstraction layer and I suspect that something is missing and the new schema is not registered correctly.And why, why, why would you change a perfectly valid Date field into a ISO string? Do you really what to make your queries slower? Do you really want to make your data take more space? Date values stored as Date data type are smaller and faster than their ISO string variant.", "username": "steevej" } ]
Mongo create document call creating 2 different documents with a legacy schema version and another with new schema
2023-11-08T21:40:56.311Z
Mongo create document call creating 2 different documents with a legacy schema version and another with new schema
76
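One more thing worth checking in the schemas quoted above: a default written as new Date() or new Date().toISOString() is evaluated once, when the schema module is loaded, so every document created afterwards inherits that same stale timestamp. That can make documents written at the same moment carry different dateAdded values depending on which schema version and process produced them. Passing a function makes Mongoose compute the default per document, and keeping the field as a BSON Date (as steevej suggests) keeps it compact and range-queryable. A sketch:

const mongoose = require("mongoose");

const itemSchema = new mongoose.Schema({
  // Evaluated once at schema definition time; every document reuses the value:
  // dateAdded: { type: String, default: new Date().toISOString() },

  // Evaluated for each new document:
  dateAdded: { type: Date, default: () => new Date() }, // or: default: Date.now
});

module.exports = mongoose.model("Item", itemSchema);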
null
[ "swift" ]
[ { "code": "Terminating app due to uncaught exception 'RLMException', reason: 'Property 'boards' is of type 'RLMArray<Deal_boards>' which is not a supported RLMArray object type. RLMArray can only contain instances of RLMObject subclasses. See https://www.mongodb.com/docs/realm/sdk/swift/fundamentals/relationships/#to-many-relationship for more information.'\nterminating with uncaught exception of type NSException\n\n", "text": "I ran into trouble with my Schema and Objective-C. My App crashes with the following error. Any help will be appreciated. The class definitions were copied from the Schema editor on my Atlas account.", "username": "Milen_Milkovski" }, { "code": "", "text": "After playing around, the solution was to add implementation for each embedded class.Note to Atlas developers: please fix the schema code generator to add implementation for each of the embedded classes.", "username": "Milen_Milkovski" } ]
Objective-C RLMArray property error
2023-11-08T23:00:05.360Z
Objective-C RLMArray property error
77
null
[]
[ { "code": "", "text": "I’m also having the same issue as Install mongodb-org 5.0 on Amazon Linux 2 aarch64 architecture. how to resolve", "username": "Simeon_Palla" }, { "code": "/proc/cpuinfox86_64aarch64", "text": "Welcome to the MongoDB Community @Simeon_Palla!Please provide more details on the issue you are encountering:Aside from the typo in the original post, the repo format seems to be OK. I would follow the general tutorial to Install MongoDB Community Edition on Amazon Linux and replace x86_64 with aarch64.Thanks,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "I’m trying to install MongoDB on the AWS Linux ec2 server, for my node.js app backend. but when I’m trying to install MongoDB I’m getting these errors$ sudo yum install -y mongodb-org\nLoaded plugins: extras_suggestions, langpacks, priorities, update-motd\nNo package mongodb-org available.\nError: Nothing to do\nerror1918×636 43.2 KB\n", "username": "Simeon_Palla" }, { "code": "", "text": "already tried this Install MongoDB Community Edition on Amazon Linux but not working", "username": "Simeon_Palla" }, { "code": "/etc/yum.repos.d/mongodb-org-6.0.repoyum installyum repolist", "text": "Hi @Simeon_Palla,Did you create the /etc/yum.repos.d/mongodb-org-6.0.repo file before running yum install ?What is the output of yum repolist?Thanks,\nStennie", "username": "Stennie_X" }, { "code": "x86_64", "text": "Hi @Simeon_Palla ,Please also confirm the hardware architecture your EC2 instance is using (x86_64, Graviton, etc).Thanks,\nStennie", "username": "Stennie_X" }, { "code": "sudo yum install -y mongodb-org", "text": "Hello @Stennie_X ! so I am having the same issue hereI have created the file as requested and still sudo yum install -y mongodb-org returns “No package mongodb-org available”I have tried to install it on similar machine and it worked just finehardware architecture: aarch64", "username": "Ella_Mozes" }, { "code": "", "text": "I’m having the same issue with amazon linux:Checking the repolist it seems that MongoDB 6 has much less entries:amzn-updates/latest amzn-updates-Base 7,548\nmongodb-org-3.4 MongoDB Repository 150\nmongodb-org-3.6 MongoDB Repository 144\nmongodb-org-4.0 MongoDB Repository 170\nmongodb-org-4.2 MongoDB Repository 120\nmongodb-org-4.4 MongoDB Repository 196\nmongodb-org-5.0 MongoDB Repository 177\nmongodb-org-6.0 MongoDB Repository 39", "username": "Nicolas_Dickreuter" }, { "code": "", "text": "Hi Stennie,\nI am trying to install Mongodb 6.0 in Amazon linux (https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-amazon/). But getting error as:package mongodb-org-6.0.6-1.amzn2.x86_64 requires mongodb-org-database, but none of the providers can be installed[root@ip-172-31-37-199 ~]# yum repolist\nrepo id repo name\namazonlinux Amazon Linux 2023 repository\nkernel-livepatch Amazon Linux 2023 Kernel Livepatch repository\nmongodb-org-6.0 MongoDB Repository[root@ip-172-31-37-199 ~]# aws --version\naws-cli/2.9.19 Python/3.9.16 Linux/6.1.29-47.49.amzn2023.x86_64 source/x86_64.amzn.2023 prompt/offPlease help", "username": "Shivangi_Agarwal" }, { "code": "", "text": "Hi Shivangi,\nDid you get any solution for your issue ? I’m hvaing a similar issue.\nError:\nProblem: conflicting requests[root@ip--21-10- ~]# aws --version\naws-cli/2.9.19 Python/3.9.16 Linux/6.1.34-59.116.amzn2023.x86_64 source/x86_64.amzn.2023 prompt/off", "username": "vijay_shankar_Singh" }, { "code": "", "text": "Hello all! 
I was having this issue today following the Install MongoDB Community Edition on Amazon Linux tutorial.I switched from the Amazon Linux 2 tab over to the Amazon Linux 2022 tab,\nresulting in a different base url in the yum repo file. This seemed to do the trick! Install complete.", "username": "armslice_N_A" } ]
Problem installing MongoDB 6.0 on Amazon Linux 2
2022-10-06T08:01:53.956Z
Problem installing MongoDB 6.0 on Amazon Linux 2
5,791
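Most of the failures in the thread above appear to come down to the repo definition not matching the instance: the file has to exist under /etc/yum.repos.d/, and its baseurl has to point at the right Amazon Linux release (amazon/2 vs amazon/2023) and CPU architecture (x86_64 vs aarch64, check with uname -m). The shape of the file is sketched below for 6.0 on Amazon Linux 2; treat the baseurl and gpgkey as values to copy from the official install page for your release rather than from here.

# /etc/yum.repos.d/mongodb-org-6.0.repo  (illustrative only; verify against the install docs)
[mongodb-org-6.0]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/amazon/2/mongodb-org/6.0/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-6.0.asc

On Amazon Linux 2023 (and the earlier 2022 preview) the distro segment of the baseurl changes (for example .../yum/amazon/2023/...), which is the difference the last reply found by switching documentation tabs.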
null
[]
[ { "code": "", "text": "Hello,I am preparing for my DBA associate exam and there are a couple of questions I have regarding the syllabus and practice questions. I have already passed my developer exam.There is a question in the practice exam about an error in shard keys (more specifically, question 9). In the exam objective section of the exam study guide, the only learning objective about sharding is 1.4 Identify the function of sharding. Is this type of question valid for the exam? If it is, I’ll have to go a lot deeper in sharding.Ditto for question 10 on profiling, there are no learning objectives on profiling though it appeared in the learning path. I’d like to understand if I will need to go deeper in profiling for the exam too. The only similar objective is 3.1 on index performance, which is the same objective in the developer pathway, but there are no indications on the exam objective that profiling is included.(Not a question) I believe exam objective 2.17 is a duplicate of exam objective 2.1.More generally I’d like to understand if all exam questions will conform to the exam objectives in the DBA exam study guide. Your help is very much appreciated.", "username": "Marcus_Peck" }, { "code": "", "text": "@Marcus_Peck Hello and thanks for reaching out. In order to better assist you, please send this request over to certification@mongodb.com\nThank you!", "username": "Heather_Davis" } ]
DBA Practice Questions and Exam Objectives
2023-11-07T11:55:14.821Z
DBA Practice Questions and Exam Objectives
113
https://www.mongodb.com/…4_2_1024x512.png
[ "queries" ]
[ { "code": "", "text": "In the below link, I found that $near 2d legacy use radians as distance measure.{ $near: [ , ], $maxDistance: }However, when I tested queries in the mongo shell, I got confused because it seems like $near 2d legacy use degrees as distance measure.db.restaurants.find({ ‘address.coord’ :{ $near : [ -73.9, 40.7 ], $maxDistance : 2/111.1 }}) #degreesdb.restaurants.find({ ‘address.coord’ :{ $near : [ -73.9, 40.7 ], $maxDistance : 2/6378.1}} ) #radiansI checked that the first query with degrees gave correct answer, not the second one with radians.Does $near 2d legacy use degrees or radians as distance measure?Is there anyone who can solve my problem?Thanks.", "username": "Juhun_Kim" }, { "code": "", "text": "Anyone can confirm/double-check on this please?", "username": "MBee" } ]
$near 2d legacy distance measure
2020-03-22T09:17:12.758Z
$near 2d legacy distance measure
1,537
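Since the thread above never received a definitive answer: with a 2d (flat) index, $near and $maxDistance are expressed in the same units as the stored coordinates, so degrees when the pairs are longitude/latitude, which is what the first query's 2/111.1 conversion shows empirically. Radians are what the spherical operators expect, such as $nearSphere or $geoWithin with $centerSphere. A mongosh sketch of the two forms:

// Flat 2d semantics: distance in coordinate units (degrees); roughly 111.1 km per degree
db.restaurants.find({
  "address.coord": { $near: [-73.9, 40.7], $maxDistance: 2 / 111.1 }
})

// Spherical semantics: distance in radians (earth radius ~6378.1 km)
db.restaurants.find({
  "address.coord": { $geoWithin: { $centerSphere: [[-73.9, 40.7], 2 / 6378.1] } }
})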
https://www.mongodb.com/…4_2_1024x512.png
[ "serverless", "api" ]
[ { "code": "", "text": "I want to enable/disable autoindexing for my Serverless Mongo instance using api call or terraform config.I cound not find nether of them in documentation.From internet site of my mongo cluster I went to inspect window and I got api call which is called from mongo internet site to update/get autoindexing value. This is the url.\nhttps://cloud.mongodb.com/performanceAdvisor/groups/groupid/serverless/serverlessid/autoIndexingIs there any official information about where I can get for api call or terraform resource that can change autoindexing settings?", "username": "Marija_R" }, { "code": "", "text": "Hi MarijaThanks for the question. The Auto-Indexing feature for Serverless Instances is currently in private preview and is rolling out to customers over the next few weeks. Therefore, it is possible that your instance has not been enabled for it yet. We will add a “private preview” tag in our documentation to denote this.In the first iteration, auto-indexing will be a UI only feature. I’d like to understand more about your use case and how you’d like to use it using terraform or an API call. Feel free to reply on this thread or direct message me.Best,\nAnurag Kadasne", "username": "Anurag_Kadasne" } ]
Disable autoIndexing on Mongo serverless instance
2023-11-08T11:43:06.761Z
Disable autoIndexing on Mongo serverless instance
94
null
[ "node-js", "mongoose-odm" ]
[ { "code": "// Code to require the parts needed for seedsindex to work correctly\nconst mongoose = require('mongoose');\nconst MusicProduct = require('../database_models/musicproduct');\nconst BookProduct = require('../database_models/bookproduct');\n\nconst musicAlbums = require('./musicseeds');\nconst bookNovels = require('./bookseeds');\n\n// Connnect to MongoDB\nmongoose.connect('mongodb://127.0.0.1/music-bookApp');\nmongoose.set('strictQuery', false);\n\n// Logic to check that the database is connected properly\nmongoose.connection.on('error', console.error.bind(console, 'connection error:'));\nmongoose.connection.once('open', () => {\n console.log('Database connected');\n});\n\n//Fill the Music products database with 20 random albums taken from the music seeds file\nconst musicSeedDB = async () => {\n await MusicProduct.deleteMany({});\n for (let i = 0; i < 20; i++) {\n const randomMusic20 = Math.floor(Math.random() * 20);\n //const musicStock = Math.floor(Math.random() * 10) + 1;\n const musicItem = new MusicProduct({\n artistName: musicAlbums[randomMusic20].artist,\n albumName: musicAlbums[randomMusic20].title,\n //musicStock\n })\n await musicItem.save();\n }\n};\n\n//Fill the Book products database with 20 random books taken from the music seeds file\nconst bookSeedDB = async () => {\n await BookProduct.deleteMany({});\n for (let i = 0; i < 20; i++) {\n const randomBook20 = Math.floor(Math.random() * 20);\n //const bookStock = Math.floor(Math.random() * 10) + 1;\n const bookItem = new BookProduct({\n bookAuthor: bookNovels[randomBook20].authors,\n bookName: bookNovels[randomBook20].title,\n //ookStock\n })\n await bookItem.save();\n }\n};\n\n// Close the connection to DB after finish seeding\nmusicSeedDB().then(() => {\n mongoose.connection.close();\n});\n\nbookSeedDB().then(() => {\n mongoose.connection.close();\n});\n", "text": "Hi All,I have recently started on a project at my University, and part of this project is including a seeds file to seed a DB with test information. Previously, this has worked fine but now I am getting the following error messages every time I run the seeds file in node.js:Database connected\nD:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:24\nthrow new error_1.MongoNotConnectedError(‘Client must be connected before running operations’);\n^MongoNotConnectedError: Client must be connected before running operations\nat executeOperationAsync (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:24:19)\nat D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:12:45\nat maybeCallback (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\utils.js:338:21)\nat executeOperation (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:12:38)\nat Collection.insertOne (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\collection.js:148:57)\nat NativeCollection. 
[as insertOne] (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongoose\\lib\\drivers\\node-mongodb-native\\collection.js:226:33)\nat Model.$__handleSave (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongoose\\lib\\model.js:309:33)\nat Model.$__save (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongoose\\lib\\model.js:388:8)\nat D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\kareem\\index.js:387:18\nat D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\kareem\\index.js:113:15 {\n[Symbol(errorLabels)]: Set(0) {}\n}Node.js v18.12.1For reference (if it helps), here is the seeds file I have created and run:To be fair, the seeds file still seems to run as the database does update with the seeded information, but I would much rather get to the bottom of the error so I can stop it appearing.Thank you for your help in advance ", "username": "gary_easton" }, { "code": "#!/usr/bin/env node\nimport { MongoClient } from 'mongodb';\nimport { spawn } from 'child_process';\nimport fs from 'fs';\n\nconst DB_URI = 'mongodb://0.0.0.0:27017';\nconst DB_NAME = 'DB name goes here';\nconst OUTPUT_DIR = 'directory output goes here';\nconst client = new MongoClient(DB_URI);\n\nasync function run() {\n try {\n await client.connect();\n const db = client.db(DB_NAME);\n const collections = await db.collections();\n\n if (!fs.existsSync(OUTPUT_DIR)) {\n fs.mkdirSync(OUTPUT_DIR);\n }\n\n collections.forEach(async (c) => {\n const name = c.collectionName;\n await spawn('mongoexport', [\n '--db',\n DB_NAME,\n '--collection',\n name,\n '--jsonArray',\n '--pretty',\n `--out=./${OUTPUT_DIR}/${name}.json`,\n ]);\n });\n } finally {\n await client.close();\n console.log(`DB Data for ${DB_NAME} has been written to ./${OUTPUT_DIR}/`);\n }\n}\nrun().catch(console.dir);\nconst mongoose = require('Mongoose');\nmongoose.connect(\"MongoDB://localhost:<PortNumberHereDoubleCheckPort>/<DatabaseName>\", {useNewUrlParser: true});\nconst <nameOfDbschemahere> = new mongoose.schema({\n name: String,\n rating: String,\n quantity: Number,\n someothervalue: String,\n somevalue2: String,\n});\n\nconst Fruit<Assuming as you call it FruitsDB> = mongoose.model(\"nameOfCollection\" , <nameOfSchemeHere>);\n\nconst fruit = new Fruit<Because FruitsDB calling documents Fruit for this>({\n name: \"Watermelon\",\n rating: 10,\n quantity: 50,\n someothervalue: \"Pirates love them\",\n somevalue2: \"They are big\",\n});\nfruit.save();\n", "text": "Take a look at these two example scripts, first is Node.JS, second is Mongoose.The points I want to drive home with the first, is how the connections to the DB are being established and verified before the rest of the operations. And comparatively to how similar connections work with Mongoose, as you can choose to use Mongoose for redundancy to ensure the client connection if you’d like.Mongoose:Mongoose Script", "username": "Brock" }, { "code": "", "text": "Could it be because you wroteawait client.close();", "username": "anont_mon" }, { "code": "", "text": "error === {message : “Client must be connected before running operations”}\ni am facing this type of error so many times i worked it but i couldn’t fix that bug", "username": "Madhesh_Siva" }, { "code": "", "text": "Yes, you are absolutely right…", "username": "Zahidul_Islam_Sagor" } ]
Getting this error - MongoNotConnectedError: Client must be connected before running operations
2023-03-28T19:55:40.589Z
Getting this error - MongoNotConnectedError: Client must be connected before running operations
8,763
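A likely cause of the error in the seed script quoted at the top of that thread: musicSeedDB() and bookSeedDB() run concurrently and each one closes the shared Mongoose connection when it finishes, so whichever function is still saving after the first close hits "Client must be connected before running operations", even though the data often lands anyway. Sequencing the work and closing once avoids it; a sketch reusing the thread's own function names:

// Run both seed functions to completion, then close the connection once.
const seedAll = async () => {
  await musicSeedDB();
  await bookSeedDB();
};

seedAll()
  .catch((err) => console.error("seeding failed", err))
  .finally(() => mongoose.connection.close());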
null
[]
[ { "code": "", "text": "I accidentally deleted cluster0, please restore cluster0", "username": "Api_Sport" }, { "code": "", "text": "I am pretty sure that it cannot be done.You could create another one with the same name. Hopefully, you have a backup of your important data.", "username": "steevej" } ]
I accidentally deleted cluster0, please restore cluster0
2023-11-06T14:52:24.554Z
I accidentally deleted cluster0, please restore cluster0
125
null
[ "field-encryption" ]
[ { "code": "csebrew info libmongocrypt\n==> mongodb/brew/libmongocrypt: stable 1.8.2, HEAD\nC library for Client Side Encryption\nhttps://github.com/mongodb/libmongocrypt\n/opt/homebrew/Cellar/libmongocrypt/1.8.2 (44 files, 9.4MB) *\n Built from source on 2023-11-07 at 11:28:05\nFrom: https://github.com/mongodb/homebrew-brew/blob/HEAD/Formula/libmongocrypt.rb\nLicense: Apache-2.0\n==> Dependencies\nBuild: cmake ✔, mongo-c-driver ✔\n==> Options\n--HEAD\n\tInstall HEAD version\ngo build -ldflags \"-X 'main.version=f0205c2409aa47ce7ac836784f364468af074380'\" -tags cse -o /path/to/go/project/bin/apiserver cmd/apiserver/apiserver.go\n# command-line-arguments\n/usr/local/go/pkg/tool/darwin_arm64/link: running clang failed: exit status 1\nld: warning: search path '/opt/homebrew/Cellar/libmongocrypt/1.8.1/lib' not found\nld: library 'mongocrypt' not found\nclang: error: linker command failed with exit code 1 (use -v to see invocation)\nlibmongocrypt v1.11.7", "text": "I have followed the steps here to install libmongocrypt and added the cse tag to my go build.Build results:This was working for me before but I believe i installed libmongocrypt before the release on Sept 5. I was helping a team member get set up so I uninstalled to walk through the installation with them and now it’s failing for both of us.Our go-mongodriver version is v1.11.7Any suggestions?", "username": "Kevin_Rathgeber" }, { "code": "> pkg-config --debug --cflags --libs libmongocrypt\n\nError printing enabled by default due to use of output options besides --exists, --atleast/exact/max-version or --list-all. Value of --silence-errors: 0\nError printing enabled\nAdding virtual 'pkg-config' package to list of known packages\nLooking for package 'libmongocrypt'\nLooking for package 'libmongocrypt-uninstalled'\nReading 'libmongocrypt' from file '/opt/homebrew/lib/pkgconfig/libmongocrypt.pc'\nParsing package file '/opt/homebrew/lib/pkgconfig/libmongocrypt.pc'\n line>Name: mongocrypt\n line>Description: The libmongocrypt client-side field level encryption library.\n line>Version: 1.8.2\n line>Requires:\n line>Requires.private:\n line>prefix=/opt/homebrew/Cellar/libmongocrypt/1.8.2\n Variable declaration, 'prefix' has value '/opt/homebrew/Cellar/libmongocrypt/1.8.2'\n line>includedir=${prefix}/include/mongocrypt\n Variable declaration, 'includedir' has value '/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt'\n line>libdir=${prefix}/lib\n Variable declaration, 'libdir' has value '/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib'\n line>Libs: -L${libdir} -lmongocrypt\n line>Cflags: -I${includedir}\nPath position of 'libmongocrypt' is 1\nAdding 'libmongocrypt' to list of known packages\n post-recurse: libmongocrypt\nadding CFLAGS_OTHER string \"\"\n post-recurse: libmongocrypt\n original: libmongocrypt\n sorted: libmongocrypt\nadding CFLAGS_I string \"-I/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt \"\n post-recurse: libmongocrypt\n original: libmongocrypt\n sorted: libmongocrypt\nadding LIBS_L string \"-L/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib \"\n post-recurse: libmongocrypt\nadding LIBS_OTHER | LIBS_l string \"-lmongocrypt \"\nreturning flags string \"-I/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt -L/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib -lmongocrypt\"\n-I/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt -L/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib -lmongocrypt\n", "text": "Also here is the pkg-config debug for libmongocrypt:", "username": "Kevin_Rathgeber" }, { "code": "go 
clean -cache", "text": "This was resolved by running go clean -cache as described in this Github issue", "username": "Kevin_Rathgeber" } ]
Unable to build Go app with cse tag
2023-11-07T16:52:01.268Z
Unable to build Go app with cse tag
121
null
[ "aggregation", "queries", "dot-net" ]
[ { "code": "$all[\n {\n \"words\": [ \"Hello\", \"World\", \"!\"],\n \"wordArrays\": [\n [\"Hello\", \"World\", \"!\"],\n [\"The\", \"Sun\", \"Shines\"]\n ]\n }\n]\n{words: {$all: [\"Hello\", \"World\"]}}{words: {$all: [\"Hello\", \"I do not exist\"]}}{\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}}$elemMatch$elemMatchwordArrayswordArrays{wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}wordArrays", "text": "I am trying to find documents where any or all nested array elements must contain at least the values provided. This can normally be achieved easily with the $all operator. For example here:The query {words: {$all: [\"Hello\", \"World\"]}} correctly matches because both are contained and {words: {$all: [\"Hello\", \"I do not exist\"]}} does not match. So far so good.If you attempt the same for the nested arrays this stops working altogether. {\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}} does not match. I have found similar questions why on nested arrays all the operators start to fail but no one ever can explain why and they just use $elemMatch. But I do not know how I would translate my “all values must be in the array” to an $elemMatch. And then how I would sayI have prepared this little playground with the data in the hopes somebody knows how to achieve this.\nPlaygroundI would prefer if this could be solved all within the Find() stage, but if operations like this suddenly require the aggregation pipeline I am fine with that too.My approach for ANY match does not return any matches, and I do not understand why {wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}. I read it as wordArrays: does any element match: array contains “Hello” and “World”. And that should have matched the documentThank you!", "username": "Arkensor" }, { "code": "{\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}}Atlas atlas-cihc7e-shard-0 [primary] test> db.server.find()\n[\n {\n _id: ObjectId(\"652e5a208c360b950bb50eea\"),\n word: 'Hello',\n words: [ 'Hello', 'World', '!' ],\n wordArrays: [ [ 'Hello', 'World', '!' ], [ 'The', 'Sun', 'Shines' ] ]\n }\n]\n{wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}wordArrays$elematchAtlas atlas-cihc7e-shard-0 [primary] test> db.server.find({wordArrays: {$elemMatch: {$all: [[\"Hello\", \"World\", \"!\"]]}}})\n[\n {\n _id: ObjectId(\"652e5a208c360b950bb50eea\"),\n word: 'Hello',\n words: [ 'Hello', 'World', '!' ],\n wordArrays: [ [ 'Hello', 'World', '!' ], [ 'The', 'Sun', 'Shines' ] ]\n }\n]\nwordArrayswordArrays", "text": "Hi @Arkensor and welcome to MongoDB community forums!!I apologise for getting back to you so late.If you attempt the same for the nested arrays this stops working altogether. {\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}} does not match.The find query would not work in the above mentioned format. However, if your sample document looks like:the query:Atlas atlas-cihc7e-shard-0 [primary] test> db.server.find( {“wordArrays”: { “$all”: [[“Hello”,“World”,“!”]] } })\n[\n{\n_id: ObjectId(“652e5a208c360b950bb50eea”),\nword: ‘Hello’,\nwords: [ ‘Hello’, ‘World’, ‘!’ ],\nwordArrays: [ [ ‘Hello’, ‘World’, ‘!’ ], [ ‘The’, ‘Sun’, ‘Shines’ ] ]\n}\n]\nwould give the result as the ‘[“Hello”,“World”,“!”]’ matches the complete element at index 0.My approach for ANY match does not return any matches, and I do not understand why {wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}. I read it as wordArrays: does any element match: array contains “Hello” and “World”. 
And that should have matched the documentIf you wish to use $elematch to match for a specific element in the array, you can use the query asIf you wish to use aggregations, you can make use of $arrayElemAt to project element at a specific index value.Also, could you clarify with sample documents and expected response which explains the below statements.Please feel free to reach out in case of any further questions.Regards\nAasawari", "username": "Aasawari" }, { "code": "{\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}}\ndb.collection.find({ words: {\"$all\": [\"Hello\",\"World\"]} })\n[{\"wordArrays\": [[ \"Hello\",\"World\",\"!\"],[\"The\",\"Sun\",\"Shines\"]]}]\n", "text": "Hello @Aasawari,thank you for getting back to me on this. To clarify a few things:The first example was intentionally about checking the 0th item. I expect this code to work as it reads as follows: On the document go into “wordArrays” property. It is an array and the 0th item shall be accessed. On data data present on the 0th item it shall be checked that: ALL the values [“Hello”, “World”] are present on the data. If so return true true. False otherwise.The reason why I say it should work like this is because on the example playground, I linked it works if I have a non-nested array.So my expectation as user is that if I manually navigate the field accessor for the nested array to the 0th item, the data he inspects is the same as if I ask the query to do it on a field that only has this one array as data.\nI would consider this a bug / lack of feature support. Which is why I opened https://jira.mongodb.org/browse/SERVER-77974the query: db.server.find( {“wordArrays”: { “$all”: [[“Hello”,“World”,“!”]] } }) …\nwould give the result as the ‘[“Hello”,“World”,“!”]’ matches the complete element at index 0.That is true but not what I was asking about here. That is an EXACT match on ANY of the items to find find the index. My first example was about already having the index and wanting to validate if I find the required data there or not.As for the second part of my question, this was about achieving my overall goal. The issue raised over “array.INDEX” query not working as expected was just a finding I had while trying to get there.My goal is exactly what I describe there. I have multiple documents. They each contain the “wordArrays” field which holds multiple arrays. I now want to check what I wrote on all the documents.So for the 1. one given the data below, I expect the query I am looking for to match since there was one nested array that contained both “Hello” and “World”. On the same data if I looked for “Not” and “Exists” it should not match because none of the arrays contained both these strings.For the 2. query I want to not find just one matching nested array, but I want to validate that ALL the nested arrays must contain both the values. Given the same data example as used above this should fail, because “Hello” and “World” are present in the 0th array, but the 1st contains neither of the words.Writing these queries is easy if you just inspect one array that is not nested. I however have data that I can not and do not want to change in any way and want to query the nested arrays to do these ANY and ALL operations on them. 
I am looking for those two examples because from those two working queries, I should be able to derive the other variants of any/all nested arrays inspected for any/all item matches.", "username": "Arkensor" }, { "code": "$alldb.collection.find({\n \"wordArrays.0\":{\n \"$all\":[\n \"Hello\",\n \"World\"\n ]\n }\n})\ndb.collection.find({\n \"$and\":[\n {\n \"wordArrays.0\":\"Hello\"\n },\n {\n \"wordArrays.0\":\"World\"\n }\n ]\n})\n\"wordArray\"[ \"Hello\",\"World\",\"!\"]\"Hello\"\"World\"\"Hello\"\"World\"Atlas atlas-cihc7e-shard-0 [primary] test> db.testall.find( {$or: [{'wordsArray.0': ['hello', 'world']}, {'wordsArray.1': ['hello', 'world']}, {'wordsArray.2': ['hello', 'world']} ] })\n[\n {\n _id: ObjectId(\"654b212957b6dc078e3deea5\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [\n [ 'the', 'sum', 'flower' ],\n [ 'not', 'exists' ],\n [ 'hello', 'world' ]\n ]\n }\n]\nAtlas atlas-cihc7e-shard-0 [primary] test> db.testall.find()\n[\n {\n _id: ObjectId(\"654b206d57b6dc078e3deea4\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [ [ 'hello', 'world', '1' ], [ 'the', 'sum', 'flower' ] ]\n },\n {\n _id: ObjectId(\"654b212957b6dc078e3deea5\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [\n [ 'the', 'sum', 'flower' ],\n [ 'not', 'exists' ],\n [ 'hello', 'world' ]\n ]\n },\n {\n _id: ObjectId(\"654b26d457b6dc078e3deea6\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [ [ 'hello', 'world' ], [ 'hello', 'world' ] ]\n }\n]\nAtlas atlas-cihc7e-shard-0 [primary] test> db.testall.find( {$and: [{'wordsArray.0': ['hello', 'world']}, {'wordsArray.1': ['hello', 'world']} ] })\n[\n {\n _id: ObjectId(\"654b26d457b6dc078e3deea6\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [ [ 'hello', 'world' ], [ 'hello', 'world' ] ]\n }\n]\n", "text": "Hi @Arkensor thank you for the detailsI expect this code to work as it reads as follows: On the document go into “wordArrays” property. It is an array and the 0th item shall be accessed.My understanding is that using the dot notation references the specific element at the array index as opposed to the contents of the specified element at the array index. If we combine this with the example noted in the behaviour portion of the $all operator documentation, we can say that the query you mentioned:Is equivalent to:I expect this code to work as it reads as follows: On the document go into “wordArrays” property. It is an array and the 0th item shall be accessed.In saying the above, we can see the element in the array \"wordArray\" at index 0 for the is [ \"Hello\",\"World\",\"!\"]. Neither the string \"Hello\" or \"World\" match this so nothing is returned. The use of dot notation here does not “access” the array as you have mentioned. I.e. in this example, use of the dot not notation will not look into the contents of this sub-array to determine if it contains the strings and instead will just check if the specified element is equal to the strings \"Hello\" and \"World\".I would consider this a bug / lack of feature support. Which is why I openedFrom the above example and the documentation, I believe this works as the expected. However, i understand this may not be the behavour you expect so this may be better suited as a feedback post as opposed to SERVER ticket. Please raise a feedback request in the MongoDB Feedback Engine or upvote for an existing/similar request if present.So for the 1. one given the data below, I expect the query I am looking for to match since there was one nested array that contained both “Hello” and “World”. 
On the same data if I looked for “Not” and “Exists” it should not match because none of the arrays contained both these strings.If I understand correctly, you are looking for a query which would give all the documents which contains [‘hello’,‘world’] irrespective of the index position.\nIf your wordsArray size is small, you can use a query like:where in my sample data, [‘hello’,‘world’] is at the 2nd index.I request your assistance in providing a sample dataset and outlining the anticipated response if the provided data does not align with your requirements.For the 2. query I want to not find just one matching nested array, but I want to validate that ALL the nested arrays must contain both the values. Given the same data example as used above this should fail, because “Hello” and “World” are present in the 0th array, but the 1st contains neither of the words.Consider the sample data contains:If you use the query with $and, the below query would return.Could you outline the particular reason or use case for this schema design?Regards\nAasawari", "username": "Aasawari" }, { "code": "[\n {\n _id: ObjectId(\"654b212957b6dc078e3deea5\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [\n [ 'the', 'sum', 'flower' ],\n [ 'not', 'exists' ],\n [ 'justoneword' ],\n [ 'many', 'many', 'many', 'many', 'wordsinonearray' ],\n <<<TRUNCATED - 500 MORE ARRAY ITEMS HERE >>>,\n [ 'hello', 'world' ]\n ]\n }\n]\n", "text": "Hello again @Aasawari,that explanation is rather helpful. I can see now how the index accessor syntax together with $all was not what I expected. So that is fair enough.About the other query. Yes if it was only a few items it could be hard coded with indices, however, my data could have 0…N amount of wordArrays where each array can have 0…N number elements.I am not directly in control of the data. I am writing a translation layer of user-defined queries into MongoDB, hence I can not make any assumptions on the data. I just know it is an array of arrays of strings and to find matches where N user-provided strings are present. I have some ugly solution with using $map to flatten the nested arrays into booleans if they contain all search words or not, and then check if the outer array contained “true” anywhere, but I am not sure if there could maybe be a nicer option.The data I have given you is exactly my use case. There is no other data that I can give you to make this any more clear.Thank you very much", "username": "Arkensor" } ]
Nested array operation for $all does not seem to work in MongoDB
2023-06-08T17:33:08.529Z
Nested array operation for $all does not seem to work in MongoDB
742
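The ANY/ALL matching over nested string arrays that is left open in the thread above can be expressed with `$expr`, mapping each sub-array to a "contains all search terms" boolean and then folding with `$anyElementTrue` / `$allElementsTrue`. A minimal mongosh-style sketch, assuming a collection shaped like the thread's examples; the search terms are illustrative, and note that `$expr` queries like this cannot use the multikey index, so they scan the collection:

```javascript
// ANY: at least one sub-array of wordArrays contains BOTH "Hello" and "World"
db.collection.find({
  $expr: {
    $anyElementTrue: [{
      $map: {
        input: "$wordArrays",
        as: "arr",
        in: { $setIsSubset: [["Hello", "World"], "$$arr"] }
      }
    }]
  }
});

// ALL: every sub-array must contain both search terms
db.collection.find({
  $expr: {
    $allElementsTrue: [{
      $map: {
        input: "$wordArrays",
        as: "arr",
        in: { $setIsSubset: [["Hello", "World"], "$$arr"] }
      }
    }]
  }
});
```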
null
[ "kafka-connector" ]
[ { "code": "", "text": "Hi.We are considering using heartbeat mechanism to prevent the loss of the resume token when reading changes from a collection.Our current scenario involves 6 collections being polled, each by one separate connector.So far, we haven’t found any documentation about the heartbeat topic parameters. Any advise on how to choose, topic retention, topic partition number, compaction… or any other relevant heartbeat topic configs?Also, given our scenario of several connectors, is it safe to share the heartbeat topic or should we create a separate topic per connector?Thanks", "username": "Jorge_Lopez_Castro" }, { "code": "", "text": "Would love to get some guidance on this as well. We also have the multiple connectors configuration and am not sure if we should create one topic per connector, or if it’s safe for them to share.", "username": "Kevin_Languasco" }, { "code": "", "text": "We are facing the very same issue too and can’t find any answer:It would be highly appreciated if anyone would be able to answer those questions.\nThanks a lot in advance!", "username": "Jan_de_Wilde" } ]
Heartbeat topic configuration for Kafka Source Connector
2021-09-10T11:54:21.879Z
Heartbeat topic configuration for Kafka Source Connector
3,403
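A sketch of how the source connector's heartbeat settings are typically supplied, registered here through the Kafka Connect REST API from Node.js. The `heartbeat.interval.ms` and `heartbeat.topic.name` property names come from the MongoDB Kafka source connector documentation; the topic name, interval, connection string, and Connect URL below are illustrative assumptions, and giving each connector its own heartbeat topic name is the conservative choice when several connectors run side by side:

```javascript
// Registers a MongoDB source connector with heartbeats enabled via the
// Kafka Connect REST API (Node.js 18+ for built-in fetch). Names and URLs are placeholders.
const connectorConfig = {
  name: "orders-source", // one connector per collection, as in the thread
  config: {
    "connector.class": "com.mongodb.kafka.connect.MongoSourceConnector",
    "connection.uri": "mongodb+srv://user:pass@cluster.example.mongodb.net",
    "database": "shop",
    "collection": "orders",
    // Emit a heartbeat after 10s of inactivity so the resume token keeps
    // advancing even when the watched collection is quiet.
    "heartbeat.interval.ms": "10000",
    // Per-connector heartbeat topic name (assumption; the default is __mongodb_heartbeats).
    "heartbeat.topic.name": "__mongodb_heartbeats.orders-source"
  }
};

async function registerConnector() {
  const res = await fetch("http://localhost:8083/connectors", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(connectorConfig)
  });
  console.log(res.status, await res.text());
}

registerConnector().catch(console.error);
```

Heartbeat messages are tiny and only the most recent one per connector matters for resuming, so a small single-partition topic is usually sufficient; treat that as a starting point and check the connector documentation for authoritative guidance on retention and compaction.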
null
[ "data-modeling", "database-tools", "field-encryption" ]
[ { "code": "total_item: 10,\nlistOfItems:[\n{ item: 5, name: \"Value1\"}, \n{ item: 3, name: \"Value2\"}, \n{ item: 2, name: \"Value3\"}\n]\ntotal_item: 15,\nlistOfItems:[\n{ item: 5, name: \"Value1\"}, \n{ item: 3, name: \"Value2\"}, \n{ item: 2, name: \"Value3\"},\n{ item: 5, name: \"Value4\"}\n]\n", "text": "Hii \nI have a collection orders, containing a field total_item. Total Items are calculated with sumation of all numerical values present in item. Item is present in list of object.For example:Now lets say that if a new item comes in listOfItems then the value of total_item should automatically update.\nFor example:\nif my new value is { item: 5, name: “Value4”}\nThen expected update in my collection should be:Is there any way to create this kind of flow?\nBecause its only an example, but in my collection it contains multiple fields which are based on multiple numerical calculations and its hard to track correct data inserted through every formula.This is just like formula field in SalesForce.Hope this information is helpful to solve my query ", "username": "Mehul_Sanghvi" }, { "code": "_id : 1 ,\ntotal_item: 10,\nlistOfItems:[\n{ item: 5, name: \"Value1\"}, \n{ item: 3, name: \"Value2\"}, \n{ item: 2, name: \"Value3\"}\n]\nnew_value = { \"item\" : 5 , \"name\" : \"value4\" } ;\n\nquery = { \"_id\" : 1 } ;\n\nupdate = {\n \"$inc\" : { \"total_item\" : new_value.item } ,\n \"$push\" : { \"listOfItems\" : new_value }\n} ;\n\ndb.orders.updateOne( query , update ) ;\n", "text": "Try the following untested code.Assuming document:As reference:", "username": "steevej" } ]
How to create a field that contains values formulated with values of other fields?
2023-11-07T13:15:52.080Z
How to create a field that contains values formulated with values of other fields?
113
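The push-plus-increment update shown in the thread above can also be written as a single update with an aggregation pipeline, so the derived total is always recomputed from the array and cannot drift. A minimal mongosh sketch, assuming the same orders document shape and the `_id: 1` placeholder from the reply; requires MongoDB 4.2+:

```javascript
const newItem = { item: 5, name: "Value4" };

db.orders.updateOne(
  { _id: 1 },
  [
    // append the new entry to the array
    { $set: { listOfItems: { $concatArrays: ["$listOfItems", [newItem]] } } },
    // recompute the derived total from the array itself
    { $set: { total_item: { $sum: "$listOfItems.item" } } }
  ]
);
```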
https://www.mongodb.com/…7_2_1024x475.png
[ "queries" ]
[ { "code": "", "text": "We noticed a significant performance drop after upgrading our servers from version 6 to 7.The same query that selects 2 index entries and took 2ms on version 6 would suddenly\ntake 50 ms on version 7.\nThe number of scanned/returned documents did not change.The size of the dataset is as follows:DOCUMENTS 410.1k\nTOTAL SIZE 306.4MB\nAVG. SIZE 784B\nINDEXES 9\nTOTAL SIZE 183.0MB\nAVG. SIZE 20.3MBExample document\ndocumentStructure.json (1.4 KB)Execution plan version 6\nmongoExecutionPlan_6_0_10.json (77.7 KB)\nExecution plan version 7\nmongoExecutionPlan_7_0_1.json (114.4 KB)Indizes\nScreenshot 2023-09-28 1148321616×751 68.3 KB", "username": "Sijing_You" }, { "code": "allPlansExecution", "text": "Hi @Sijing_You,Thanks for providing those details. I assume these tests / explain outputs were run on the same server that was upgraded but please correct me if I’m wrong here.I’m going to do some tests on my own version 6.0 and 7.0 environments to see if theres similar behaviour.It’s possible it may have something to do with the slot based query engine but hard to confirm at this stage.I did notice a larger amount of document scans within the allPlansExecution of the version 7 explain output which seems to be adding up to most of the difference between execution times you are seeing but what is the cause of that is yet unknown.I will see if I can spot anything.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "I assume these tests / explain outputs were run on the same server that was upgraded but please correct me if I’m wrong here.Yes, this is correct. These outputs were run on a smaller test instance, but we were getting the same behavior on a bigger cluster.\nDowngrading to 6.0 also restores the query run time. (We kept the setFeatureCompatibilityVersion on 6)", "username": "Sijing_You" }, { "code": "", "text": "Hi guys, I had the same problem. What most reflected in my metrics were spikes in scanned documents, which directly impacted the application. Some data simply did not load and operations were interrupted by the client due to a timeout.I just downgraded to 6.0.11 and the problem was completely resolved!Posting information in this thread so you can follow up and find out if anyone else has had the same type of problem.Best!", "username": "Leandro_Domingues" } ]
Performance Drop After Upgrade 6.0.10 > 7.0.1
2023-09-28T09:44:55.072Z
Performance Drop After Upgrade 6.0.10 > 7.0.1
348
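One way to test whether a regression like the one in the thread above is tied to the newer slot-based execution engine is to temporarily force the classic engine and re-run the same explain. This is a diagnostic sketch, not a fix: the parameter name below is the server's internal SBE control knob as I recall it, so verify it against the release notes for your exact version, and note it is not settable on shared Atlas tiers:

```javascript
// Check which engine the winning plan used; newer explain output typically
// reports a queryFramework field ("classic" vs "sbe").
db.mycoll.find({ field: "value" }).explain("executionStats");

// Force the classic engine for comparison (self-managed deployments only).
db.adminCommand({ setParameter: 1, internalQueryFrameworkControl: "forceClassicEngine" });

// ...re-run the explain with the same query, then restore the default when done
// (assumed to be "trySbeRestricted" on 7.0 - confirm for your version).
db.adminCommand({ setParameter: 1, internalQueryFrameworkControl: "trySbeRestricted" });
```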
https://www.mongodb.com/…a_2_1024x544.png
[ "java" ]
[ { "code": "", "text": "image2260×1202 428 KB", "username": "Adan_Zeng" }, { "code": "", "text": "Hi Adan,Thank you for bringing this issue to our attention.The MongoDB University team is investigating and will update you as a soon as possible.", "username": "Davenson_Lombard" } ]
Cannot check in progress when learning "Getting Started with MongoDB Atlas" in MongoDB Java Developer Path
2023-11-05T08:04:41.256Z
Cannot check in progress when learning “Getting Started with MongoDB Atlas” in MongoDB Java Developer Path
151
https://www.mongodb.com/…c_2_1024x544.png
[ "node-js" ]
[ { "code": "", "text": "Hi everyone. II’d to to ask why my node.js can’t connect to mongodb. it shows “Internal server error: MongoDB collection not available”. Did I make mistakes on codes or something, Can anyone reach out for this problem. Thank you.Screenshot 2023-11-08 at 18.32.561520×808 92.1 KB", "username": "dydyyy_N_A" }, { "code": "", "text": "Screenshot 2023-11-08 at 18.33.482020×1386 99.4 KB", "username": "dydyyy_N_A" }, { "code": "", "text": "Screenshot 2023-11-08 at 18.35.201684×1004 99.7 KB", "username": "dydyyy_N_A" }, { "code": "db.collectiondbnamedb.collectiondbname.collectiondb", "text": "Hi @dydyyy_N_A,Welcome to the MongoDB Community forums!I’d to to ask why my node.js can’t connect to mongodb. it shows “Internal server error: MongoDB collection not available”. Did I make mistakes on codes or something, Can anyone reach out for this problem.Based on the shared screenshot, it seems you are calling db.collection , but in fact, you have passed the database name in the variable dbname . That’s the reason the error is occurring for you.Instead of db.collection , you should be calling dbname.collection\nor passing the database name to the variable db .I hope it helps!Best regards,\nKushagra", "username": "Kushagra_Kesav" } ]
Node.js connect to MongoDB failed
2023-11-08T10:44:21.358Z
Node.js connect to MongoDB failed
99
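The fix described in the thread above comes down to calling `.collection()` on a database handle rather than on a string that merely holds the database name. A minimal Node.js driver sketch of the intended pattern; the URI, database, and collection names are placeholders:

```javascript
const { MongoClient } = require("mongodb");

const client = new MongoClient("mongodb://127.0.0.1:27017");

async function run() {
  await client.connect();
  const db = client.db("mydatabase");    // a Db handle, not a string
  const users = db.collection("users");  // the collection comes from the Db handle
  const doc = await users.findOne({});
  console.log(doc);
  await client.close();
}

run().catch(console.error);
```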
null
[ "schema-validation" ]
[ { "code": "total_orders = len('orders_list')\ntotal_amount = summation of all 'amount' present in each item of 'orders_list'\ntotal_tax = summation of all 'tax_amount' present in each item of 'orders_list'\n", "text": "HiiCurrently I have created a calculated field in my mongodb collection with following calculation:Now I need to keep a validator that will validate above calculated field. Is there any way to create a validation that can keep track on this calculations?", "username": "Mehul_Sanghvi" }, { "code": "$exprtotal_orderstotal_amountdb.createCollection('<your-collection>', {\n validator: {\n $expr: {\n $and: [\n { $eq: ['$total_orders', { $size: '$orders_list' }] },\n {\n $eq: [\n '$total_amount',\n {\n $reduce: {\n input: '$total_orders',\n initialValue: 0,\n in: { $sum: ['$$value', '$$this.amount'] },\n },\n },\n ],\n },\n ],\n },\n },\n});\n", "text": "Hi @Mehul_Sanghvi!It’s possible to create a schema validation for the collection that will check if the fields for totals have the expected values. You can implement it using the $expr on the schema validation logic. Here you can find out more about this topic.I believe the following validation logic will work for total_orders and total_amount:", "username": "Artur_57972" } ]
How to put validation on Calculated Field?
2023-11-07T10:26:25.907Z
How to put validation on Calculated Field?
107
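The `$expr` validator sketched in the reply above can be extended to cover all three derived fields (count, amount total, tax total), using `$sum` over the array's subfields as a shorter equivalent of the `$reduce` shown there. A sketch assuming the field names exactly as described in the question; the collection name is a placeholder:

```javascript
db.createCollection("orders", {
  validator: {
    $expr: {
      $and: [
        { $eq: ["$total_orders", { $size: "$orders_list" }] },
        { $eq: ["$total_amount", { $sum: "$orders_list.amount" }] },
        { $eq: ["$total_tax", { $sum: "$orders_list.tax_amount" }] }
      ]
    }
  }
});
```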
null
[]
[ { "code": "", "text": "I am using mongodb atlas where we have prod cluster and Dev cluster.\ni want to copy some specific collection data from prod to dev cluster for some data validation work.Can anyone help on this. Which feature do i need to follow.", "username": "Rojalin_Das1" }, { "code": "mongosync", "text": "Hey @Rojalin_Das1,Welcome to the MongoDB Community forums!I am using MongoDB atlas where we have a prod cluster and a Dev cluster. i want to copy some specific collection data from prod to dev clusterIf this process is not continuous, consider using the dump/restore method. Alternatively, if you’re using MongoDB clusters with version 6.0 or higher, you can also use mongosync. You can find more information about mongosync documentation.for some data validation work.May I ask what specific type of data validation work you are looking to perform?Best regards,\nKushagra", "username": "Kushagra_Kesav" } ]
How to move specific Collection’s data from prod Atlas cluster to dev Atlas cluster
2023-11-08T05:48:16.857Z
How to move specific Collection’s data from prod Atlas cluster to dev Atlas cluster
91
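For a one-off copy of a single collection between two Atlas clusters, besides the dump/restore and mongosync options mentioned above, a small driver script also works. A sketch in which both connection strings and the namespace are placeholders; batching the inserts keeps memory bounded:

```javascript
const { MongoClient } = require("mongodb");

async function copyCollection() {
  const src = new MongoClient("mongodb+srv://user:pass@prod-cluster.example.mongodb.net");
  const dst = new MongoClient("mongodb+srv://user:pass@dev-cluster.example.mongodb.net");
  await Promise.all([src.connect(), dst.connect()]);

  const source = src.db("mydb").collection("mycollection");
  const target = dst.db("mydb").collection("mycollection");

  let batch = [];
  for await (const doc of source.find()) {   // driver cursors are async iterable
    batch.push(doc);
    if (batch.length === 1000) {
      await target.insertMany(batch);
      batch = [];
    }
  }
  if (batch.length) await target.insertMany(batch);

  await Promise.all([src.close(), dst.close()]);
}

copyCollection().catch(console.error);
```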
null
[ "node-js", "mongoose-odm" ]
[ { "code": "db.runCommand ( { collMod: \"myCollection\", changeStreamPreAndPostImages: { enabled: true } } );\n", "text": "I want to apply the following MongoDB command to my collectionIs it possible to configure it in Mongo Altas, if not, how I can do it using Mongoose?", "username": "WONG_TUNG_TUNG" }, { "code": "Atlas atlas-cihc7e-shard-0 [primary] test> db.runCommand({ collMod: \"server\", changeStreamPreAndPostImages: { enabled: true } })\n{\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1698305510, i: 20 }),\n signature: {\n hash: Binary.createFromBase64(\"KfJBkq3wufdRsjRbPtLilFExH4g=\", 0),\n keyId: Long(\"7262404739617259522\")\n }\n },\n operationTime: Timestamp({ t: 1698305510, i: 20 })\n}\nconst addressSchema = new mongoose.Schema({\n city: String,\n street: String,\n},\n {\n changeStreamPreAndPostImages: { enabled: true }\n}\n);\n", "text": "Hi @WONG_TUNG_TUNG and welcome to MongoDB community forums!!You can use the command to changeStreamPreAndPostImages both using Atlas and Mongoose.You can try the below command on the database selected.Please feel free to reach out in case of further queries.Warm Regards\nAasawari", "username": "Aasawari" }, { "code": "{changeStreamPreAndPostImages: { enabled: true }}\nfullDocumentBeforeChange: \"whenAvailable\",\nfullDocumentBeforeChange: null\n {\n_id: {\n _data: '82653A3F35000000012B022C0100296E5A1004613AD22AB6A74AAA9A5B8FBE59C0F49846645F69640064653A3E14941006D98CD7AA380004'\n},\noperationType: 'delete',\nclusterTime: new Timestamp({ t: 1698316085, i: 1 }),\nwallTime: 2023-10-26T10:28:05.397Z,\nns: { db: 'football', coll: 'maps' },\ndocumentKey: { _id: new ObjectId(\"653a3e14941006d98cd7aa38\") },\nfullDocumentBeforeChange: null\n}\n", "text": "Hi, thank you for the reply. After I addto my schema and setin my change stream. I still getin my delete operation like:May I ask why?", "username": "WONG_TUNG_TUNG" }, { "code": "config.system.preimagesexpireAfterSecondsconfig.system.preimagesexpireAfterSeconds{\n _id: {\n _data: '82653B9548000000202B022C0100296E5A1004B4153884939F4B80B8AED56EB36FE12A461E5F696400290004'\n },\n operationType: 'delete',\n clusterTime: Timestamp({ t: 1698403656, i: 32 }),\n wallTime: ISODate(\"2023-10-27T10:47:36.200Z\"),\n ns: {\n db: 'test',\n coll: 'temperatureSensor'\n },\n documentKey: {\n _id: 0\n },\n fullDocumentBeforeChange: {\n _id: 0,\n reading: 26.1\n }\n}\ndb.temperatureSensor.deleteOne( { \"_id\": 0})", "text": "Hi @WONG_TUNG_TUNG\nThank you for writing backAs mentioned in the Change Stream documentation, you have the ability to manage the size of the config.system.preimages collection and set an expireAfterSeconds time for the pre-images.\nThis is a crucial strategy to prevent the config.system.preimages collection from becoming excessively large. 
When you set an expiration time for pre-images, MongoDB will automatically remove them in the background process, helping to keep your database’s size under control.\nCan you verify if you are applying any timer values for the expireAfterSeconds field?I tried the steps mentioned in documentation for change stream and I am able to see the fullDocumentBeforeChange in the change stream output.using the simple delete command as:\ndb.temperatureSensor.deleteOne( { \"_id\": 0})Can you help with some reproducible code snippet which I can use and understand further.Regards\nAasawari", "username": "Aasawari" }, { "code": "import mongoose from \"mongoose\";\nimport pkg from \"mongoose\";\n\nconst { Schema, model, models } = pkg;\n\nmongoose.set(\"strictQuery\", true);\n\nawait mongoose.connect(\n \"mongodb+srv://tung:12345@blackbox.fgbtzus.mongodb.net/?retryWrites=true&w=majority\",\n {\n dbName: \"test\",\n }\n);\n\nconst testingSchema = new Schema(\n {\n apple: {\n type: String,\n },\n },\n {\n changeStreamPreAndPostImages: { enabled: true },\n }\n);\n\nconst db = models.testing || model(\"testing\", testingSchema);\n\ndb.watch([], {\n fullDocument: \"updateLookup\",\n fullDocumentBeforeChange: \"whenAvailable\",\n}).on(\"change\", (data) => {\n console.log(data);\n});\nfullDocumentBeforeChange{\n _id: {\n _data: '82653F568E000000012B022C0100296E5A1004D2EEBD4CC0094B1788300C910A41B32946645F69640064653F568428856F6B2D5B44670004'\n },\n operationType: 'delete',\n clusterTime: new Timestamp({ t: 1698649742, i: 1 }),\n wallTime: 2023-10-30T07:09:02.825Z,\n ns: { db: 'test', coll: 'testings' },\n documentKey: { _id: new ObjectId(\"653f568428856f6b2d5b4467\") },\n fullDocumentBeforeChange: null\n}\n", "text": "Hi Aasawari,I wrote a full demo code:By doing this, I receive null in fullDocumentBeforeChange:May I ask why? 
Thank you.", "username": "WONG_TUNG_TUNG" }, { "code": "{\n changeStreamPreAndPostImages: { enabled: true }\n}\nAtlas atlas-cihc7e-shard-0 [primary] test> db.runCommand({ collMod: \"blogs\", changeStreamPreAndPostImages: { enabled: true } })\n{\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1698749692, i: 5 }),\n signature: {\n hash: Binary.createFromBase64(\"Xe7WL9XzdjMJhlT1jFKGD6YeNPs=\", 0),\n keyId: Long(\"7262404739617259522\")\n }\n },\n operationTime: Timestamp({ t: 1698749692, i: 3 })\n}\nconst blogChangeStream = Blog.watch({fullDocumentBeforeChange: 'whenAvailable'});\n\nblogChangeStream.on('change', async (change) => {\n console.log('Change detected:', change);\n // Handle the change as needed\n});\n", "text": "Hi @WONG_TUNG_TUNG\nThank you for your patience.It seemsis not taking the effect and because of which fullDocumentBeforeChange is displayed as null in your case.In order to get the deleted document in fullDocumentBeforeChange, you can run the command:Please ensure you run this command in the MongoDB shell within your Atlas environment, and then perform the delete operation.\nFor your reference, here’s is the code snippet, which has been tested on Atlas with version 6.0.11 and Mongoose version 7.6.4:.4where Blogs is the name of the collection in my test example.Let us know if the above solution works for you.Regards\nAasawari", "username": "Aasawari" }, { "code": "db.runCommand({ collMod: \"blogs\", changeStreamPreAndPostImages: { enabled: true } })MongoServerError: not authorized on test to execute command { collMod: \"maps\", \nchangeStreamPreAndPostImages: { enabled: true }, apiVersion: \"1\", \nlsid: { id: UUID(\"228de218-d5d6-4c71-b883-227c306429c6\") }, \n$clusterTime: { clusterTime: Timestamp(1698755045, 2), \nsignature: { hash: BinData(0, 91F644F7D966DC66A3A62FD40102DC734127D568), \nkeyId: 7265277218039791620 } }, $db: \"foodball\" }\n", "text": "db.runCommand({ collMod: \"blogs\", changeStreamPreAndPostImages: { enabled: true } })Hi Aasawari,Thank you for your reply, after running the command, I got:May I ask why and how can I solve it? Thank you so much.", "username": "WONG_TUNG_TUNG" }, { "code": "changeStreamPreAndPostImages", "text": "Hi @WONG_TUNG_TUNGI see that you have not received response from a long time. I hope that you have been able to resolve the issue.\nHowever, if you are still facing the issue, the error message occurs if the right permissions are not set in the cluster.\nBefore using the command to set changeStreamPreAndPostImages to enable, please ensure that you have granted the admin access on the cluster and then execute the command from the same database where you wish to enable the flag.Let us know if the issue still persists.Regards\nAasawari", "username": "Aasawari" }, { "code": "atlas customDbRoles listSyntaxError: Missing semicolon. (1:5)\n\n> 1 | atlas customDbRoles createt\n | ^\n 2 |\n", "text": "Hi Aasawari,Thank you for your kind reply. I am still facing the issue, after watching the document of admin access, I am still confused on how to set granted the admin access on the cluster, can you tell me which code should I follow?\nI tried atlas customDbRoles list, but got:What command should I type in order to grant the access? Thank you.", "username": "WONG_TUNG_TUNG" }, { "code": "", "text": "Hi @WONG_TUNG_TUNGHave you tried setting up using the Atlas UI?attaching screenshot for reference:Screenshot 1945-08-17 at 12.31.49 PM1462×932 96.2 KBRegards\nAasawari", "username": "Aasawari" } ]
How to set the db.createCollection() parameter in MongoDB Atlas or Mongoose
2023-10-26T04:32:45.035Z
How to set the db.createCollection() parameter in MongoDB Atlas or Mongoose
387
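The same enable-then-watch flow from the thread above, expressed with the plain Node.js driver, which sidesteps the Mongoose schema-option question. It assumes a 4.x+ driver against a 6.0+ cluster, placeholder connection details, and a database user whose role includes collMod on the target database:

```javascript
const { MongoClient } = require("mongodb");

async function run() {
  const client = new MongoClient("mongodb+srv://user:pass@cluster.example.mongodb.net");
  await client.connect();
  const db = client.db("test");

  // One-time setup: store pre-images for this collection (needs collMod privileges).
  await db.command({
    collMod: "testings",
    changeStreamPreAndPostImages: { enabled: true }
  });

  // Deletes will now carry the document as it looked before removal.
  const stream = db.collection("testings").watch([], {
    fullDocument: "updateLookup",
    fullDocumentBeforeChange: "whenAvailable"
  });
  for await (const change of stream) {
    console.log(change.operationType, change.fullDocumentBeforeChange);
  }
}

run().catch(console.error);
```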
null
[ "node-js", "mongoose-odm", "atlas-cluster" ]
[ { "code": "/usr/src/app/node_modules/mongodb/lib/cmap/connection_pool.js:262\n connection.onError(new errors_1.PoolClearedOnNetworkError(this));\n ^\n \n PoolClearedOnNetworkError: Connection to ac-727rc86-shard-00-01.fgbtzus.mongodb.net:27017 interrupted due to server monitor timeout\n at ConnectionPool.interruptInUseConnections (/usr/src/app/node_modules/mongodb/lib/cmap/connection_pool.js:262:36)\n at /usr/src/app/node_modules/mongodb/lib/cmap/connection_pool.js:249:41\n at process.processTicksAndRejections (node:internal/process/task_queues:77:11) {\n address: 'ac-727rc86-shard-00-01.fgbtzus.mongodb.net:27017',\n [Symbol(errorLabels)]: Set(1) { 'RetryableWriteError' }\n }\n", "text": "Hi, I am just doing normal CRUD operation using mongoose in my nodejs application, and suddenly this error pop out.May I ask what may lead to this error, and how I can solve it or retry the connection using mongoose?", "username": "WONG_TUNG_TUNG" }, { "code": "PoolClearedOnNetworkError: Connection to ac-727rc86-shard-00-01.fgbtzus.mongodb.net:27017 interrupted due to server monitor timeout\n at ConnectionPool.interruptInUseConnections (/usr/src/app/node_modules/mongodb/lib/cmap/connection_pool.js:262:36)\nconnectTimeoutMS", "text": "Hey @WONG_TUNG_TUNG,The error generally indicates intermittent network instability. The driver cancels the ongoing operations to a node when a network timeout occurs (i.e., the heartbeat takes longer than connectTimeoutMS), assuming that a timeout indicates server unreachability. To address this I’d recommend checking out this post and monitoring the network changes.Also to read more about it please refer to Connection Monitoring - MongoDB Specification. In case of any further questions, feel free to reach out!Best regards,\nKushagra", "username": "Kushagra_Kesav" } ]
PoolClearedOnNetworkError: ...mongodb.net:27017 interrupted due to server monitor timeout
2023-11-05T17:01:44.758Z
PoolClearedOnNetworkError: …mongodb.net:27017 interrupted due to server monitor timeout
158
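Since the error above carries the RetryableWriteError label, one pragmatic mitigation while the underlying network instability is investigated is a short, label-aware retry around the write, plus more forgiving client options. A sketch with illustrative values — tune them to your environment rather than copying them as-is:

```javascript
const { MongoClient } = require("mongodb");

const client = new MongoClient("mongodb+srv://user:pass@cluster.example.mongodb.net", {
  retryWrites: true,             // driver default, stated for clarity
  maxIdleTimeMS: 60000,          // recycle idle pooled connections sooner
  serverSelectionTimeoutMS: 15000
});

async function insertWithRetry(coll, doc, attempts = 3) {
  for (let i = 1; i <= attempts; i++) {
    try {
      return await coll.insertOne(doc);
    } catch (err) {
      const retryable =
        typeof err.hasErrorLabel === "function" &&
        err.hasErrorLabel("RetryableWriteError");
      if (!retryable || i === attempts) throw err;
      await new Promise((r) => setTimeout(r, 500 * i)); // simple linear backoff
    }
  }
}
```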
null
[]
[ { "code": "", "text": "Hi guys, could you help me?I connected some collections of mongodb atlas to power bi desktop, using the connector that was released at this month.However, when I try to schedule updates in Power BI Service, it’s returning this error:Missing client library for datasource Visit MongoDB Atlas SQL Interface | MongoDB for more information.I don’t know exactly what to do.", "username": "Flavia_Santos_de_Almeida" }, { "code": "", "text": "I’m running into the same error.Prior to the release of the named connector, I set up scheduled refresh using a personal data gateway and a System DSN configured for my MongoDB database. I was hoping the named connector might remove the gateway as a dependency…", "username": "Joel_Zehring" }, { "code": "", "text": "One other observation… When I try and configure the connection in the Power BI service through the Settings > Manage connections and gateways link, the Save button remains disabled, even after I enter valid values in to the required fields.\n\nScreenshot 2023-06-29 112749560×1068 31.7 KB\n", "username": "Joel_Zehring" }, { "code": "", "text": "Hi Joel!If you find any solution, could you please communicate me? I’m using google sheets because of this error of power bi service.", "username": "Flavia_Santos_de_Almeida" }, { "code": "", "text": "Hi Flavia!I am running into the same error. Would you mind sharing how your google sheets work around works?Thank you in advance.", "username": "Carlos_DI" }, { "code": "", "text": "Hi Carlos I usually download the report of mongodb compass and import the data into google sheets, to use in power bi.It has been necessary because I still don’t know how to solve this issue of power bi service", "username": "Flavia_Santos_de_Almeida" }, { "code": "", "text": "I see - Thanks for sharing, Flavia!", "username": "Carlos_DI" }, { "code": "", "text": "Hi everyone!Any solution…?", "username": "Flavia_Santos_de_Almeida" }, { "code": "", "text": "Hi, I’m also trying to query Mongo (just a test db at the moment) with Power BI and also getting nowhere, same message “Missing client library for datasource”.PowerBI (desktop at the moment) asks for a MongoDB URL, and a “database” which as an SQL guy I assume is a tablename. I use the Atlas DB connector string, name the table, and get that result.Has anyone tried this with Tableau?", "username": "Peter_Harrison" }, { "code": "", "text": "And what’s the best way of downloading and importing to google sheets? Mongo newbie here, trying to get to grips with a world where pretty much everything is in the cloud!", "username": "Peter_Harrison" }, { "code": "", "text": "Hi! I haven’t tried with Tableau…And answering the second question, I download the reports of mongodb using mongodb compass.In mongodb compass I can make some queries and then download this queries in a report format, just clicking in a button Anyway, I haven’t found a solution for the main problem, did anyone have found?", "username": "Flavia_Santos_de_Almeida" }, { "code": "", "text": "Hi,\nI am in the same situation and not able to setup refresh using the latest power bi connector. any solutions so far?", "username": "Shuja" }, { "code": "", "text": "Hi everyone, same problem here.I’m able to connect Power BI desktop to Atlas, but not Power BI Service. Same error message. 
That’s a shame because I cannot use dataflows and datamarts functionalities.I have two questions, with the intention to find a workaround for all of us together:Creating the report in Power BI desktop and publishing it to Power BI Service makes the dataset available in Power BI Service. However, this dataset can be used only for reports. It cannot be consumed from a dataflow or a datamart. So, it’s not a solution. Am I right?A second approach could be using a third tool as a bridge. Some SQL database that works as a charm when connected to Atlas on one side and Power BI on the other. There should be plenty of solutions. Any recommendation? (preferably free and straightforward)Some folks above talked about using Compass + Google Sheets this way, but it seems a little manual to me (opening a program, exporting a file, pasting the content to google sheets or whatever it takes…)I hope we all find a solution.\nThank you!", "username": "SergioMagnettu" }, { "code": "", "text": "I’ve been engaged with Microsoft support around this. We’ve had several calls to reproduce the issue, troubleshoot, and collect logging. I’ll update here if they determine a solution.", "username": "Joel_Zehring" }, { "code": "", "text": "Hi @Joel_ZehringDo you have any updates from the Microsoft Team?", "username": "Akhil_Vadiyala" }, { "code": "", "text": "Hi Flavia, how are you? I’m Gabriel Nogueira, I’m starting work here at the company with Power BI and we use Mongo DB as a database. I’m having several problems (for being a beginner).We could make a call, if you can, to understand how you’ve been importing data into Power BI.Best", "username": "Gabriel_Nogueira" }, { "code": "", "text": "Hi All - I’m the product manager for the Atlas SQL Interface and Power BI Connector.First up, the error “Missing client library for datasource” most probably means you may not have downloaded and installed the Atlas SQL ODBC driver. Here is the download for that driver: Download Atlas SQL ODBC Driver | MongoDBNext, when the connection dialog asks for “database” it is referring to a database name, not a table. This is just the database to authenticate against and you will be able to build upon other databases within that cluster/Federated db, so long as you have permissions to those.\n\nScreenshot 2023-09-08 at 8.28.36 AM1280×702 120 KB\nAnd if you want to publish and refresh this data, a Power BI Gateway is required. So if this gateway is on a server, the connector and driver must also reside on that server. You can download the connector and driver from our download center (as mentioned above).And finally, Data flows does work with this connector+driver+on-premise gateway, here are the requirements and instructions:Requirements:Instructions:let\nSource = MongoDBAtlasODBC.Query(“mongodb://atlassqlsandbox-rotpc.a.query.mongodb.net/Supplies?ssl=true&authSource=admin”, “Supplies”, “select * from Sales”, null)\nin\nSourceI will monitor this thread, please let me know if you get stuck.\nBest,\nAlexi", "username": "Alexi_Antonino" }, { "code": "", "text": "This solution works!\nThanks.", "username": "Alejandro_Cortes" }, { "code": "", "text": "Hi Alejandro,I’d appreciate if you could do provide a step by step way you did it.I’ve been trying to refresh the power bi service for mongoatlasSQL but to no avail.Thanks in anticipation.", "username": "Tunde_Morakinyo" }, { "code": "", "text": "The steps described by @Alexi_Antonino are working fine for me also. 
Thank you!!@Tunde_Morakinyo, the steps described are precise and should work also for you. Let me add some details to try to help.Now you’re ready to fetch and use your MongoDB data from Power BI desktop.Pay attention to your M/SQL statement. You must replace the fields inside the statement. The url is the Power BI Connector Connection String that you will find in Atlas (see screenshot provided by Alexi_Antonino). You already used that in step 2. “Supplies” is the Database Name and “Sales” is the specific collection you want to fetch. You probably need to replace the quote markers by \" instead of “ and ”.Hope it helps.", "username": "SergioMagnettu" } ]
MongoDB Atlas Connector <> Power BI Service
2023-06-26T12:53:51.541Z
MongoDB Atlas Connector <> Power BI Service
3,608
null
[ "app-services-data-access" ]
[ { "code": "{\n \"collaborator_id\": { \n \"$in\": \"%%user.custom_data.collaborators\"\n }\n}\nending session with error: error bootstrapping new query: error querying state store: error while querying the state collection \"state_Item\": (BadValue) $in needs an array (ProtocolErrorCode=201)\n{\n \"title\": \"Item\",\n \"type\": \"object\",\n \"required\": [\n \"_id\",\n \"isComplete\",\n \"owner_id\",\n \"partnership_id\",\n \"summary\"\n ],\n \"properties\": {\n \"_id\": {\n \"bsonType\": \"uuid\"\n },\n \"isComplete\": {\n \"bsonType\": \"bool\"\n },\n \"owner_id\": {\n \"bsonType\": \"string\"\n },\n \"partnership_id\": {\n \"bsonType\": \"uuid\"\n },\n \"summary\": {\n \"bsonType\": \"string\"\n }\n }\n}\n{\n \"title\": \"User\",\n \"properties\": {\n \"_id\": {\n \"bsonType\": \"objectId\"\n },\n \"partnerships\": {\n \"bsonType\": \"array\",\n \"items\": {\n \"bsonType\": \"uuid\"\n }\n }\n }\n}\n{\n \"_id\":{\n \"$binary\":{\n \"base64\":\"TJiWdUgvTWOrNhZOCWhj6g==\",\n \"subType\":\"04\"\n }\n },\n \"isComplete\":false,\n \"owner_id\":\"652c671990aad0d369d94434\",\n \"collaborator_id\":{\n \"$binary\":{\n \"base64\":\"kYpeWOOlQ0iox5macnFfCw==\",\n \"subType\":\"04\"\n }\n },\n \"summary\":\"Ffff\"\n}\n{\n \"_id\":{\n \"$oid\":\"652ddf6afda42fb20153d562\"\n },\n \"partnerships\":[\n {\n \"$binary\":{\n \"base64\":\"kYpeWOOlQ0iox5macnFfCw==\",\n \"subType\":\"04\"\n }\n }\n ]\n}\n", "text": "I have two fields I am trying to use to build a document filter (in App Services > Rules) for the Item collection.One is a UUID (the _id of the Item document). The other is an array of UUID stored on the users profile.This is the filter in its current form:It saves fine, but when I try to run the app and sync, it gives me an error “$in needs an array”:SCHEMAS:\nItem:User:EXAMPLES:\nHere is a raw example of Item:Here is a raw example of User (what is used for the user custom data):", "username": "Paul_Newell" }, { "code": "", "text": "Hi @Paul_Newell and welcome to MongoDB community forums!!Apologies for writing back so late. I hope you have figured the issue but if not, please find the details below.As mentioned in the documentation Filters in App Services, the filters are applied to all requests except for the device sync requests.\nThe recommendation would be to use Document Filters which is required for Device Sync.Let us know if the above filters works for you.\nIf not, could you help me with complete filter code you are using in your application.Regards\nAasawari", "username": "Aasawari" } ]
Document filters with an array of UUID
2023-10-17T03:14:26.565Z
Document filters with an array of UUID
244
null
[ "queries", "node-js" ]
[ { "code": "", "text": "We are facing mongo force connection as the system is idle for more time\nwe are using node js version 16 and Mongo version 7.3.1", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "", "text": "Hey @Harsha_Vardhan_Reddy_N,Welcome to the MongoDB community!We are facing mongo force connection as the system is idle for more timeCould you provide some more details about the issues you are seeing, such as:These additional details will help the community better understand the issue and assist you better. Looking forward to your response.Regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "try {\n let readDtaFromMongo = await getDataFromMongo(); \n} catch (error) {\n // Triggred this catch block\n console.log(error)\n}\ngetDataFromMongo(){\n const myCollection = mongoose.connection.collection('collectionName');\n let res = await myCollection.aggregate([\n {\n $lookup: {\n from: 'collection2',\n localField: 'feild1',\n foreignField: 'feild1',\n as: \"resultSet\"\n }\n },\n {\n $unwind: \"$resultSet\"\n }\n ]).toArray();\n}\n // Connection creation\n mongoose.connect(mongoUrl, {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n }).then(() => {\n logger.info('MongoDB connected successfully');\n resolve(mongoose.connection);\n }).catch((err) => {\n \n });\n // Saving Data\n await mongoose.connection.collection('collectionName').insertMany(data);\n // Retrieving data declared this function in the above code snippet\n getDataFromMongo();\n", "text": "1.What specific error or behavior are you seeing that indicates the connections are timing out?Ans) It does not trigger any mongoose.connection.on(‘error’) or mongoose.connection.on(‘disconnected’) we are getting an error in then Block.2.How long your system is being kept idle, could you quantify?\nAns) We are not sure when the error is getting triggered.3.How are you creating and managing the MongoDB connections in your Node.js application?Are the connection timeouts intermittent or happening consistently?\nAns) Yes, continuously we are facing the mongo force connection issue.Does increasing the connection pool size or using maxIdleTimeMS seem to help at all?\nAns) Not sure how it can help with this problemWhat version of the MongoDB node.js driver are you using\nAns) Currently, we are using the mongo atlas", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "", "text": "Any update here?\nWe are waiting for response", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "", "text": "Hey @Harsha_Vardhan_Reddy_N,Thank you for providing the details. Could you please share the error logs you are encountering? Additionally, could you provide more information about how long your system remains in an idle state? Also, kindly share the MongoDB Atlas tier you are currently using.Regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "Hi Kushagra_Kesav,\nThanks for the respondingPlease find the screenshot below for error logs\nMicrosoftTeams-image3360×638 332 KBReading mongo\nwe are using M0 Sandbox (Shared RAM, 512 MB Storage)\nEven though we are facing an error on local Mongo we have faced this issue on db version v6.0.8", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "", "text": "Hi Kushagra_Kesav,\nHope you’re doing well.\nCan get any update for this problem", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "", "text": "Hi Team,\nI hope you’re doing well.\nWe are waiting for the response. 
Thanks in advance.", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "", "text": "Hi Team,\nI hope you’re doing well.\nWe are waiting for the response. Thanks in advance.", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "connection closedinsertManykeepAliveconnectlehman-caves-core-serverdeviceStatus()deviceMonitoring.js", "text": "Hey @Harsha_Vardhan_Reddy_N,Apologies for the delayed response!Error>>> MongooseError: Connection was force closedI suspect the problem occurs when pooled connections return a connection closed error due to long-running applications, possibly triggered by the “insertMany” operation in the application code. As per the mongoose documentation, one workaround is to add keepAlive to the options object passed into the “connect” function, which may help prevent this error.at exports.deviceStatus (/home/ubuntu/apps/gh-ops-pm2-v9.0/source/node_modules/lehman-caves-core-server/controllers/deviceMonitoring.js:1527:96)Also, could you help me understand the nature of the ‘lehman-caves-core-server’ node_module? It appears to be invoking the ‘deviceStatus()’ function from the ‘deviceMonitoring.js’ file. To me, this seems to be associated with the application code for monitoring device status.Please provide more insights into your codebase so the community can assist you better.Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "Hey @Kushagra_Kesav\nThanks for responding.deviceMonitoring.js This is my control where the functions are declared and lehman-caves-core-server\nthis is my custom module", "username": "Harsha_Vardhan_Reddy_N" }, { "code": "keepAlive\" connection closed\"keepAlivetruekeepAlivekeepAlivekeepAliveInitialDelay", "text": "For keepAlive, we are using the 7.3.1 Mongoose moduleAs per the documentationBefore Mongoose 5.2.0, you needed to enable the keepAlive option to initiate TCP keepalive to prevent \" connection closed\" errors. However, keepAlive has been true by default since Mongoose 5.2.0, and the keepAlive is deprecated as of Mongoose 7.2.0. Please remove the keepAlive and keepAliveInitialDelay options from your Mongoose connections.It is enabled by default", "username": "Harsha_Vardhan_Reddy_N" } ]
Connection force closed
2023-10-16T05:46:16.268Z
Connection force closed
537
https://www.mongodb.com/…_2_1024x927.jpeg
[ "queries" ]
[ { "code": "", "text": "Just generated a new code for the GitHub education $50 offer but it’s not letting me redeem it.\nI keep getting this error: “ Sorry, it looks like the code you entered has run out of available uses. Please enter a different code.”\nIMG_46581094×991 98.9 KB", "username": "zw_J" }, { "code": "", "text": "Hi there and welcome to the forums!This message appears when the Atlas promo code has already been applied to an Atlas instance.Each student can only generate one Atlas code and it looks like you generated one on April 16th of this year. After that, you likely applied it to your Atlas account.Hope this clarifies things!", "username": "Aiyana_McConnell" }, { "code": "", "text": "you likely applied it to your Atlas account.Thanks for replied, as you said ‘I likely applied it to my Atlas account.’ However, it seems that I didn’t successfully apply the discount code, as I don’t see the corresponding balance in my account (or there may be another way to check). I’m not sure what to do next?", "username": "zw_J" } ]
Can’t redeem GitHub educational code
2023-11-04T09:51:16.386Z
Can’t redeem GitHub educational code
156
null
[]
[ { "code": "curl --user \"{username}:{password}\" --digest --header \"Accept: application/vnd.atlas.2023-02-01+json\" https://cloud.mongodb.com/api/atlas/v2/groups/{groupID}/customDBRoles/roles\n[{\"actions\":[{\"action\":\"COLL_MOD\",\"resources\":[{\"collection\":\"\",\"db\":\"example\"}]}],\"inheritedRoles\":[],\"roleName\":\"callmodRole\"},{\"actions\":[{\"action\":\"COLL_MOD\",\"resources\":[{\"collection\":\"\",\"db\":\"example-logs\"}]}],\"inheritedRoles\":[],\"roleName\":\"customcallmodRole\"}]\ncurl -v -X DELETE https://cloud.mongodb.com/api/atlas/v2/groups/{groupID}/customDBRoles/roles/customremoveRole --user \"{username}:{password}\" --digest --header \"Accept: application/vnd.atlas.2023-02-01+json\"\n>curl -v -X POST https://cloud.mongodb.com/api/atlas/v2/groups/{groupID/customDBRoles/roles --user \"{username}:{password}\" --digest --header \"Accept: application/vnd.atlas.2023-02-01+json\" -d {\"actions\":[{\"action\":\"REMOVE\",\"resources\":[{\"collection\":\"\",\"db\":\"ambience\"}]}],\"inheritedRoles\":[],\"roleName\":\"customremoveRole\"}\n \"error\" : 415,\n \"reason\" : \"Unsupported Media Type\"\n{\"detail\":\"Invalid accept header or version date.\",\"error\":406,\"errorCode\":\"INVALID_VERSION_DATE\",\"parameters\":[],\"reason\":\"Not Acceptable\"}* Connection #0 to host cloud.mongodb.com left intact\ncurl -v -X POST https://cloud.mongodb.com/api/atlas/v2/groups/{groupID}/customDBRoles/roles --user \"{username}:{password}\" --digest --header \"Accept: application/vnd.atlas.2023-02-01+json\" --header \"Content-Type: application/vnd.atlas.2023-02-01+json\" --data '{\"actions\":[{\"action\":\"REMOVE\",\"resources\":[{\"collection\":\"\",\"db\":\"ambience\"}]}],\"inheritedRoles\":[],\"roleName\":\"customremoveRole\"}'\n{\"detail\":\"Received JSON is malformed.\",\"error\":400,\"errorCode\":\"MALFORMED_JSON\",\"parameters\":[],\"reason\":\"Bad Request\"}* Connection #0 to host cloud.mongodb.com left intact\n", "text": "Dear MongoDB Atlas Team,I am having issues creating customRoles in Mongodb Atlas using API and curl.Resource - MongoDB Atlas Administration APII can display customRoles created in MongoDB Atlas UI like this.I am also able to deleteThe issue is I am unable to create customRoles in MongoDB Atlas with this curl command.I got this errorIf I change the --header to “Content-Type: application/vnd.atlas.2023-02-01+json” or “Content-Type: application/json”I’ll receivePlease do share where did I go wrong?Updated.I recently tried this but comes back with a different errorThe error isPlease advise what went wrong with the payload?", "username": "David_Aw" }, { "code": "publickeypriatevkeycurl --user \"{publickey}:{priatevkey}\" --digest \\\n --header \"Content-Type: application/json\" \\\n --header \"Accept: application/vnd.atlas.2023-02-01+json\" \\\n --include \\\n --request POST \"https://cloud.mongodb.com/api/atlas/v2/groups/{GROUPID}/customDBRoles/roles\" \\\n --data '\n {\n \"actions\":[\n {\n \"action\":\"REMOVE\",\n \"resources\":[\n {\n \"collection\":\"\",\n \"db\":\"example\"\n }\n ]\n }\n ],\n \"inheritedRoles\":[],\n \"roleName\":\"customremoveRole\"\n }'\n", "text": "Hey @David_Aw,Welcome to the MongoDB Community forums!I am having issues creating customRoles in Mongodb Atlas using API and curl.I tried the following command, and it worked for me in my testing environment. 
Could you please try it with the same after replacing it with your publickey , priatevkey , and GROUPID (also known as ProjectId )?In case it doesn’t work, please share the error message you received after trying the above command.Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "curl -v --user \"{username}:{password}\" --digest --header \"Content-Type: application/json\" --header \"Accept: application/vnd.atlas.2023-02-01+json\" --include --request POST \"https://cloud.mongodb.com/api/atlas/v2/groups/{groupID}/customDBRoles/roles\" --data ' { \"actions\":[ { \"action\":\"REMOVE\", \"resources\":[ { \"collection\":\"\", \"db\":\"ambience\" } ] } ], \"inheritedRoles\":[], \"roleName\":\"customremoveRole\" }'\n\"detail\":\"Received JSON is malformed.\",\"error\":400,\"errorCode\":\"MALFORMED_JSON\",\"parameters\":[],\"reason\":\"Bad Request\"}* Connection #0 to host cloud.mongodb.com left intact\ncurl: (3) unmatched brace in URL position 1:\n{\n", "text": "Thanks for getting back to me.The error I am facing is", "username": "David_Aw" }, { "code": "\"detail\":\"Received JSON is malformed.\",\"error\":400,\"errorCode\":\"MALFORMED_JSON\",\"parameters\":[],\"reason\":\"Bad Request\"}* Connection #0 to host cloud.mongodb.com left intact\ncurl: (3) unmatched brace in URL position 1:\n{\n}curl --user \"{pubkey}:{prikey}\" --digest --header \"Content-Type: application/json\" --header \"Accept: application/vnd.atlas.2023-02-01+json\" --include --request POST \"https://cloud.mongodb.com/api/atlas/v2/groups/{groupId}/customDBRoles/roles\" --data '{\n \"actions\":[\n {\n \"action\":\"REMOVE\",\n \"resources\":[\n {\n \"collection\":\"\",\n \"db\":\"ambience\"\n }\n ]\n }\n ],\n \"inheritedRoles\":[\n \n ],\n \"roleName\":\"customremoveRole\"\n}'\n", "text": "The error I am facing isThe request body data syntax is incorrect. I believe you are missing two }.The following worked for me (redacted credentials and group id):", "username": "Jason_Tran" } ]
MongoDB Atlas API: Create New CustomRole With Curl & Payload
2023-10-06T03:58:18.195Z
MongoDB Atlas API: Create New CustomRole With Curl & Payload
393
null
[ "atlas-device-sync", "atlas-cluster", "atlas" ]
[ { "code": "", "text": "As suggested, I’m making a separate post about this. I wasn’t going to initially since I thought it might just be me, but it seems like there are similar things going on with other people.I’ve been developing an app using MongoDB Realm with Device Sync. It’s still in development with only a few users testing, that too only periodically. The only connection to my database is through Device Sync. My Atlas and database instance are on Google Cloud us-central1. The total size of the data in my database is only 711 KB. Yet somehow, I have monthly data transfer rates of 20 GB and 80 GB. How is this at all possible when there are essentially no active users and the only purpose is for development and testing?Additionally, in Atlas Metrics it says only 84,483.1 B of data has been transferred for the month. How is this number in any way extrapolated to 20 GB and 80 GB?I also noticed in the Metrics and Real Time view there are constant operations, bytes in and out, 40-60 constant connections when no one is using the app, and in some cases, queries constantly being run.They don’t align with Device Sync logs. I was unable to find an answer so I’m on the verge of abandoning Atlas and Device Sync altogether and just using a local Realm, but just in case there is a solution, making this post.", "username": "Akansh" }, { "code": "", "text": "Hi Akansh,\nCould you share your app ID with us privately so we can look into it?", "username": "mpobrien" }, { "code": "", "text": "Just wanted to double check, I sent it via direct message. Is there some other way I should be providing it?", "username": "Akansh" }, { "code": "", "text": "Well as I expected, no response! Same thing as usual. @Anurag_Kadasne @mpobrien", "username": "Akansh" }, { "code": "", "text": "Akansh,\nI received your message and am reviewing it. Thanks for your patience ", "username": "mpobrien" }, { "code": "", "text": "Is this still under review", "username": "Akansh" }, { "code": "", "text": "Is this still under reviewYes.", "username": "mpobrien" }, { "code": "", "text": "Just wanted to double check, is it possible this usage is standard for a cluster and not expected to continue to scale exponentially with additional usage/users?I was wondering if this usage and constant communication is just related to the database internals or shard communication and is more of a “flat” fee that is incurred and required and not necessarily related to usage from Sync or incoming reads/writes.I hope that makes sense. If not, I’ll try to explain better.", "username": "Akansh" } ]
Abnormal Data Transfer Usage
2023-10-31T21:41:58.584Z
Abnormal Data Transfer Usage
278
null
[ "node-js" ]
[ { "code": "", "text": "\"I’m encountering a problem connecting to MongoDB Atlas, even though I’ve allowed network access for everyone by adding ‘0.0.0.0/0’ to the IP whitelist. When attempting to connect, I consistently receive the following error message: ‘MongooseServerSelectionError: Could not connect to any servers in your MongoDB Atlas cluster. One common reason is that you’re trying to access the database from an IP that isn’t whitelisted. Make sure your current IP address is on your Atlas cluster’s IP whitelist.’What could be the potential reasons for this issue, and how can I resolve it to successfully establish a connection to my MongoDB Atlas cluster?\"", "username": "Adilzhan_Serikzhanov" }, { "code": "", "text": "Hi @Adilzhan_Serikzhanov,Looks to be a generic network connection failure message. Hard to say the root cause of it at this stage but I’d recommend taking a look at my reply on following post and performing the same mentioned network tests from the client failing to connect.Also:Let me know the results of the network tests if you’re still having trouble.Regards,\nJason", "username": "Jason_Tran" } ]
Connecting to database
2023-11-07T13:58:12.264Z
Connecting to database
121
null
[ "aggregation", "queries", "atlas-search" ]
[ { "code": "db.collection.aggregate([\n {\n $search: {\n \"index\": 'search',\n \"count\": { \"type\": \"total\" },\n \"compound\": {\n \"must\": [{\n \"range\": {\n \"path\": \"timestamp\",\n \"gte\": ISODate('2020-01-01'),\n \"lte\": ISODate()\n }\n },\n {\n \"text\": {\n \"query\": '(.*)info(.*)',\n \"path\": ['field1', 'field2']\n },\n },\n {\n \"near\": {\n \"path\": 'timestamp',\n \"origin\": ISODate(),\n \"pivot\": 7776000000\n }\n }\n ],\n }\n }\n },\n { $skip: 10 },\n { $limit: 10 }\n])\n", "text": "HI\nI am trying to $skip and $limit after $search in the aggregation. Each and every time when I try to increase my skip size the execution time gets longer\nExample:My code:I need to know if is there any other way to optimise the query to get faster and if is there any way to specify ascending or descending order in the Atlas search index.", "username": "Nanthakumar_DG" }, { "code": "", "text": "Hi @Nanthakumar_DG - Welcome to the community Each and every time when I try to increase my skip size the execution time gets longerMongoDB still has to iterate over documents to skip them which explains what you were experiencing in the above quote.Just wanting to understand more of the use case details here - Is the question about pagination of Atlas Search results? Could you provide more details on the intended use case?Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "YEP, That’s about pagination each and every time when I go to the next page in UI response time is increasing as I mentioned in the Example.\nI found the solution for that and made a change in the Atlas search index and that works well.\nRef: storing-source-fields\nThanks for the reply @Jason_Tran", "username": "Nanthakumar_DG" }, { "code": "", "text": "Hello @Nanthakumar_DG ,Can you share your storedSource configuration please?I have exactly the same problem with pagination.", "username": "KaMi_Lml" }, { "code": "{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"lvl\": {\n \"type\": \"string\"\n },\n \"data\": [\n {\n \"dynamic\": true,\n \"type\": \"document\"\n },\n {\n \"type\": \"string\"\n }\n ],\n \"time\": {\n \"type\": \"date\"\n }\n }\n },\n \"storedSource\": {\n \"include\": [\n \"data\",\n \"lvl\",\n \"data\"\n ]\n }\n}\n", "text": "", "username": "Nanthakumar_DG" }, { "code": "", "text": "Hey @Nanthakumar_DG , we are working on improving this for Atlas Search currently, what was your expected latency for this query you were hoping to see? Did stored source help?", "username": "Elle_Shwer" }, { "code": "", "text": "@Elle_Shwer Yes storedSource helps to improve the performance of the query timing but still, I need help in this search index the accuracy of the result is not as expected. I need a full-text search type indexing in my case can anyone help with that this index should also need to be sorted in descending, skip and limit.", "username": "Nanthakumar_DG" }, { "code": "", "text": "Hey @Nanthakumar_DG since we posted this we have a sort solution that may also help here: https://www.mongodb.com/docs/atlas/atlas-search/sort/Pagination is still in progress.", "username": "Elle_Shwer" }, { "code": "", "text": "Can we vote somewhere for the pagination improvement? It’s becoming a bit of a problem for me (already using stored source etc.)", "username": "Ruud_van_Buul" }, { "code": "", "text": "@Ruud_van_Buul you can vote here: Faster Pagination – MongoDB Feedback Engine", "username": "Elle_Shwer" } ]
Query optimization while using skip and limit in Atlas search
2022-11-09T04:32:00.934Z
Query optimization while using skip and limit in Atlas search
4,018
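The storedSource change and the sort option linked above can be combined in a single pipeline. A minimal hedged sketch, assuming the index name "search", the field names, and the page size from the posts (not a verified configuration):

```javascript
// Hedged sketch: sort inside $search plus stored-source return, so the
// skipped/limited window does not trigger a full document fetch.
db.collection.aggregate([
  {
    $search: {
      index: "search",
      compound: {
        must: [
          {
            range: {
              path: "timestamp",
              gte: ISODate("2020-01-01"),
              lte: ISODate()
            }
          },
          { text: { query: "info", path: ["field1", "field2"] } }
        ]
      },
      // Newest-first ordering inside the search stage (replaces the "near" clause).
      sort: { timestamp: -1 },
      // Only fields listed in the index's storedSource definition are returned.
      returnStoredSource: true
    }
  },
  { $skip: 10 },
  { $limit: 10 }
]);
```

For deep pages, the searchAfter / searchSequenceToken flow (where the cluster version supports it) generally scales better than large $skip values.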
null
[ "node-js", "serverless" ]
[ { "code": " MongoNetworkError: connection 1 to *IP*:27017 closed\n at Connection.onClose (.../node_modules/mongodb/lib/cmap/connection.js:134:19)\n at TLSSocket.<anonymous> (.../node_modules/mongodb/lib/cmap/connection.js:62:46)\n at TLSSocket.emit (node:events:513:28)\n at TLSSocket.emit (node:domain:489:12)\n at node:net:301:12\n at TCP.done (node:_tls_wrap:588:7)\nrequest.context.callbackWaitsForEmptyEventLoop = false;\n", "text": "Hello all, we are using MongoDB Serverless and connecting to it from AWS Lambda. I realized that after a while (a few minutes) of idle, subsequent database queries returns this error:The next few requests will continue to fail as each connection within the pool fails and reconnects. At one point the failures will subside, until I leave it idle for X minutes, and the problem will surface again.For each of my Lambda function, I have this set:with the mongodb client instance outside of the handler function, as suggested by the mongodb for lambda guide.I can also confirm it is not a network access issue as it currently has 0.0.0.0 allowed and it works consistently at fresh start.Any suggestions or help will be much appreciated! Thanks", "username": "Danny_Yang" }, { "code": "", "text": "@Danny_Yang it might have something to do with whitelisted ip addresses…", "username": "Occian_Diaali" }, { "code": "", "text": "@Danny_Yang – Did you find a solution? Running into the same problem in Vercel API routes.", "username": "Divyahans_Gupta" }, { "code": "", "text": "Please did you get a solution to this. I’m using nextjs and running into the same error", "username": "Efosa_Uyi-Idahor" }, { "code": "", "text": "Maybe it’s because your current ip address and ip address in mongodb atlas are not the same.\nyou need to add your current address to network access in mongodb atlas project that are you working on or add 0.0.0.0/0 to ip address list and this will make any Someone who can log in has a connection to the database (I recommend the first method)", "username": "Panda_Music" }, { "code": "", "text": "the second option is really an easy alternative to do with. can you specifying the first option, please? Like we need to adding extra option script inside our code or something.\nbecause it will throw some error when you just adding a character. you need to add your currenet IP address over and over after you make changes.", "username": "Orcastra" }, { "code": "", "text": "Through Network access option , Whitelist your current IP address or simply allow access from everywhere, i.e. 0.0.0.0/0 (includes your current IP address), also ensure that you have good internet connection.\nThis worked in my case. Hoping same for others.", "username": "Utsav_raj" }, { "code": "", "text": "I have whitelisted my ip, but I still have this issue. Does anyone have another solution?", "username": "Emmanuel_Davis" }, { "code": "", "text": "Currently getting the same error inside AWS using a lambda connecting to a mongodb instance.\nWhat is different is that the connection does work for most of the collections in the database. It is only one collection that while not large in terms of mongo collections is the larger than our other collections in our instance.Very weird that it would be happening with just one collection.edit: this is related to the size of the collection being read. In testing, limiting the size of the collection to 50 items does get past the error, but not useful for a production solution.", "username": "Brian_Forester1" } ]
MongoNetworkError: connection 1 to *IP*:27017 closed
2023-01-20T23:24:01.307Z
MongoNetworkError: connection 1 to *IP*:27017 closed
5,937
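The pattern hinted at in the thread (client created outside the handler, callbackWaitsForEmptyEventLoop disabled) looks roughly like the sketch below. This is a hedged example; the MONGODB_URI variable, database, and collection names are assumptions.

```javascript
// Hedged sketch of the usual Lambda pattern: one cached client promise per
// container, reused across invocations.
const { MongoClient } = require("mongodb");

// Created once per Lambda container, outside the handler.
const clientPromise = new MongoClient(process.env.MONGODB_URI, {
  // Close idle pooled connections before the platform silently drops them.
  maxIdleTimeMS: 60000
}).connect();

exports.handler = async (event, context) => {
  // Do not keep the container alive waiting for open sockets.
  context.callbackWaitsForEmptyEventLoop = false;

  const client = await clientPromise;
  const doc = await client.db("test").collection("items").findOne({});
  return { statusCode: 200, body: JSON.stringify(doc) };
};
```

If connections are still dropped after long idle periods, lowering maxIdleTimeMS (or retrying the first operation after a network error) is a common mitigation.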
null
[ "node-js", "mongoose-odm", "compass", "atlas", "serverless" ]
[ { "code": "mongodb+srv://user:password@database-pe-0.vyi9q.mongodb.net/devssh -L 27017:10.0.4.19:27017 ec2-user@ec2-xx-xxx-xxx-xxx.compute-1.amazonaws.com -i key.pem", "text": "Hi I’m facing a problem when a I try to connect through a SSH Tunnel and a Bastion Host. I want to connect to my serverless instance on Atlas (AWS as Cloud Provider). I’ve created a VPC Endpoint to connect to Atlas in a more private way, I tested the connectivity and it works. Im using the connection string (mongodb+srv) provided by the Atlas UI, and can connect using the SSH Tunnel using the Tool MongoDB Compass. I can connect using the connection string if i connect directly from the bastion host. I have problems if a create a SSH Tunnel and use the connection string in my Node.js app with Mongoose. What Im doing wrong?\nThe connection string is:\nmongodb+srv://user:password@database-pe-0.vyi9q.mongodb.net/dev\nI know that this is resolved to an IP Address, and I change that IP Address to localhost, so this would be forwarded through the SSH Tunnel, but dont worked.\nssh -L 27017:10.0.4.19:27017 ec2-user@ec2-xx-xxx-xxx-xxx.compute-1.amazonaws.com -i key.pem\nIs there a way to achieve this without installing any deps in my app?\nI want to connect from my local environment where i have a nodejs app to a private mongodb database (Atlas) using SSH Tunnel but i cant", "username": "Martin_Munoz1" }, { "code": "", "text": "nneI have exactly the same issue. Have you had any luck resolving it?", "username": "Marcin_Wojciechowski" } ]
Connection issues through SSH Tunnel to Atlas Serverless
2023-05-24T15:44:29.416Z
Connection issues through SSH Tunnel to Atlas Serverless
891
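One reason the SRV string fails through a tunnel is that mongodb+srv resolves member host names via DNS and the driver then dials those hosts directly, bypassing the local port-forward. A hedged sketch of a non-SRV string aimed at the tunnel, for local debugging only (host, credentials, and database name are placeholders, and tlsAllowInvalidHostnames disables hostname verification):

```javascript
// Hedged sketch: connect through an SSH local port-forward such as
//   ssh -L 27017:10.0.4.19:27017 ec2-user@<bastion> -i key.pem
// using a plain mongodb:// string so no SRV lookup happens.
const { MongoClient } = require("mongodb");

const uri =
  "mongodb://user:password@localhost:27017/dev" +
  "?tls=true&tlsAllowInvalidHostnames=true&directConnection=true";

async function main() {
  const client = await new MongoClient(uri).connect();
  console.log(await client.db("dev").command({ ping: 1 }));
  await client.close();
}

main().catch(console.error);
```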
null
[ "document-versioning" ]
[ { "code": "", "text": "Howdy all, I noticed there was a newer version of @realm/react available and the readme on the npm website is different to the documentation on the website, wondering if these will be brought in sync?", "username": "gymbuddy_ai" }, { "code": "", "text": "Hi @gymbuddy_ai!Short answer: yes.Long answer:\nWe’re bringing the React Native SDK docs in sync with the latest version of @realm/react. We did a big docs restructure at the beginning of the year to incorporate @realm/react. More recently, we finished completely overhauling our docs test suite.In the old test suite, all of the code examples are tested which is good! But in the new test suite, the code examples are all part of a real React Native app that runs on simulators. This makes the test suite much more natural.Only in the last few weeks have we had time to start updating the docs to reflect the latest @realm/react version. For example, the Manage Email/Password Users page is in sync with the latest @realm/react version.I’m currently updating the @realm/react Reference page. You can check out the draft PR if you want - but it’s still very much a mess. (DOCSP-27349) Update @realm/react reference page by krollins-mdb · Pull Request #3071 · mongodb/docs-realm · GitHubQuestion for you: is there anything in particular you’d like to see changed/updated in the docs?", "username": "Kyle_Rollins" } ]
Realm docs are not up to date with npm package. docs
2023-11-07T14:29:00.248Z
Realm docs are not up to date with npm package. docs
101
null
[ "transactions" ]
[ { "code": "", "text": "Hi all, in my certification test I found questions on replication and transaction that are not included in the certification exam guide. Could you please check? Thanks a lot in advance", "username": "ILENIA_MARIA_DIOMEDE" }, { "code": "", "text": "Hello Ilenia. Questions on replication and transaction will fall under the Topic: Drivers. More specifically Objective 6.2 & 6.4\nYou can find the exam objectives and topic level weighting in the [Associate Developer Exam Study Guide.]\nIf you have any further questions, please reach out to certification@mongodb.com\nThank you!", "username": "Heather_Davis" }, { "code": "", "text": "Though I passed the developer exam, I don’t find it fair to include replication and transaction in a developer exam? Objective 6.2 is Define how a (coding language) application connects/uses the (coding language) driver, and objective 6.4 is Identify what connection pooling is in terms of the driver and what advantages it offers. None of these points to replication and transaction.For a certification exam I think it is important to adhere what is written in the objectives. I think it’s reasonable for replication and transaction to be tested, but to be fair, these should be included in the exam objectives.", "username": "Marcus_Peck" } ]
Different topics in certification dev exam
2023-09-26T13:45:05.862Z
Different topics in certification dev exam
472
null
[ "node-js", "replication", "compass", "containers" ]
[ { "code": "mongodb://localhost:27017,localhost:27018,localhost:27019/?replicaSet=myReplicaSetgetaddrinfo ENOTFOUND mongo2", "text": "I have followed everything in the relevant document. All my outputs are correct as in the document, but I encounter an error when trying to establish a connection through the Compass GUI or with a framework like Node.js.Here is the mongodb documentationThe connection string I used for Compass:mongodb://localhost:27017,localhost:27018,localhost:27019/?replicaSet=myReplicaSetThe error I received in the Compass interface:getaddrinfo ENOTFOUND mongo2Could you guide me on how to resolve this issue?", "username": "htrgl" }, { "code": "docker ps\nping mongo2\n", "text": "When the docker cluster is up and running, what do you get when you run the command the following commands:", "username": "steevej" } ]
How can I connect to a MongoDB replica set cluster created with Docker using Compass?
2023-11-06T11:46:24.686Z
How can I connect to a MongoDB replica set cluster created with Docker using Compass?
117
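The ENOTFOUND mongo2 error usually means the driver discovered the member host names stored in the replica set configuration and then tried to dial them from the host machine, where they do not resolve. A hedged checklist-style sketch (member names and ports are assumptions; use whatever step 1 prints):

```javascript
// Hedged sketch for "getaddrinfo ENOTFOUND mongo2".
//
// 1) See what the replica set actually advertises, using a direct connection
//    that skips topology discovery:
//      mongosh "mongodb://localhost:27017/?directConnection=true" \
//        --eval "rs.conf().members.map(m => m.host)"
//
// 2) Make those exact host:port pairs reachable from the host machine, e.g.
//    add "127.0.0.1 mongo1 mongo2 mongo3" to /etc/hosts and publish the
//    matching container ports.
//
// 3) Then the replica-set connection string should work as advertised:
const uri =
  "mongodb://mongo1:27017,mongo2:27018,mongo3:27019/?replicaSet=myReplicaSet";
```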
null
[ "java" ]
[ { "code": "", "text": "Hi ,Please can anyone suggest me a good tutorial which I can use to practice Mongo DB Java driver apart from the one section that we have in the Mongo DB universityReason, In my first attempt for Mongo DB Developer Associate, I felt the Java driver questions were challenging and thought would need more practice.\nThanks in advance.", "username": "Manikandan_Veeraraghavan" }, { "code": "", "text": "Hi there, I passed the develolper exam recently and I can give you some tips without disclosing the actual exam content. In summary, my experience is lab is far more important than practicing multiple choice questions.I personally wrote a 20-page notes for this exam. It is comparable to an associate level in other technology products. It’s not something you can nail in a day, but it is definitely not that difficult.", "username": "Marcus_Peck" } ]
Mongo DB Java Learning
2023-11-07T06:46:00.027Z
Mongo DB Java Learning
97
null
[ "aggregation", "queries", "node-js" ]
[ { "code": "import { MongoDBAtlasVectorSearch } from \"langchain/vectorstores/mongodb_atlas\";\nconst store = new MongoDBAtlasVectorSearch(embeddings, { \"collection\": my_collection, \"indexName\": \"default\", \"textKey\": \"page_content\", \"embeddingKey\":\"page_embeddings\"})\nreturn await store.similaritySearchWithScore(qurey, 5,{preFilter:{name:\"test.pptx\"}})\n", "text": "I am using mongodb atlas search, my following code working fine, but i am unable to add filters, I see some examples but that are using raw aggregation, like in langchain wrapper, there is function(i.e. similaritySearchWithScore(qurey, 5,{preFilter:{name:“test_file.pptx”}})) which support filters as args. I used this but it gives error i.e.\nerror: PlanExecutor error during aggregation :: caused by :: “filter.name” must be a document\nCode:-How we can pass filters here??Thanks", "username": "Davinder_Singh3" }, { "code": "return await store.similaritySearchWithScore(qurey, 5,{preFilter:{name:\"test.pptx\"}})\ndocument", "text": "Hey @Davinder_Singh3,Welcome to the MongoDB Community forums!error: PlanExecutor error during aggregation :: caused by :: “filter.name” must be a documentAs per the $vectorSearch - documentation, the filter needs to be of document type and you can use the $eq operator here to resolve the error. Further, please refer to the Atlas Vector Search Pre-Filter to read more about it.In case you have further concerns, feel free to reach out.Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "{\n \"mappings\": {\n \"dynamic\": true,\n \"fields\": {\n \"page_embeddings\": {\n \"dimensions\": 1536,\n \"similarity\": \"cosine\",\n \"type\": \"knnVector\"\n }\n }\n }\n}```\n\nError:-\n****error: PlanExecutor error during aggregation :: caused by :: Path 'name' needs to be indexed as token****", "text": "@Kushagra_Kesav now i have following 2 queries", "username": "Davinder_Singh3" }, { "code": "$vectorSearchfiltertokentoken", "text": "Hi @Davinder_Singh3,Do we need to add all fields to following template??May I ask what you meant by ‘all fields’ here? Are you generating vector embeddings for multiple fields?which we want to filter in semantic search function, since i filtered one field which was not added to indexing and it throw given error.Yes, the $vectorSearch filter option matches only BSON boolean, string, and numeric values so you must index the fields as one of the following Atlas Search field types.If yes what “type” is for field of collection which has JSON data type?? i.e. for string we “type”: “token”, “normalizer”: “lowercase”.And, yes for the string - index a field as token type. Atlas Search indexes the terms in the string as a single token (searchable term) and stores them in a columnar storage format for efficient filtering or sorting operations. To read more about it, please refer to the Behavior of the token Type - MongoDB Docs.Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "@Kushagra_Kesav Should we create separate index for the filter fields? 
Or should we mention in the vector index template?For example, I want to apply vector search, lets say only for documents where country=‘IN’, should I create a separate index for country or should I add this field in vector index mapping?image404×550 46.9 KB", "username": "Sahas_Nanabala" }, { "code": "await store.similaritySearchWithScore(qurey, 5, { preFilter: { $and: [{ name: { $eq: \"test\" }, \"document_meta.Disclaimer.Label\": { $eq: \"Client Ready\" } }] } })\n{\n \"mappings\": {\n \"dynamic\": true,\n \"fields\": {\n \"page_embeddings\": {\n \"dimensions\": 1536,\n \"similarity\": \"cosine\",\n \"type\": \"knnVector\"\n },\n \"name\": {\n \"type\": \"token\",\n \"normalizer\": \"lowercase\"\n },\n \"document_meta.Disclaimer.Label\": {\n \"type\": \"token\",\n \"normalizer\": \"lowercase\"\n }\n }\n }\n}\nreturn await store.similaritySearchWithScore(qurey, 5,{ preFilter: { \"document_meta.Disclaimer.Label\": { $eq: \"Client Ready\" } } })\nawait store.similaritySearchWithScore(qurey, 5, { preFilter: { $and: [{ name: { $eq: \"test\" }, \"document_meta.Disclaimer.Label\": { $eq: \"Client Ready\" } }] } })\n", "text": "Thanks for clarifying @Kushagra_Kesav .It works well on “Name” field but on “document_meta.Disclaimer.Label” it just return empty i.eIt throw error as error: PlanExecutor error during aggregation :: caused by :: “filter.$and[0]” more than 1 filterNeed your inputs/guidance on 2 and 3 points. We have case where along with query, we want results filtered by multiple fields.Thanks,\nDavinder", "username": "Davinder_Singh3" } ]
How to apply filters to function "similaritySearchWithScore" of langchain?
2023-11-03T15:36:36.193Z
How to apply filters to function "similaritySearchWithScore" of langchain?
201
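Two details seem to matter for the remaining issues in this thread: Atlas Search static mappings describe nested fields by nesting document definitions rather than by using a dotted key, and each element of a preFilter $and should contain exactly one condition. A hedged sketch using the field names from the posts (the lowercased comparison value is an assumption tied to the lowercase normalizer):

```javascript
// Hedged sketch: nested mapping for document_meta.Disclaimer.Label plus a
// well-formed $and (one condition per array element).

// Index definition (field names taken from the thread):
const indexDefinition = {
  mappings: {
    dynamic: true,
    fields: {
      page_embeddings: {
        type: "knnVector",
        dimensions: 1536,
        similarity: "cosine"
      },
      name: { type: "token", normalizer: "lowercase" },
      document_meta: {
        type: "document",
        fields: {
          Disclaimer: {
            type: "document",
            fields: { Label: { type: "token", normalizer: "lowercase" } }
          }
        }
      }
    }
  }
};

// Query-side filter: one condition per $and element.
const results = await store.similaritySearchWithScore(query, 5, {
  preFilter: {
    $and: [
      { name: { $eq: "test" } },
      // Value shown lowercased on the assumption the normalizer applies.
      { "document_meta.Disclaimer.Label": { $eq: "client ready" } }
    ]
  }
});
```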
null
[ "containers" ]
[ { "code": "", "text": "I have a docker system contains a service that connects with mongodb service. During boot of the service I noticed the message “org.mongodb.driver.cluster: Exception in monitor thread while connecting to server localhost:27017” while the container and host name of the mongodb service is mongo1db.\nWhen the services are up and running the services is trying to connect with “localhost:27017” while the service environment and configuration to use mongo1db:27017 and when I login the service container (docker exec -it service /bin/bash) I can network connect to mongodb:27017 and no other. How I can fix this?", "username": "Muhammed_Alghwell" }, { "code": "host.docker.internal/etc/hosts", "text": "Hey @Muhammed_Alghwell,I noticed the message “org.mongodb.driver.cluster: Exception in monitor thread while connecting to server localhost:27017” while the container and host name of the mongodb service is mongo1db.In my understanding, it appears that the service inside your Docker container is attempting to establish a connection with MongoDB using “localhost:27017” as the hostname and port. However, you want it to connect to the MongoDB service with the hostname “mongo1db” and port “27017.” This issue is likely the result of some misconfiguration in your YAML file.I suspect that the correct mapping between the container environment and the MongoDB process running on the localhost may not be in place. If you intend to connect to the localhost, please ensure that the local hostname contains the entry ‘host.docker.internal’ in the ‘/etc/hosts’ file on your system.Could you kindly verify that your MongoDB connection string uses ‘mongo1db’ as the host, rather than ‘localhost’ or ‘127.0.0.1’? Also, try flushing any cached DNS lookups by restarting the Docker daemon and the service containers. The service may have initially resolved localhost.Perhaps to understand better, it would be helpful if you could share the following information:Best regards,\nKushagra", "username": "Kushagra_Kesav" } ]
Connecting wit mongodb service
2023-10-28T15:54:42.714Z
Connecting wit mongodb service
193
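A Java service that logs "connecting to server localhost:27017" despite a different configured host is often constructing a default client (the Java driver falls back to mongodb://localhost when no connection string is supplied) instead of reading its configuration. A hedged illustration of wiring the host through configuration, shown in Node syntax for consistency with the other sketches (variable and database names are assumptions):

```javascript
// Hedged illustration: build the URI from configuration so the driver never
// falls back to its localhost default. "mongo1db" is the compose service name
// from the thread; MONGO_HOST is an assumed environment variable.
const { MongoClient } = require("mongodb");

const host = process.env.MONGO_HOST || "mongo1db";
const client = new MongoClient(`mongodb://${host}:27017/mydb`);
```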
null
[ "sharding", "backup" ]
[ { "code": "", "text": "Hello,We have started to see this issue after upgrading to 3.6 and it has continued into 4.0. When we restore a shared cluster with config servers setup in a 3 node replication set we are getting timeouts on all activity from mongos.The error message in the config server logs is2021-04-08T16:24:47.251+0000 I COMMAND [conn470] Command on database config timed out waiting for read concern to be satisfied.\nCommand: { find: “databases”, filter: { _id: “b2b” }, readConcern: { level: “majority”, afterOpTime: { ts: Timestamp(1617856808, 1), t: 39 } }, maxTimeMS: 30000, $readPreference: { mode: “nearest” }, $replData: 1, $clusterTime: { clusterTime: Timestamp(1617899057, 5), signature: { hash: BinData(0, AD61160760CD2170230E457CFC08DF2D056E92E5), keyId: 6932855326379082068 } }, $configServerState: { opTime: { ts: Timestamp(1617856808, 1), t: 39 } }, $db: “config” }. Info: MaxTimeMSExpired: Error waiting for snapshot not less than { ts: Timestamp(1617856808, 1), t: 39 }, current relevant optime is { ts: Timestamp(1617899083, 2), t: 35 }. :: caused by :: operation exceeded time limitThis seems to persist for hours after the restore and numerous restarts of the mongos, config db’s and mongodbs will finally clear out. The error is confusing since it is looking for a snapshot not less than 1617856808 and the current optime is greater than that 1617899083.On the config database I am able to create and write to collections and replication is showing in sync across all the nodes.", "username": "Jonathan_Stairs" }, { "code": "", "text": "Even we are facing the same issue. Did you find any solution for this issue?", "username": "ashwin_reddy1" } ]
Restore of Sharded causing connections to hang with timeout
2021-04-08T16:40:06.805Z
Restore of Sharded causing connections to hang with timeout
2,482
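The "Error waiting for snapshot not less than ..." message is a majority read-concern wait on the config servers, so one hedged diagnostic after a restore is to check whether the restored CSRS is actually advancing its majority-committed optime. A sketch, assuming direct shell access to a config server member:

```javascript
// Hedged diagnostic sketch: if readConcernMajorityOpTime lags far behind the
// applied optime on the config server replica set, majority reads issued by
// mongos against config collections will block until it catches up.
const s = rs.status();
printjson({
  applied: s.optimes.appliedOpTime,
  durable: s.optimes.durableOpTime,
  majorityCommitted: s.optimes.readConcernMajorityOpTime,
  members: s.members.map(m => ({
    name: m.name,
    state: m.stateStr,
    optimeDate: m.optimeDate
  }))
});
```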
null
[ "node-js" ]
[ { "code": "node_modules\\bson\\lib\\bson.cjs:479\n inspect ??= defaultInspect;\n ^^^\n\nSyntaxError: Unexpected token '??='\n", "text": "Getting this error when trying to start my server and connect to mongodb.\nI have Node v21.1.0 installed. Anyone fixed this error before?", "username": "wallinglee" }, { "code": "", "text": "I was facing the same issue.As a workaround, I integrated docker to my NodeJS application and used the latest stable version of Node (18.12.0). That solved the problem so I am guessing the issue stems from using Node v21", "username": "Darius_njihia" }, { "code": "", "text": "I don’t think so. I was using v18 and still had the same issue. I read another post that said upgrading node to the latest was the fix. It wasn’t.", "username": "wallinglee" }, { "code": "console.log(process.version)", "text": "@wallinglee we saw a similar question when 6.0.0 of the driver was released (MongoDB NodeJS Driver 6.0.0 Released - #3 by Warren_James) .The nullish coalescing assignment has been supported since Node.js 15, so it would be worth verifying that you’re indeed running your code in the version of the runtime you’ve indicated.If you add a console.log(process.version) to the code this should be fairly easy to validate.", "username": "alexbevi" }, { "code": "", "text": "Nothing logs because the error happens.", "username": "wallinglee" }, { "code": "", "text": "As a workaround, I integrated docker to my NodeJS application and used the latest stable version of Node (18.12.0).If you tried the same test using Node.js 21 (via docker) does the issue still occur?", "username": "alexbevi" }, { "code": "", "text": "I’m not using docker", "username": "wallinglee" }, { "code": "process: process {\n version: 'v14.18.0',\n versions: {\n node: '14.18.0',\n", "text": "node -v\nv18.12.0console.log(process)", "username": "wallinglee" }, { "code": "process", "text": "@wallinglee, based on your process output you’re still running Node 14 - even if you have Node 18 available.I’m not sure how you’re starting your server, but it appears an older version of Node is being used here.", "username": "alexbevi" }, { "code": "\"start\": \"node index.js\"\n", "text": "I just use npm run start in my node express app.", "username": "wallinglee" }, { "code": "", "text": "Yeah, it appears to be a node version error. I switched from v14.21.3 to v18.2.0 and then it worked fine.", "username": "Josue_Cerda" }, { "code": "", "text": "I’ve tried several node version installs. None fix the issue", "username": "wallinglee" }, { "code": "", "text": "Its not a Node Issue, its issue with mongoose V-8 and above, if you use mongoose V-7.4.1 then problem will be solve.", "username": "yaman_zaidi" }, { "code": "SyntaxError: Unexpected identifier\n", "text": "I’m getting a similar error. Tried downgrading mongoose to the version you specified, but it keeps happening.\nAny workaround?", "username": "Juanpi_N_A" }, { "code": "", "text": "I was facing the same issue and after trying different approaches, the one that worked for me was using mongoose version 7.6.3So in your server, run the command: npm i mongoose@7.6.3\nIt worked for me. Hopefully, it does for you too ", "username": "Sinmbf_Lost" }, { "code": "", "text": "Try using mongoose version 7.6.3 instead of the latest one. It worked for me :>", "username": "Sinmbf_Lost" }, { "code": "", "text": "Thank’s! It’s work for me to!", "username": "Buki_John" } ]
Node app - SyntaxError: Unexpected token
2023-10-31T23:30:26.503Z
Node app - SyntaxError: Unexpected token
1,084
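The ??= operator needs a newer Node.js runtime than the v14 that process.version reveals above, so the process that actually launches the server is resolving an older binary than the one node -v reports. A hedged guard that fails with a clear message instead of a parse error (the required major version shown matches the driver's documented minimum and is otherwise an assumption):

```javascript
// Hedged sketch: fail fast if the runtime is too old for the driver/bson
// build, instead of crashing on the ??= token. A package.json
// "engines": { "node": ">=16" } entry gives npm the same hint.
const major = Number(process.versions.node.split(".")[0]);
if (major < 16) {
  console.error(
    `This app requires Node.js 16+, but is running under ${process.version}. ` +
      "Check nvm/PATH or the service manager that starts the server."
  );
  process.exit(1);
}
```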
null
[ "cxx" ]
[ { "code": "bson_tbsoncxx::document::viewbson_tbsoncxx::builder::basic::arraybsoncxx::document::view", "text": "I’m writing c++ code that needs to jive with existing c code.The structure in question has an array of bson_t entries, and I need to create a bsoncxx::document::view from the bson_t objects (to port them to a bsoncxx structure).What is the easiest way to do this? Am I overthinking it, that is to say, is bson_t compatible to just append to, for example, a bsoncxx::builder::basic::array?Do I need to access the data buffer and initialize a bsoncxx::document::view manually, and if so, how do I access the buffer from a bson_t pointer?Sorry for any lack of clarity, and I assure you I have been googling this for a while.", "username": "lnorth" }, { "code": "bson_get_databson_tbsoncxx::document::viewlenint main()\n{\n //`bsondata` represents the document { \"foo\" : \"bar\" }\n uint8_t bsondata[] = {0x12, 0x00, 0x00, 0x00, 0x02, 0x66, 0x6f, 0x6f, 0x00, 0x04, 0x00, 0x00, 0x00, 0x62, 0x61, 0x72, 0x00, 0x00};\n bson_t *bsontDoc = bson_new_from_data(bsondata, sizeof(bsondata));\n char* str = bson_as_canonical_extended_json(bsontDoc, NULL);\n std::cout<<\"--------bson_t Doc:--------\"<<std::endl;\n printf(\"%s\\n\", str);\n \n // Create a bsoncxx::document::view from the bson_t\n bsoncxx::document::view bsoncxxDoc = bsoncxx::document::view(bson_get_data(bsontDoc), bsontDoc->len);\n std::cout << \"--------bsoncxx::document::view Doc-------- \\n\" << bsoncxx::to_json(bsoncxxDoc) << std::endl;\n return 0;\n}\n", "text": "Hi @lnorth, welcome to MongoDB community.You can use bson_get_data to get the data held by bson_t and pass it to bsoncxx::document::view along with len.See related documentation here - bson_t - libbson 1.25.0", "username": "Rishabh_Bisht" }, { "code": "", "text": "Thanks for the response! Is there any way to do such a conversion without using the bson library, like accessing the actual data without the bson api? I’m not sure how to explain it but I’d much rather not handle bson_t at all, I feel like using both libbson and bsoncxx kind of defeats the point of lifting my module to a later standard.", "username": "lnorth" }, { "code": "bson_t", "text": "Few questions:", "username": "Rishabh_Bisht" }, { "code": "", "text": "I’m connected to a capped mongodb collection that is used as a data queue (or maybe more accurately like a stream…). I am not able to change the incoming data, unfortunately. So far, I am only working on one out of many consuming submodules. Structural change would definitely be the best solution yet I am not in the position to make any decisions with repercussions toward other submodules.", "username": "lnorth" }, { "code": "", "text": "So, just to make sure I understand correctly, is this an accurate representation of your situation? You can make changes only in your code, and only want to use bsoncxx/mongocxx?\nsetup1562×1202 19.5 KBNote: I am assuming here that your code can’t directly connect to MongoDB collection. If you can, then you can directly fetch the BSON documents using the C++ driver into a bsoncxx document.", "username": "Rishabh_Bisht" }, { "code": "", "text": "Wow. 
Yes, this is accurate enough; the specifics are that my module IS a consumer but has to adhere to a standard defined somewhere upstream and used by all other consumers (bson_t in a predefined structure).", "username": "lnorth" }, { "code": "", "text": "Is your code making a direct connection to the MongoDB collection?\nWho is responsible for fetching the BSON docs from the MongoDB collection and populating it with bson_t?", "username": "Rishabh_Bisht" } ]
How do I create a bsoncxx::document::view from a bson_t?
2023-10-25T12:28:47.538Z
How do I create a bsoncxx::document::view from a bson_t?
249
null
[ "atlas-search", "atlas-cli", "local-dev-atlas-cli" ]
[ { "code": "$ brew install mongodb-atlas$ atlas deployments setup", "text": "Hello everyone!Today, we are excited to announce the release of a new local experience with Atlas, Atlas Search, and Atlas Vector Search with the Atlas CLI.The Atlas CLI, a unified command-line tool for creating and managing MongoDB Atlas deployments, now supports local development, including the ability to develop with Atlas Search and Atlas Vector Search locally. This makes it even easier to create full-text search or AI-powered applications, no matter your preferred environment for building with MongoDB.Please note that the new local experience is intended only for development purposes and not for production use cases.It only takes two commands to get started:Try it today and let us know what you think. If you’re interested in sharing a direct feedback, please send an email to local_dev_atlascli-eap@mongodb.com and we will get in touch with you.Jakub Lazinski", "username": "Jakub_Lazinski" }, { "code": "", "text": "Please note that the new local experience is intended only for development purposes and not for production use cases.Can you explain to us what are these limitations ? What are the mechanisms that prevent a use in production ?", "username": "Dan_Musorrafiti" }, { "code": "", "text": "Hi @Dan_Musorrafiti ,\nThe local development experience for Atlas, Atlas Search and Vector Search is designed and built with a focus on addressing the needs of local development and testing scenarios. To illustrate, local deployments operate as single-node replica sets and are accessible without requiring authentication.For Atlas Search to seamlessly function in a production environment, we recommend utilizing Atlas deployments hosted in the cloud.", "username": "Jakub_Lazinski" }, { "code": "", "text": "Attempting to use this for testing and want to be able to run the tests as part of a github actions flow.Using your implementation GitHub - mongodb/atlas-github-action: Github Action for the MongoDB Atlas CLI I can get the package loaded but it is useless as you dont support ubuntu for local use. The package linked only currently ubuntu.Is there a timeline for improving this or is there a known work around?", "username": "Jake_Turner" }, { "code": "", "text": "Hi Jake,The Public Preview of the local development experience for Atlas has indeed limited supportability. We’re planning to expand it to add Ubuntu and GitHub Actions support towards the General Availability.In meantime, could you share more details about what errors are you getting on the GitHub Action run?Thanks,\nJakub", "username": "Jakub_Lazinski" }, { "code": "", "text": "Is there any approximate timeline for GA or a reference to changes expected to be made?Listed my problems on the relevant GH repos", "username": "Jake_Turner" }, { "code": "", "text": "Sorry Jake for the late response, I was on leave.\nRegarding the timelines: we’re planning to look into the Ubuntu and GitHub Actions support in the first half of next year but can’t tell more precisely at this point.", "username": "Jakub_Lazinski" } ]
Introducing a Local Experience for Atlas, Atlas Search, and Atlas Vector Search with the Atlas CLI
2023-09-26T10:24:33.787Z
Introducing a Local Experience for Atlas, Atlas Search, and Atlas Vector Search with the Atlas CLI
866
null
[ "java" ]
[ { "code": "", "text": "Our application supports MongoDB version 4.2. We are using Java driver version 3.12. We want to upgrade to MongoDB 6.0, but our application supports mongodb 4.2 version. Can it be enough to update the java driver version to at least 4.7 to upgrade to Mongodb version 6.0?", "username": "G_S1" }, { "code": "", "text": "Hey @G_S1,Welcome to the MongoDB Community forums!We want to upgrade to MongoDB 6.0, but our application supports mongodb 4.2 version.When upgrading, it’s important to note that you don’t skip major versions. You should follow the upgrade path from 4.2 → 4.4 then to 5.0 and so on. You can find more details about the upgrade process in the MongoDB documentation: Upgrade Replica Set to 6.0.image1562×374 49 KB\nBefore proceeding with the upgrade, it’s recommended to review the release notes for version 6.0. These release notes will provide information on any issues that might make the new version not suitable for your production use. You can find the release notes here: MongoDB 6.0 Release Notes.Can it be enough to update the java driver version to at least 4.7 to upgrade to Mongodb version 6.0?To ensure that your application’s driver is compatible with the version of MongoDB you are planning to use. You can check driver compatibility in the MongoDB documentation: Java - Driver Compatibility Reference.Also, make sure to review the page that lists each version of MongoDB and the supported OS versions. This will ensure that your chosen OS is compatible with the MongoDB version you intend to use: Platform Support Matrix.In case of any further questions feel free to reach out.Best regards,\nKushagra", "username": "Kushagra_Kesav" } ]
MongoDB Upgrade 4.2 to 6.0
2023-11-07T07:06:46.809Z
MongoDB Upgrade 4.2 to 6.0
106
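Each hop on the 4.2 -> 4.4 -> 5.0 -> 6.0 path is normally confirmed by checking and then raising featureCompatibilityVersion before moving to the next binary upgrade. A hedged mongosh sketch of that per-hop step, shown for the 4.4 hop:

```javascript
// 1) After upgrading the binaries, confirm the current FCV:
db.adminCommand({ getParameter: 1, featureCompatibilityVersion: 1 });

// 2) Once the replica set is healthy on the new binaries, raise it
//    (repeat with "5.0" and "6.0" on the later hops):
db.adminCommand({ setFeatureCompatibilityVersion: "4.4" });
```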
null
[ "field-encryption" ]
[ { "code": "", "text": "If Client-Side Field Level Encryption (CSFLE) supports working with multiple AWS KMS where customer manages keys in its AWS KMS and same MongoDB database uses different AWS KMS accounts keys for field level encryption.", "username": "David_Livshits" }, { "code": "", "text": "Hi David and welcome to the Community! With CSFLE, only the top level key lives in the AWS KMS, not the Data Encryption Keys so there isn’t a need for multiple KMS accounts. The top level keys is used as a Key Encryption Key and protects the Data Encryption Keys so they are never stored in an unencrypted state. The Keys and Key Vaults page in MongoDB CSFLE docs goes into detail about the different levels of keys, their purpose and how they are used. I hope that helps!Cynthia", "username": "Cynthia_Braund" }, { "code": "", "text": "Hi,\nThank you for your reply. The point is actually related to Bring Your Own Key (BYOK) for encryption at rest where customer (3rdParty) needs to manage master key for encryption. I understand that master key is used for encryption of other keys however in certain cases there is a requirement that these master keys will be managed by customer whereas database itself belongs to the service provider. So the question is if there is a way to define AWS KMS provider per document?", "username": "David_Livshits" }, { "code": "", "text": "Hi David,In Atlas BYOK there is a single customer managed key supported per project/cluster. It does not support customer management of keys at lower levels (database, document etc). If a customer needs to be able to control the key used for BYOK they should have their own cluster/project which would then give them their own key.Cynthia", "username": "Cynthia_Braund" }, { "code": "", "text": "Hi,\nThanks for reply, generally my question converges to the following:\nIn non-Atlas MongoDB there is a way to define different keyids for encryption different documents for the same collection.\nNow question: Can these different keys be encrypted with different master keys from different AWS KMS or the only one KMS master key can be utilized for protection.", "username": "David_Livshits" }, { "code": "", "text": "Hi David,For at-rest encryption, even in Enterprise Advanced, the customer managed key is not definable at the document level. This page in our docs explains how encryption at-rest works for Enterprise Advanced.The only solution for encrypting data with a different key at the document level is CSFLE but even with CSFLE there is only one AWS KMS key that can be used for it. It will not support something like an AWS KMS key per tenant where you have multiple tenants’ data in the same database, which is the use case I believe you are trying to solve for.Cynthia", "username": "Cynthia_Braund" }, { "code": "", "text": "Thanks Cynthia.\nAnother related questions:\n1.) If it possible to work with multiple key vaults for DEKs collection for the same database? If so can you provide a sample how to do it.\n2.) In documentation is written that KMS is communicated each time DEK should be decrypted, however I believe that it is cached and it is not required to communicate KMS for every database read/write operation (because of performance and cost reason). 
Please confirm.Thanks", "username": "David_Livshits" }, { "code": "", "text": "Hi David,My answers here are for CSFLE.\nFor question 1: the KeyVault is specified when configuring the MongoClient, and only one KeyVault is allowed.\nFor question 2: you are correct that the DEK is cached on the driver side for 1 minute, so whether or not you get a cache hit on it depends on how recently that DEK was requested from the KMS.Cynthia", "username": "Cynthia_Braund" }, { "code": "", "text": "Thanks for the response.\nA few more questions:\n1.) So following your response (tell me if I’m wrong), I can create 2 MongoDB clients that work with the same database and 2 different key vaults protected by 2 different KMS instances, with each client working with different documents, right?2.) Another question: when a DEK is created with the option that allows providing key material, is there a way to provide encrypted key material (if so, with what key can it be protected, probably the KMS key), or must it be plaintext?3.) Can we choose different KIDs from the same KMS to protect different DEKs?Thanks", "username": "David_Livshits" }, { "code": "", "text": "Hi David - From your questions it seems like you are trying to build a system that allows you to use multiple KMS keys, where those keys are managed by external customers, and CSFLE was not designed to support this use case. If that is the case, and for something as important as security and encryption, I would advise against trying to make the solution work in a way it has not been designed or tested for.Cynthia", "username": "Cynthia_Braund" }, { "code": "", "text": "@Cynthia_Braund could you tell me if there is a way to pass an AWS KMS key id to the API request when creating a project/cluster to ensure that encryption at rest is enabled?", "username": "Agnieszka_Welian" } ]
CSFLE with multiple AWS KMS
2023-02-15T09:29:29.043Z
CSFLE with multiple AWS KMS
1,475
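For reference, the single-provider shape the answers describe looks roughly like the sketch below: one "aws" entry in kmsProviders and one key vault namespace per client, with individual DEKs still usable per field or per document but all wrapped by that single provider. Credentials, the namespace, and the masterKey region/ARN are placeholders, and the ClientEncryption import location can vary by driver version.

```javascript
// Hedged sketch (Node driver). The CMK never leaves AWS KMS; only wrapped
// DEKs are stored in the key vault collection.
const { MongoClient, ClientEncryption } = require("mongodb");

const keyVaultNamespace = "encryption.__keyVault"; // placeholder
const kmsProviders = {
  aws: {
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
  }
};

async function createDek(uri) {
  const keyVaultClient = await new MongoClient(uri).connect();
  const clientEncryption = new ClientEncryption(keyVaultClient, {
    keyVaultNamespace,
    kmsProviders
  });

  const dekId = await clientEncryption.createDataKey("aws", {
    masterKey: {
      region: "us-east-1", // placeholder
      key: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE" // placeholder ARN
    },
    keyAltNames: ["tenant-a"] // optional label; assumption
  });

  await keyVaultClient.close();
  return dekId;
}
```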
null
[ "sharding", "migration" ]
[ { "code": "config.system.sessions\"Slow query\"2023-10-25T11:49:21Z charmed-mongodb.mongod[4753]: {\"t\":{\"$date\":\"2023-10-25T11:49:21.835+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn179\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"command\":{\"_shardsvrMoveRange\":\"config.system.sessions\",\"toShard\":\"shard-one\",\"min\":{\"_id\":{\"id\":{\"$uuid\":\"83400000-0000-0000-0000-000000000000\"}}},\"waitForDelete\":false,\"epoch\":{\"$oid\":\"6538e3c125ae3b07d28d5eaf\"},\"fromShard\":\"shard-three\",\"maxChunkSizeBytes\":200000,\"forceJumbo\":2,\"secondaryThrottle\":false,\"writeConcern\":{\"w\":1,\"wtimeout\":0},\"$clusterTime\":{\"clusterTime\":{\"$timestamp\":{\"t\":1698234561,\"i\":1}},\"signature\":{\"hash\":{\"$binary\":{\"base64\":\"BDUe5gGly3g4iPDSiuCwtTRJTLU=\",\"subType\":\"0\"}},\"keyId\":7293828730399490051}},\"$configTime\":{\"$timestamp\":{\"t\":1698234561,\"i\":1}},\"$topologyTime\":{\"$timestamp\":{\"t\":1698233824,\"i\":2}},\"mayBypassWriteBlocking\":false,\"$db\":\"admin\"},\"numYields\":0,\"reslen\":236,\"locks\":{},\"writeConcern\":{\"w\":1,\"wtimeout\":0,\"provenance\":\"clientSupplied\"},\"remote\":\"10.18.246.22:60792\",\"protocol\":\"op_msg\",\"durationMillis\":536}}\nmongodmongossh.status() database: { _id: 'config', primary: 'config', partitioned: true },\n collections: {\n 'config.system.sessions': {\n shardKey: { _id: 1 },\n unique: false,\n balancing: true,\n chunkMetadata: [ { shard: 'shard-two', nChunks: 1024 } ],\n chunks: [\n 'too many chunks to print, use verbose if you want to force print'\n ],\n tags: []\n }\ntopiperfuse config\ndb.system.sessions.stats()\n sharded: false,\nconfig> db.system.sessions.find()MongoServerError: not authorized on config to execute command { find: \"system.sessions\", filter: {}, lsid: { id: UUID(\"2b7221a9-41ce-457b-a1e0-a1caf417b012\") }, $clusterTime: { clusterTime: Timestamp(1698306641, 2), signature: { hash: BinData(0, 7DAE0335273F250F7F1469FD3167EBAA61253250), keyId: 7293828730399490051 } }, $db: \"config\" }\n", "text": "When I remove the shard that hosts the config.system.sessions collection, drainage takes about 20-25 minutes.When I look at the logs for this shard I see \"Slow query\":From the docs: config.system.sessionsThere is 1024 chunks in this collection, and here is more useful info from sh.status() before removing:I have also logged into the machines hosting the shards/config servers and seen that on each:I have also logged into the shard hosting the collection and ran:and saw:this seems wrong, but I am not sure.I thought I would try re-sharding the collection, but when I tried to view the collection contents with config> db.system.sessions.find() I got:", "username": "Mia_Altieri" }, { "code": "[direct: mongos] test> db.sample.findOne()\n{ _id: ObjectId(\"6548e52848f65c4016a4cea6\"), name: 'Millie Curtis' }\n[direct: mongos] test>\nchunkMetadata: [\n { shard: 'shard01', nChunks: 614 },\n { shard: 'shard02', nChunks: 218 },\n { shard: 'shard03', nChunks: 192 }\n ],\nconfig> db.system.sessions.find()", "text": "Hi @Mia_Altieri and welcome to MongoDB community forums!!Removing a shard from a sharding deployment is expected to take time from minutes to hours even to days and also depends on how the data have been distributed in the chunks on these shards.Removing the shard from the deployment is however not a simple process. 
If the balancer is enabled in the configuration, then as soon as you enter the removeShard command, the balancer will try to rebalance all the chunks across the available shards.This also depends on how the shard key has been selected to give an even distribution of data between the chunks.\nPlease refer to the documentation to understand how to select the shard key in more detail.Based on the above case, I tried to replicate it in my local environment and test the scenario.I have around 20000 documents like:sharded on the name field. Since the name is unique, all the documents have moved to the same shard while the other shards are still empty.Once I remove the shard, I see the balancer trying to balance the chunks between the shards.Below is the status around 10 minutes after the shard-removal process has begun:Why is this query slow?Perhaps this is due to other processes running in the background, which might be causing the slowdown.How can I make this query faster?Is removing a shard a regular process for you? Could you clarify how often the removal of a shard is done?\nChanging the shard key might be a way to make the process faster, as the chunks would be more evenly distributed among all the shards and only a small number of chunks would need to be balanced, rather than rebalancing every chunk from one shard across all the remaining shards.config> db.system.sessions.find() I got:Finally, can you confirm the shard where you are running this command?Regards\nAasawari", "username": "Aasawari" } ]
Slow drainage of collection `config.system.sessions` when performing `removeShard`
2023-10-26T07:20:06.291Z
Slow drainage of collection `config.system.sessions` when performing `removeShard`
215
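A hedged way to watch the drain described above from mongos: re-issuing removeShard reports the remaining chunk count, and the balancer state is what actually drives the pace (the shard name is taken from the thread):

```javascript
// Re-running removeShard is the documented way to poll progress; it returns
// state "ongoing" with a remaining count until the drain completes.
db.adminCommand({ removeShard: "shard-three" });

// The drain only proceeds while the balancer is enabled and running.
sh.getBalancerState();   // expected: true
sh.isBalancerRunning();  // whether a balancing round is currently active

// Chunk placement, to watch config.system.sessions move off the shard.
sh.status();
```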
null
[]
[ { "code": "", "text": "Hi all, in Mongo Database. I have one collection ex: test1collection, the size of the collection is 640GB on disk. I have the following index on the collections, this index deletes data after expires, but the collection continues growing :\n{\nv: 2,\nkey: { processStart: -1 },\nname: ‘processStart_-1’,\nexpireAfterSeconds: 10368000\n}I can try manually deleting data from the collection and after I run the “compact” command, but I can not reduce space. anyone, help me, find a reason why increase data?\nOS Version: CentOS Stream release 8\nMongoDB Version: version 6.0.4\nFS type: ext4", "username": "mesaflave" }, { "code": "", "text": "what’s the output of collstats?", "username": "Kobe_W" }, { "code": "", "text": "dev1 [direct: primary] logs_store> db.GCA_REQUEST_LOG.stats()\n{\nok: 1,\ncapped: false,\nwiredTiger: {\nmetadata: { formatVersion: 1 },\ncreationString: ‘access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,durable_timestamp=none,read_timestamp=none,write_timestamp=off),block_allocation=best,block_compressor=zlib,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,import=(compare_timestamp=oldest_timestamp,enabled=false,file_metadata=,metadata_file=,repair=false),internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,readonly=false,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,tiered_object=false,tiered_storage=(auth_token=,bucket=,bucket_prefix=,cache_directory=,local_retention=300,name=,object_target_size=0),type=file,value_format=u,verbose=,write_timestamp_usage=none’,\ntype: ‘file’,\nuri: ‘statistics:table:collection-0-8665055136015507582’,\nLSM: {\n‘bloom filter false positives’: 0,\n‘bloom filter hits’: 0,\n‘bloom filter misses’: 0,\n‘bloom filter pages evicted from cache’: 0,\n‘bloom filter pages read into cache’: 0,\n‘bloom filters in the LSM tree’: 0,\n‘chunks in the LSM tree’: 0,\n‘highest merge generation in the LSM tree’: 0,\n‘queries that could have benefited from a Bloom filter that did not exist’: 0,\n‘sleep for LSM checkpoint throttle’: 0,\n‘sleep for LSM merge throttle’: 0,\n‘total size of bloom filters’: 0\n},\n‘block-manager’: {\n‘allocations requiring file extension’: 1981964,\n‘blocks allocated’: 10109322,\n‘blocks freed’: 4308289,\n‘checkpoint size’: Long(“655643594752”),\n‘file allocation unit size’: 4096,\n‘file bytes available for reuse’: 19681280,\n‘file magic number’: 120897,\n‘file major version number’: 1,\n‘file size in bytes’: Long(“655670501376”),\n‘minor version number’: 0\n},\nbtree: {\n‘btree checkpoint generation’: 5083,\n‘btree clean tree checkpoint expiration time’: 0,\n‘btree compact pages reviewed’: 0,\n‘btree compact pages rewritten’: 0,\n‘btree compact pages skipped’: 0,\n‘btree skipped by compaction as process would not reduce size’: 0,\n‘column-store fixed-size leaf 
pages’: 0,\n‘column-store fixed-size time windows’: 0,\n‘column-store internal pages’: 0,\n‘column-store variable-size RLE encoded values’: 0,\n‘column-store variable-size deleted values’: 0,\n‘column-store variable-size leaf pages’: 0,\n‘fixed-record size’: 0,\n‘maximum internal page size’: 4096,\n‘maximum leaf page key size’: 2867,\n‘maximum leaf page size’: 32768,\n‘maximum leaf page value size’: 67108864,\n‘maximum tree depth’: 6,\n‘number of key/value pairs’: 0,\n‘overflow pages’: 0,\n‘row-store empty values’: 0,\n‘row-store internal pages’: 0,\n‘row-store leaf pages’: 0\n},\ncache: {\n‘bytes currently in the cache’: Long(“2427971378”),\n‘bytes dirty in the cache cumulative’: Long(“633825821413”),\n‘bytes read into cache’: Long(“12256179671704”),\n‘bytes written from cache’: Long(“2498523664743”),\n‘checkpoint blocked page eviction’: 30308,\n‘checkpoint of history store file blocked non-history store page eviction’: 0,\n‘data source pages selected for eviction unable to be evicted’: 1029138,\n‘eviction gave up due to detecting an out of order on disk value behind the last update on the chain’: 0,\n‘eviction gave up due to detecting an out of order tombstone ahead of the selected on disk update’: 0,\n‘eviction gave up due to detecting an out of order tombstone ahead of the selected on disk update after validating the update chain’: 0,\n‘eviction gave up due to detecting out of order timestamps on the update chain after the selected on disk update’: 0,\n‘eviction gave up due to needing to remove a record from the history store but checkpoint is running’: 0,\n‘eviction walk passes of a file’: 933577,\n‘eviction walk target pages histogram - 0-9’: 101452,\n‘eviction walk target pages histogram - 10-31’: 178749,\n‘eviction walk target pages histogram - 128 and higher’: 0,\n‘eviction walk target pages histogram - 32-63’: 207556,\n‘eviction walk target pages histogram - 64-128’: 445820,\n‘eviction walk target pages reduced due to history store cache pressure’: 0,\n‘eviction walks abandoned’: 61395,\n‘eviction walks gave up because they restarted their walk twice’: 34487,\n‘eviction walks gave up because they saw too many pages and found no candidates’: 155305,\n‘eviction walks gave up because they saw too many pages and found too few candidates’: 89774,\n‘eviction walks reached end of tree’: 333537,\n‘eviction walks restarted’: 0,\n‘eviction walks started from root of tree’: 341586,\n‘eviction walks started from saved location in tree’: 591991,\n‘hazard pointer blocked page eviction’: 364415,\n‘history store table insert calls’: 0,\n‘history store table insert calls that returned restart’: 0,\n‘history store table out-of-order resolved updates that lose their durable timestamp’: 0,\n‘history store table out-of-order updates that were fixed up by reinserting with the fixed timestamp’: 0,\n‘history store table reads’: 0,\n‘history store table reads missed’: 0,\n‘history store table reads requiring squashed modifies’: 0,\n‘history store table truncation by rollback to stable to remove an unstable update’: 0,\n‘history store table truncation by rollback to stable to remove an update’: 0,\n‘history store table truncation to remove an update’: 0,\n‘history store table truncation to remove range of updates due to key being removed from the data page during reconciliation’: 0,\n‘history store table truncation to remove range of updates due to out-of-order timestamp update on data page’: 0,\n‘history store table writes requiring squashed modifies’: 0,\n‘in-memory page passed criteria to be split’: 
503184,\n‘in-memory page splits’: 225530,\n‘internal pages evicted’: 8829006,\n‘internal pages split during eviction’: 704,\n‘leaf pages split during eviction’: 384233,\n‘modified pages evicted’: 2500698,\n‘overflow pages read into cache’: 0,\n‘page split during eviction deepened the tree’: 0,\n‘page written requiring history store records’: 0,\n‘pages read into cache’: 50475472,\n‘pages read into cache after truncate’: 0,\n‘pages read into cache after truncate in prepare state’: 0,\n‘pages requested from the cache’: 764903473,\n‘pages seen by eviction walk’: 1115338812,\n‘pages written from cache’: 10099160,\n‘pages written requiring in-memory restoration’: 1950736,\n‘the number of times full update inserted to history store’: 0,\n‘the number of times reverse modify inserted to history store’: 0,\n‘tracked dirty bytes in the cache’: 144228979,\n‘unmodified pages evicted’: 56209191\n},\ncache_walk: {\n‘Average difference between current eviction generation when the page was last considered’: 0,\n‘Average on-disk page image size seen’: 0,\n‘Average time in cache for pages that have been visited by the eviction server’: 0,\n‘Average time in cache for pages that have not been visited by the eviction server’: 0,\n‘Clean pages currently in cache’: 0,\n‘Current eviction generation’: 0,\n‘Dirty pages currently in cache’: 0,\n‘Entries in the root page’: 0,\n‘Internal pages currently in cache’: 0,\n‘Leaf pages currently in cache’: 0,\n‘Maximum difference between current eviction generation when the page was last considered’: 0,\n‘Maximum page size seen’: 0,\n‘Minimum on-disk page image size seen’: 0,\n‘Number of pages never visited by eviction server’: 0,\n‘On-disk page image sizes smaller than a single allocation unit’: 0,\n‘Pages created in memory and never written’: 0,\n‘Pages currently queued for eviction’: 0,\n‘Pages that could not be queued for eviction’: 0,\n‘Refs skipped during cache traversal’: 0,\n‘Size of the root page’: 0,\n‘Total number of pages currently in cache’: 0\n},\n‘checkpoint-cleanup’: {\n‘pages added for eviction’: 95103,\n‘pages removed’: 1369822,\n‘pages skipped during tree walk’: Long(“36020221950”),\n‘pages visited’: Long(“36429321051”)\n},\ncompression: {\n‘compressed page maximum internal page size prior to compression’: 4096,\n'compressed page maximum leaf page size prior to compression ': 127796,\n‘compressed pages read’: 41547463,\n‘compressed pages written’: 9372162,\n‘number of blocks with compress ratio greater than 64’: 0,\n‘number of blocks with compress ratio smaller than 16’: 3417818,\n‘number of blocks with compress ratio smaller than 2’: 943578,\n‘number of blocks with compress ratio smaller than 32’: 7745849,\n‘number of blocks with compress ratio smaller than 4’: 3319900,\n‘number of blocks with compress ratio smaller than 64’: 0,\n‘number of blocks with compress ratio smaller than 8’: 26120318,\n‘page written failed to compress’: 0,\n‘page written was too small to compress’: 726998\n},\ncursor: {\n‘Total number of entries skipped by cursor next calls’: 9697,\n‘Total number of entries skipped by cursor prev calls’: 0,\n‘Total number of entries skipped to position the history store cursor’: 0,\n‘Total number of times a search near has exited due to prefix config’: 0,\n‘bulk loaded cursor insert calls’: 0,\n‘cache cursors reuse count’: 18102358,\n‘close calls that result in cache’: 18102363,\n‘create calls’: 404,\n‘cursor next calls that skip due to a globally visible history store tombstone’: 0,\n‘cursor next calls that skip greater than or equal to 100 
entries’: 1,\n‘cursor next calls that skip less than 100 entries’: 238867709,\n‘cursor prev calls that skip due to a globally visible history store tombstone’: 0,\n‘cursor prev calls that skip greater than or equal to 100 entries’: 0,\n‘cursor prev calls that skip less than 100 entries’: 1,\n‘insert calls’: 44381763,\n‘insert key and value bytes’: Long(“1893558025960”),\nmodify: 0,\n‘modify key and value bytes affected’: 0,\n‘modify value bytes modified’: 0,\n‘next calls’: 238867710,\n‘open cursor count’: 2,\n‘operation restarted’: 3121296,\n‘prev calls’: 1,\n‘remove calls’: 14016942,\n‘remove key bytes removed’: 70084710,\n‘reserve calls’: 0,\n‘reset calls’: 74080595,\n‘search calls’: 28071811,\n‘search history store calls’: 0,\n‘search near calls’: 1138742,\n‘truncate calls’: 0,\n‘update calls’: 0,\n‘update key and value bytes’: 0,\n‘update value size change’: 0\n},\nreconciliation: {\n‘approximate byte size of timestamps in pages written’: 952285648,\n‘approximate byte size of transaction IDs in pages written’: 476142696,\n‘dictionary matches’: 0,\n‘fast-path pages deleted’: 0,\n‘internal page key bytes discarded using suffix compression’: 9656793,\n‘internal page multi-block writes’: 21650,\n‘leaf page key bytes discarded using prefix compression’: 0,\n‘leaf page multi-block writes’: 388724,\n‘leaf-page overflow keys’: 0,\n‘maximum blocks required for a page’: 63,\n‘overflow values written’: 0,\n‘page checksum matches’: 0,\n‘page reconciliation calls’: 2569162,\n‘page reconciliation calls for eviction’: 2004660,\n‘pages deleted’: 24102,\n'pages written including an aggregated newest start durable timestamp ': 636214,\n'pages written including an aggregated newest stop durable timestamp ': 38722,\n'pages written including an aggregated newest stop timestamp ': 33836,\n‘pages written including an aggregated newest stop transaction ID’: 33836,\n'pages written including an aggregated newest transaction ID ': 663253,\n'pages written including an aggregated oldest start timestamp ': 636052,\n‘pages written including an aggregated prepare’: 0,\n‘pages written including at least one prepare’: 0,\n‘pages written including at least one start durable timestamp’: 6905689,\n‘pages written including at least one start timestamp’: 6905689,\n‘pages written including at least one start transaction ID’: 6905689,\n‘pages written including at least one stop durable timestamp’: 2189973,\n‘pages written including at least one stop timestamp’: 2189973,\n‘pages written including at least one stop transaction ID’: 2189973,\n‘records written including a prepare’: 0,\n‘records written including a start durable timestamp’: 45416180,\n‘records written including a start timestamp’: 45416180,\n‘records written including a start transaction ID’: 45416164,\n‘records written including a stop durable timestamp’: 14101673,\n‘records written including a stop timestamp’: 14101673,\n‘records written including a stop transaction ID’: 14101673\n},\nsession: {\n‘object compaction’: 0,\n‘tiered operations dequeued and processed’: 0,\n‘tiered operations scheduled’: 0,\n‘tiered storage local retention time (secs)’: 0\n},\ntransaction: {\n‘race to read prepared update retry’: 0,\n‘rollback to stable history store records with stop timestamps older than newer records’: 0,\n‘rollback to stable inconsistent checkpoint’: 0,\n‘rollback to stable keys removed’: 0,\n‘rollback to stable keys restored’: 0,\n‘rollback to stable restored tombstones from history store’: 0,\n‘rollback to stable restored updates from history store’: 
0,\n‘rollback to stable skipping delete rle’: 0,\n‘rollback to stable skipping stable rle’: 0,\n‘rollback to stable sweeping history store keys’: 0,\n‘rollback to stable updates removed from history store’: 0,\n‘transaction checkpoints due to obsolete pages’: 0,\n‘update conflicts’: 0\n}\n},\nsharded: false,\nsize: 11259480833004,\ncount: 252458055,\nnumOrphanDocs: 0,\nstorageSize: 655670501376,\ntotalIndexSize: 24217636864,\ntotalSize: 679888138240,\nindexSizes: {\nid: 9989586944,\nNUMBER_INDX: 3707850752,\nREQUEST_START_INDX: 8452722688,\nmethodName_1: 2067476480\n},\navgObjSize: 44599,\nns: ‘logs_store.GCA_REQUEST_LOG’,\nnindexes: 4,\nscaleFactor: 1\n}", "username": "mesaflave" } ]
Unclean space in mongodb
2023-11-05T21:24:15.557Z
Unclean space in mongodb
143
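A quick way to judge how much of the storage shown in the stats above is actually reclaimable is to compare `storageSize` with the bytes WiredTiger reports as available for reuse, and only then consider `compact`. A minimal mongosh sketch, using the `logs_store.GCA_REQUEST_LOG` namespace from the output; check the compact caveats for your server version before running it outside a maintenance window.
```
// mongosh sketch: how much of storageSize can WiredTiger hand back?
const coll = db.getSiblingDB("logs_store").getCollection("GCA_REQUEST_LOG");
const stats = coll.stats();
const reusable = stats.wiredTiger["block-manager"]["file bytes available for reuse"];
print("storageSize:", stats.storageSize, "bytes reusable:", reusable);

// If a large share is reusable, compact can release space back to the OS.
// Run it per node (secondaries first); it has a performance impact while running.
db.getSiblingDB("logs_store").runCommand({ compact: "GCA_REQUEST_LOG" });
```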
null
[ "replication", "compass" ]
[ { "code": "", "text": "Hi Team,We have setup a replicaset mongodb in linux. 3 node(1primary and 2 secondary).\nI am able to connect to primary via compass but after stepdown of primary, 2node has been new primary. But unable to connect via compass. From system i have network access to all 3 nodes.\nI am stuck in this. Please advise me.Thanks,\nKiran", "username": "Kiran_Joshy" }, { "code": "", "text": "How is your connection string looking?", "username": "Viswa_Rudraraju" }, { "code": "", "text": "mongodb://username:password@ip:27017,ip2:27017,ip3:27017/?authSource=admin&replicaSet=r0", "username": "Kiran_Joshy" } ]
Unable to connect new primary after stepdown of old primary in ReplicaSET
2023-11-02T06:13:45.402Z
Unable to connect new primary after stepdown of old primary in ReplicaSET
175
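For threads like the one above, it helps to confirm from the server's side which member actually became primary and what hostnames it advertises, since Compass discovers the primary from the replica set configuration rather than from the seed list. A small mongosh sketch against any reachable member, using the same credentials and hosts as in the connection string above:
```
// mongosh sketch: verify the post-stepdown topology.
rs.status().members.forEach(m => print(m.name, m.stateStr, "health:", m.health));

// db.hello() shows the primary and host list that drivers (and Compass) will use.
const hello = db.hello();
print("primary:", hello.primary);
print("hosts:", hello.hosts.join(", "));
```
If the advertised hostnames are not resolvable or reachable from the client machine, Compass can reach the seed node but fail against the newly elected primary, which matches the symptom described here.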
null
[ "aggregation", "indexes", "performance" ]
[ { "code": " 'namespace': 'dap-ods.bookings',\n 'optimizedPipeline': True,\n 'parsedQuery': {'$and': [{'contacts.value': {'$eq': '11133 3336-8878'}},\n {'segments.departureAirportCode': {'$eq': 'SEA'}},\n {'segments.departureDateTimeStnLocal': {'$lte': datetime.datetime(2023, 10, 14, 0, 0)}},\n {'segments.departureDateTimeStnLocal': {'$gte': datetime.datetime(2023, 10, 13, 0, 0)}}]},\n 'planCacheKey': '6820AF83',\n 'plannerVersion': 1,\n 'queryHash': 'D0CBB6D1',\n 'rejectedPlans': [{'inputStage': {'filter': {'$and': [{'contacts.value': {'$eq': '11133 3336-8878'}},\n {'segments.departureAirportCode': {'$eq': 'SEA'}},\n {'segments.departureDateTimeStnLocal': {'$gte': datetime.datetime(2023, 10, 13, 0, 0)}}]},\n 'inputStage': {'direction': 'forward',\n 'indexBounds': {'segments.departureDateTimeStnLocal': ['[new '\n 'Date(-9223372036854775808), '\n 'new '\n 'Date(1697241600000)]']},\n 'indexName': 'segments.departureDateTimeStnLocal_1',\n 'indexVersion': 2,\n 'isMultiKey': True,\n 'isPartial': False,\n 'isSparse': False,\n 'isUnique': False,\n 'keyPattern': {'segments.departureDateTimeStnLocal': 1},\n 'multiKeyPaths': {'segments.departureDateTimeStnLocal': ['segments']},\n 'stage': 'IXSCAN'},\n 'stage': 'FETCH'},\n 'stage': 'PROJECTION_SIMPLE',\n 'transformBy': {'_id': True,\n 'businessKey': True,\n 'passengersInfo': True}},\n {'inputStage': {'filter': {'$and': [{'contacts.value': {'$eq': '1111333336 8878'}},\n {'segments.departureAirportCode': {'$eq': 'SEA'}},\n {'segments.departureDateTimeStnLocal': {'$lte': datetime.datetime(2023, 10, 14, 0, 0)}}]},\n 'inputStage': {'direction': 'forward',\n 'indexBounds': {'segments.departureDateTimeStnLocal': ['[new '\n 'Date(1697155200000), '\n 'new '\n 'Date(9223372036854775807)]']},\n 'indexName': 'segments.departureDateTimeStnLocal_1',\n 'indexVersion': 2,\n 'isMultiKey': True,\n 'isPartial': False,\n 'isSparse': False,\n 'isUnique': False,\n 'keyPattern': {'segments.departureDateTimeStnLocal': 1},\n 'multiKeyPaths': {'segments.departureDateTimeStnLocal': ['segments']},\n 'stage': 'IXSCAN'},\n 'stage': 'FETCH'},\n 'stage': 'PROJECTION_SIMPLE',\n 'transformBy': {'_id': True,\n 'businessKey': True,\n 'passengersInfo': True}},\n {'inputStage': {'filter': {'$and': [{'contacts.value': {'$eq': '11133 3336-8878'}},\n {'segments.departureAirportCode': {'$eq': 'SEA'}},\n {'segments.departureDateTimeStnLocal': {'$gte': datetime.datetime(2023, 10, 13, 0, 0)}}]},\n 'inputStage': {'direction': 'forward',\n 'indexBounds': {'segments.departureAirportCode': ['[MinKey, '\n 'MaxKey]'],\n 'segments.departureDateTimeStnLocal': ['[new '\n 'Date(-9223372036854775808), '\n 'new '\n 'Date(1697241600000)]']},\n 'indexName': 'seg_departureDate_depAirport_1',\n 'indexVersion': 2,\n 'isMultiKey': True,\n 'isPartial': False,\n 'isSparse': False,\n 'isUnique': False,\n 'keyPattern': {'segments.departureAirportCode': 1,\n 'segments.departureDateTimeStnLocal': 1},\n 'multiKeyPaths': {'segments.departureAirportCode': ['segments'],\n 'segments.departureDateTimeStnLocal': ['segments']},\n 'stage': 'IXSCAN'},\n 'stage': 'FETCH'},\n 'stage': 'PROJECTION_SIMPLE',\n 'transformBy': {'_id': True,\n 'businessKey': True,\n 'passengersInfo': True}},\n {'inputStage': {'filter': {'$and': [{'contacts.value': {'$eq': '11133 3336-8878'}},\n {'segments.departureAirportCode': {'$eq': 'SEA'}},\n {'segments.departureDateTimeStnLocal': {'$lte': datetime.datetime(2023, 10, 14, 0, 0)}}]},\n 'inputStage': {'direction': 'forward',\n 'indexBounds': {'segments.departureAirportCode': ['[MinKey, '\n 
'MaxKey]'],\n 'segments.departureDateTimeStnLocal': ['[new '\n 'Date(1697155200000), '\n 'new '\n 'Date(9223372036854775807)]']},\n 'indexName': 'seg_departureDate_depAirport_1',\n 'indexVersion': 2,\n 'isMultiKey': True,\n 'isPartial': False,\n 'isSparse': False,\n 'isUnique': False,\n 'keyPattern': {'segments.departureAirportCode': 1,\n 'segments.departureDateTimeStnLocal': 1},\n 'multiKeyPaths': {'segments.departureAirportCode': ['segments'],\n 'segments.departureDateTimeStnLocal': ['segments']},\n 'stage': 'IXSCAN'},\n 'stage': 'FETCH'},\n 'stage': 'PROJECTION_SIMPLE',\n 'transformBy': {'_id': True,\n 'businessKey': True,\n 'passengersInfo': True}}],\n 'winningPlan': {'inputStage': {'filter': {'$and': [{'segments.departureAirportCode': {'$eq': 'SEA'}},\n {'segments.departureDateTimeStnLocal': {'$lte': datetime.datetime(2023, 10, 14, 0, 0)}},\n {'segments.departureDateTimeStnLocal': {'$gte': datetime.datetime(2023, 10, 13, 0, 0)}}]},\n 'inputStage': {'direction': 'forward',\n 'indexBounds': {'contacts.value': ['[\"11133 3336-8878\", \"11133 3336-8878\"]']},\n 'indexName': 'contacts_1',\n 'indexVersion': 2,\n 'isMultiKey': True,\n 'isPartial': False,\n 'isSparse': False,\n 'isUnique': False,\n 'keyPattern': {'contacts.value': 1},\n 'multiKeyPaths': {'contacts.value': ['contacts']},\n 'stage': 'IXSCAN'},\n 'stage': 'FETCH'},\n 'stage': 'PROJECTION_SIMPLE',\n 'transformBy': {'_id': True,\n 'businessKey': True,\n 'passengersInfo': True}}},```\n \n\nHere is the original query we tried\n```agg_pipeline = [\n {'$match':{\"contacts.value\":\"11133 3336-8878\"}},\n {'$match':{\"segments.departureDateTimeStnLocal\":{'$gte':start_date, '$lte':end_date},'segments.departureAirportCode':'SEA'}},\n {'$project':{'businessKey':1, 'passengersInfo':1}}\n]\nexplain_output = db.command('aggregate', 'test_collection', pipeline=agg_pipeline, explain=True)```", "text": "We are trying to fetch data out of 100 million records. The filter attributes are in different arrays. We created separate indexes on both arrays. We used multiple stages to filter the records. But when wee checking the execution plan, both match stages are combined and the index is chosen. Its taking more than 20 seconds to get the final result set(only 23 records). Below the execution plan", "username": "Habeeb_Raja" }, { "code": "executionStats```agg_pipeline = [\n {'$match':{\"contacts.value\":\"11133 3336-8878\"}},\n {'$match':{\"segments.departureDateTimeStnLocal\":{'$gte':start_date, '$lte':end_date},'segments.departureAirportCode':'SEA'}},\n {'$project':{'businessKey':1, 'passengersInfo':1}}\n]\nagg_pipeline = [\n {'$match': {\n \"contacts.value\": \"11133 3336-8878\",\n \"segments.departureDateTimeStnLocal\": { '$gte': start_date, '$lte': end_date },\n \"segments.departureAirportCode\": \"SEA\"\n }},\n {'$project': {\n 'businessKey': 1,\n 'passengersInfo': 1\n }}\n]\n", "text": "Hello @Habeeb_Raja ,Firstly, I would recommend you to run your query with explain in executionStats mode (e.g. `db.collection.explain(‘executionStats’).aggregate(…)) and examine the output.As per my understanding, You can consider following changesCombine your two $match stages into one to make the query more efficient. This will reduce the number of documents that need to be processed and eliminate the need for combining separate $match conditions.If your result set is large but you only need a limited number of records, consider adding a $limit stage to limit the number of documents returned. 
This can improve performance and reduce network traffic.If you need to process a large number of documents, you might consider splitting your aggregation into smaller batches and using the $skip and $limit stages to process the data incrementally.Here’s the improved aggregation pipeline with the suggested changes:Remember to fine-tune your indexes and adapt the pipeline based on the specifics of your dataset and query patterns for the best performance.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "\"executionStats\" : {\n \"executionSuccess\" : true,\n \"nReturned\" : 23,\n \"executionTimeMillis\" : 69019,\n \"totalKeysExamined\" : 6666,\n \"totalDocsExamined\" : 6666,\n \"executionStages\" : {\n \"stage\" : \"FETCH\",\n \"filter\" : {\n \"$and\" : [\n {\n \"segments.departureAirportCode\" : {\n \"$eq\" : \"SEA\"\n }\n },\n {\n \"segments.departureDateTimeStnLocal\" : {\n \"$lte\" : ISODate(\"2023-10-14T00:00:00Z\")\n }\n },\n {\n \"segments.departureDateTimeStnLocal\" : {\n \"$gte\" : ISODate(\"2023-10-13T00:00:00Z\")\n }\n }\n ]\n },\n \"nReturned\" : 23,\n \"executionTimeMillisEstimate\" : 32163,\n \"works\" : 6668,\n \"advanced\" : 23,\n \"needTime\" : 6643,\n \"needYield\" : 0,\n \"saveState\" : 4543,\n \"restoreState\" : 4543,\n \"isEOF\" : 1,\n \"docsExamined\" : 6666,\n \"alreadyHasObj\" : 0,\n \"inputStage\" : {\n \"stage\" : \"IXSCAN\",\n \"nReturned\" : 6666,\n \"executionTimeMillisEstimate\" : 38,\n \"works\" : 6667,\n \"advanced\" : 6666,\n \"needTime\" : 0,\n \"needYield\" : 0,\n \"saveState\" : 4543,\n \"restoreState\" : 4543,\n \"isEOF\" : 1,\n \"keyPattern\" : {\n \"contacts.value\" : 1\n },\n \"indexName\" : \"contacts_1\",\n \"isMultiKey\" : true,\n \"multiKeyPaths\" : {\n \"contacts.value\" : [\n \"contacts\"\n ]\n },\n \"isUnique\" : false,\n \"isSparse\" : false,\n \"isPartial\" : false,\n \"indexVersion\" : 2,\n \"direction\" : \"forward\",\n \"indexBounds\" : {\n \"contacts.value\" : [\n \"[\\\"11133 3336-8878\\\", \\\"11133 3336-8878\\\"]\"\n ]\n },\n \"keysExamined\" : 6666,\n \"seeks\" : 1,\n \"dupsTested\" : 6666,\n \"dupsDropped\" : 0\n }\n }\n}\n", "text": "@Tarun_Gaur Thanks for the update.We have tried the changes you suggested. Here is the outputFinal result will be 23 documents. But the Keys Examined is 6666. How to reduce that. Also we are not much clear about 3rd point “aggregation into smaller batches and using the $skip and $limit stages to process the data incrementally”. Can you share some reference to that", "username": "Habeeb_Raja" }, { "code": "db.collection.getIndexes()db.collection.stats()", "text": "we are not much clear about 3rd point “aggregation into smaller batches and using the $skip and $limit stages to process the data incrementally”. Can you share some reference to thatQueries might return many results. To make navigating results easier, one can use the aggregation pipeline stages to paginate the query results. As you mentioned that your results only includes 23 documents so this scenario does not require pagination. Kindly refer How to Paginate Query Results to learn more about this.We are trying to fetch data out of 100 million records.Final result will be 23 documents. But the Keys Examined is 6666. How to reduce that.When the collection gets much larger, with respect to the indexing, the query still should return relatively quick (with caveat of data size vs. hardware capabilities, of course). 
I think the indexes you have created might be optimal for your use-case.Can you share more details mentioned below for me to provide further optimization recommendations?Regards,\nTarun", "username": "Tarun_Gaur" } ]
Array Index issue - Not working
2023-10-24T13:37:39.457Z
Array Index issue - Not working
292
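To check whether the single combined $match suggested in the thread changes the chosen plan, the executionStats explain can also be run directly from mongosh. A sketch using the collection and field names that appear in the explain output above:
```
// mongosh sketch: explain the combined-$match pipeline with executionStats.
db.bookings.explain("executionStats").aggregate([
  { $match: {
      "contacts.value": "11133 3336-8878",
      "segments.departureAirportCode": "SEA",
      "segments.departureDateTimeStnLocal": {
        $gte: ISODate("2023-10-13T00:00:00Z"),
        $lte: ISODate("2023-10-14T00:00:00Z")
      }
  }},
  { $project: { businessKey: 1, passengersInfo: 1 } }
]);
// Compare totalKeysExamined / totalDocsExamined against nReturned to judge how
// selective the winning index really is.
```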
null
[]
[ { "code": "... SELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/containers/storage/overlay-containers/fe6f1040c1373930efe68c777805ecd4c921631e3a87b8806af1fe0cdf266d8b/userdata/shm. For complete SELinux messages run: sealert -l 5263cddc-183a-472b-9098-bf4599c8453c ... ", "text": "Hi guys.A complete novice here. I’ve just installed vanilla-default MongoDB and right away SELinux shows denials:\n... SELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/containers/storage/overlay-containers/fe6f1040c1373930efe68c777805ecd4c921631e3a87b8806af1fe0cdf266d8b/userdata/shm. For complete SELinux messages run: sealert -l 5263cddc-183a-472b-9098-bf4599c8453c ... \nWhat is Mongo’s business in checking such path (and there is more)? - I refuse to believe (yet) that my Mongo is trojaned.Many thanks, L.", "username": "Bez_Non" }, { "code": "", "text": "What did the complete messages say?", "username": "Jack_Woehr" }, { "code": "SELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/containers/storage/overlay. For complete SELinux messages run: sealert -l 5263cddc-183a-472b-9098-bf4599c8453cSELinux is preventing /usr/bin/mongod from search access on the directory /proc/sys/fs/binfmt_misc. For complete SELinux messages run: sealert -l e2430433-e3b9-4bd9-9ac1-9616418c8612SELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/nfs/rpc_pipefs. For complete SELinux messages run: sealert -l bc61357c-c100-4d44-a43a-5b90008b44b8", "text": "That is the pretty much the whole message. next would be whole sealert and left at the end would be a custom SE module to “fix” this. But just from looking at those syslog errors - what MongoDB want from:\n…\nSELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/containers/storage/overlay. For complete SELinux messages run: sealert -l 5263cddc-183a-472b-9098-bf4599c8453c\nor…\nSELinux is preventing /usr/bin/mongod from search access on the directory /proc/sys/fs/binfmt_misc. For complete SELinux messages run: sealert -l e2430433-e3b9-4bd9-9ac1-9616418c8612\nanother one:\nSELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/nfs/rpc_pipefs. For complete SELinux messages run: sealert -l bc61357c-c100-4d44-a43a-5b90008b44b8Seem these three paths MongoDB attempts to access repeatedly and then data inside /var/lib/containers/storage/overlayThis must be trivially easy to reproduce - I’m on Centos Stream with mongodb-org-server-6.0.11-1.el9.x86_64", "username": "Bez_Non" }, { "code": "", "text": "SELinux is preventing /usr/bin/mongod from search access on the directory /var/lib/containers/storage/overlay. For complete SELinux messages run: sealert -l 5263cddc-183a-472b-9098-bf4599c8453cWell, do you see “the complete SELinux message” by running the command it gives you?", "username": "Jack_Woehr" }, { "code": "", "text": "Like I said - it would be to show whole sealert - you need that?\nThere won’t much more apart some details - but in the essence, SELinux is already saying that MongoDB has no business looking there & that on it’s own is valid question/issue enough.It looks like, if not the culprit, that - ftdc - is a player here.", "username": "Bez_Non" }, { "code": "", "text": "Intersting. 
Maybe @Tarun_Gaur knows the answer?", "username": "Jack_Woehr" }, { "code": "", "text": "It looks like, if not the culprit, that - ftdc - is a player here.Likely this is the case FTDC iterates over mounts to report on disk usage.", "username": "chris" } ]
What's MongoDB business in "other" filesystem paths? SELinux
2023-11-03T10:38:24.571Z
What&rsquo;s MongoDB business in &ldquo;other&rdquo; filesystem paths? SELinux
197
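Since the last reply points at FTDC iterating over mounts, one low-impact way to confirm that before writing a custom SELinux module is to toggle diagnostic data collection and watch whether new AVC denials stop appearing. A hedged mongosh sketch; this assumes the parameter is runtime-settable in your version, and it disables diagnostics that support engineers rely on, so treat it as a test rather than a fix.
```
// mongosh sketch: confirm FTDC is the source of the SELinux denials.
db.adminCommand({ getParameter: 1, diagnosticDataCollectionEnabled: 1 });

// Temporarily switch FTDC off, then watch /var/log/audit/audit.log for new AVCs.
db.adminCommand({ setParameter: 1, diagnosticDataCollectionEnabled: false });

// Re-enable once the test is done.
db.adminCommand({ setParameter: 1, diagnosticDataCollectionEnabled: true });
```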
null
[]
[ { "code": "", "text": "Good morning,\nI don’t understand why since yesterday my requests have been extremely long, more or less 2 to 3 minutes, because I’m with the free version? but I don’t think I saw that there was a request limit?\nIf anyone could tell me, that would be great.", "username": "jordan_canot" }, { "code": "", "text": "but I don’t think I saw that there was a request limit?But there are:Why would anyone pay if there was no limit on the free stuff?", "username": "steevej" }, { "code": "", "text": "Why would anyone pay if there was no limit on the free stuff?Yes, but I should have received an alert or something, sorry, I’m new to mongo, I’m developing a small local application, I don’t think I’ve exceeded the authorised limit, do you know where I can see that?image1664×682 34.7 KB", "username": "jordan_canot" }, { "code": "", "text": "It is free and it is also shared.So what ever others are doing with their free and shared clusters that are running on the same physical infrastructure influence that latency of your queries.The better gauge of the performance of your queries is their explain plan.As for alerts, I do not know but you can check the following:", "username": "steevej" }, { "code": "", "text": "It is free and it is also shared.So what ever others are doing with their free and shared clusters that are running on the same physical infrastructure influence that latency of your queries.The better gauge of the performance of your queries is their explain plan.As for alerts, I do not know but you can check the following:https://www.mongodb.com/docs/atlas/reference/alert-conditions/OK thanks, I’ve read it but I don’t think I’ve exceeded the limits, I have a database with two collections and 5 users in one collection and as far as the data is concerned I don’t know where I should look to see if I’ve spent their limits over a period of 7 days.", "username": "jordan_canot" }, { "code": "", "text": "I am not using the free tier for anything serious so I have never look at the performance metrics so I would not know how to help you further.", "username": "steevej" }, { "code": "M0M2M5", "text": "Hi Jordan,I don’t know where I should look to see if I’ve spent their limits over a period of 7 days.Contact the Atlas in-app chat support team to see if they can verify if you’ve exceeded the data transfer limits which are:Regards,\nJason", "username": "Jason_Tran" } ]
Very slow request since yesterday
2023-11-05T14:11:02.987Z
Very slow request since yesterday
140
null
[ "aggregation", "dot-net" ]
[ { "code": "internal async Task<IEnumerable<BsonDocument>> GetDocumentsAsync(string collectionName, int? pageIndex, int? pageSize) {\n var collection = _database.GetCollection<BsonDocument>(collectionName);\n\n var aggregationPipeline = new BsonDocument[]\n {\n new BsonDocument(\"$skip\", pageSize * (pageIndex ?? 0)),\n new BsonDocument(\"$limit\", pageSize ?? 10)\n };\n\n var result = await collection.Aggregate(aggregationPipeline).ToListAsync();\n\n return result; \n}\n", "text": "I’m trying to perform a simple aggregation pipeline for Mongo DB using the C# driver. Here is my code :But the aggregate function takes ~7s.I’ve tried the same code (with the same pipelines) but in node.js and it takes only 0.7s (10 times less).The total count of documents in the collection is 30K. Why does it take so long using the C# driver ? Is there any way to fix this performance issue ?Thank you", "username": "ChaReborn" }, { "code": "ToListAsyncBsonDocumentCollection.aggregateToCursorAsync()ToCursorAsync", "text": "Hi, @ChaReborn,Welcome to the MongoDB Community Forums. I understand that you are observing a 10x performance difference between the .NET/C# Driver and the Node.js Driver. Reviewing your C# code, I notice that you call ToListAsync which will not only execute the query, but also exhaustion of the cursor possibly requiring multiple roundtrips to the server to fetch all the 16MB batches. It will also deserialize the returned binary BSON returned on the wire into C# BsonDocument objects.Without seeing your Node.js code it is hard to say whether your Node.js code is doing the same amount of work. Typical Node.js usage would be to return the result of Collection.aggregate, which is a cursor and not the deserialized results. You can achieve a similar result by calling ToCursorAsync() in your C# code. This will execute the query and return a cursor to the results that can be lazily iterated.Please try ToCursorAsync in your C# code and see if you achieve similar performance results as your Node.js code.Sincerely,\nJames", "username": "James_Kovacs" } ]
MongoDb : Aggregate method taking too long
2023-11-03T08:57:09.843Z
MongoDb : Aggregate method taking too long
185
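To make the comparison with Node.js concrete: the difference described above is essentially eager materialization versus lazy cursor iteration, and the Node.js equivalents look roughly like the sketch below (database and collection names are placeholders):
```
// Node.js driver sketch: eager toArray() vs. lazily iterating the cursor.
const { MongoClient } = require("mongodb");

async function run(uri) {
  const client = new MongoClient(uri);
  await client.connect();
  const coll = client.db("test").collection("someCollection");
  const pipeline = [{ $skip: 0 }, { $limit: 10 }];

  // Eager: every batch is fetched and deserialized before this returns.
  const all = await coll.aggregate(pipeline).toArray();
  console.log("materialized", all.length, "documents");

  // Lazy: aggregate() just returns a cursor; batches are pulled as you iterate.
  for await (const doc of coll.aggregate(pipeline)) {
    // process doc here
  }

  await client.close();
}
```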
null
[ "python" ]
[ { "code": "from django.db import models\nfrom django.contrib.auth.models import User\nimport uuid\n\n\nclass Profile(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n unique_id = models.UUIDField(default=uuid.uuid4, primary_key=True)\n image = models.FileField(upload_to='profile_pic', null=True)\n last_text = models.CharField(max_length=100)\n online_status = models.BooleanField(default=False)\n last_seen = models.DateTimeField(auto_now=True)\n\n\nclass ChatRoom(models.Model):\n name = models.CharField(max_length=100, null=True)\n slug = models.SlugField(unique=True, null=True)\n room_id = models.CharField(max_length=64) \n", "text": "I am building class models using Django and mongodb is connected to my server successfully.When I create a new object in “ChatRoom” model, I can create and save new data and everything is snyc to my database.However, when I tried to create a object in “Profile” model, I got this error.No exception message suppliedThis is my code.models.pyHow can I fix this database error? please let me know if u need more information", "username": "Vaibhav_Paliwal" }, { "code": "", "text": "Hi Vaibhav! Were you able to fix the issue? I’m stuck with a similar error.", "username": "Anurag_Dubey" } ]
DatabaseError at /admin/core/profile/add/ No exception message supplied
2023-06-06T12:14:41.987Z
DatabaseError at /admin/core/profile/add/ No exception message supplied
1,154
null
[ "node-js", "graphql", "serverless" ]
[ { "code": "", "text": "I am considering MongoDB as a database for websites that require a mostly read-heavy workload.Which data options in MongoDB Atlas have the highest performance for reading data?\nData API, GraphQL, or Realm web SDK?I can’t use the node.js driver as I am planning to run the website in a serverless environment.", "username": "Arpan_Patel" }, { "code": "", "text": "Hi Arpan_Patel,The Data API and Realm Web SDK should have similar performance for read operations; the primary benefit of the Web SDK is automation and built-in authentication whereas the Data API provides a RESTful API interface, making it easy to interact with MongoDB through HTTP endpoints and a standardized API that follows REST principles.GraphQL would be the slowest because it has to generate schemas, so I would recommend one of the above offerings.", "username": "Kaylee_Won" }, { "code": "", "text": "Thanks for answering it.\nAlso is there a performance difference in using data API’s built-in post endpoint vs creating a custom GET endpoint to read the data from the same collection?", "username": "Arpan_Patel" } ]
Performance question - Atlas data api vs graphql api vs realm-web sdk
2023-10-11T21:31:46.373Z
Performance question - Atlas data api vs graphql api vs realm-web sdk
294
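On the follow-up question: both the built-in Data API actions and a custom HTTPS endpoint are HTTP calls into App Services, so the endpoint type alone is unlikely to dominate read latency; a custom endpoint mainly buys control over the request/response shape. For reference, a rough sketch of the built-in find action, where the app ID, key and names are placeholders and the real base URL (which can be region-specific) should be copied from the Data API settings:
```
// Sketch: calling the built-in Data API "find" action with fetch.
const res = await fetch(
  "https://data.mongodb-api.com/app/<APP_ID>/endpoint/data/v1/action/find",
  {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "api-key": "<API_KEY>",
    },
    body: JSON.stringify({
      dataSource: "<CLUSTER_NAME>",
      database: "<DATABASE>",
      collection: "<COLLECTION>",
      filter: { published: true },
      limit: 20,
    }),
  }
);
const { documents } = await res.json();
```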
null
[ "node-js" ]
[ { "code": "net.Socket", "text": "Today, Cloudflare announced their support for TCP sockets in Workers, along with day-1 support for Postgres.I would love to see some form of first class support, either in the main package or through another package, but I can’t think of a good way to do this (aside from forking the Node driver). I took a look at Postgres’ solution here; they check if net.Socket exists, otherwise the Cloudflare implementation is used instead. Seems straightforward, there might be a more elegant way to make that check.What are other peoples’ thoughts on this? Would something like this be worth a pull request?", "username": "Cole_Crouter" }, { "code": "", "text": "Hey @Cole_Crouter, I’m the Product Manager for the Node.js Driver so I’d be happy to chime in here.Our engineers provided early feedback on the Socket API design, however we have not yet completed a review of the final product Cloudflare released this week.This is something we are reviewing in detail and plan to share more information on as we progress. We always welcome feedback and participation from the developer community, so if you have ideas either open a ticket in our Jira under the NODE project or continue to ask questions here on our forums.", "username": "alexbevi" }, { "code": "", "text": "Hi @alexbevi - that sounds very interresting Any updates on this project? Would be amazing to be able to connect to MongoDB from a Cloudflare worker (without having to use a slow HTTP API or data proxy).", "username": "Alex_Bjorlig" }, { "code": "nodejs_compatnodejs_compat", "text": "Hi @Alex_Bjorlig, and apologies for the delay in getting back to you.Cloudflare Workers aren’t 100% API compatible with Node.js, though they do offer a nodejs_compat compatibility flag to improve this developer experience.The MongoDB Node.js Driver leverages a number of Node.js APIs internally, which our team found during preliminary testing weren’t compatible with Cloudflare Workers (even using nodejs_compat).We are working on improving this as it currently prevents users from using the MongoDB Node.js Driver in this environment at all. Once we have this functional and understand the implications for our developers we’ll have more information to share.", "username": "alexbevi" }, { "code": "", "text": "No worries We are deployed with Vercel, and are slowly and steadily preparing our application for the edge. Because we use MongoDB, this is a critical step of the journey.We expect the edge to make the app faster, as we won’t have cold starts (and getting closer to the end-user for non-data interactions)You can always reach out if you need someone to test initial iterations or similar. (And if we can help in other ways, please let us know )", "username": "Alex_Bjorlig" }, { "code": "", "text": "Também estamos esperando isso para trazer todos nossos bancos de dados para a MongoDB.", "username": "Alison_Zigulich" }, { "code": "", "text": "Hi Alex,Wanted to check in to see if there was any progress of using the officials mongo driver now on cw workers? 
Is this something that the team is working towards (to have more support for node packages) or will workers always have this limitation…We’re using the dataAPI which is slow, but works, moving forward would like a more permanent scalable solution.", "username": "Rishi_uttam" }, { "code": "", "text": "Disclaimer; I’m not affiliated with MongoDB - just a “normal” customer.I hope/think MongoDB is really busy at the moment, making the official MongoDB driver compatible with the worker’s environment. Cloudflare recently shared this post, and the feature set is impressive. Combined with Cloudflare’s work on Hyperdrive - it seems like a lot of effort is being put into how to make initial connections fast. From the article:…it maintains a set of regional database connection pools across Cloudflare’s network, so a Cloudflare Worker avoids making a fresh connection to a database on every request. Instead, the Worker can establish a connection to Hyperdrive (fast!), with Hyperdrive maintaining a pool of ready-to-go connections back to the database. Since a database can be anywhere from 30ms to (often) 300ms away over a single round-trip (let alone the seven or more you need for a new connection), having a pool of available connections dramatically reduces the latency issue that short-lived connections would otherwise suffer.I hope MongoDB is actively working on:Hopefully, we soon get more information - so MongoDB can be an attractive alternative to Hyperdrive/D1. Would be such a shame if MongoDB misses the open window they have right now to be an early adopter of the edge.", "username": "Alex_Bjorlig" }, { "code": "", "text": "Excellent post Alex. I do hope the Atlas team takes heed of this advice, many startups are looking towards serverless data base at the edge with a quick response time, and if Mongo fails to adapt with the tools that are already out there then consumers may depart to greener pastures.", "username": "Rishi_uttam" }, { "code": "", "text": "We are working on improving this as it currently prevents users from using the MongoDB Node.js Driver in this environment at all. Once we have this functional and understand the implications for our developers we’ll have more information to share.Any updates here? Would be incredible to see MongoDB moving fast in this space ", "username": "Alex_Bjorlig" }, { "code": "", "text": "Hi @Alex_Bjorlig,We are working with Cloudflare on this still, however at the moment we are blocked as Atlas requires TLS for all connections, however Cloudflare Workers only supports mTLS at the moment (and TLS cannot be disabled in Altas).", "username": "alexbevi" } ]
Cloudflare Workers integration is now possible
2023-05-16T21:21:25.692Z
Cloudflare Workers integration is now possible
1,598
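For reference, the runtime-detection pattern the first post describes for the Postgres driver (use `net.Socket` when it exists, otherwise fall back to Cloudflare's socket API) looks roughly like the sketch below. It only illustrates that pattern; as the rest of the thread makes clear, the official MongoDB driver does not yet run in Workers, so this is not a working transport.
```
// Illustrative only: pick a TCP socket implementation based on the runtime.
async function openSocket(host, port) {
  try {
    const net = await import("node:net"); // Node.js / node-compatible runtimes
    if (net.Socket) {
      return net.createConnection({ host, port });
    }
  } catch (_) {
    // "node:net" is not available in this runtime
  }
  // Cloudflare Workers runtime
  const { connect } = await import("cloudflare:sockets");
  return connect({ hostname: host, port });
}
```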
null
[ "atlas-cluster" ]
[ { "code": "", "text": "We are running into some issues with Atlas DB / Federated databases while using Tableau –We have tried working with the sample data from the AWS instance and that is working with Tableau, but now we are trying to connect with our database instance.Any suggestions? We are completely stuck…Not much out there to help with this issue.from Mongodb log files\n{“level”:“info”,“timestamp”:“2023-03-10T21:28:54.524Z”,“logger”:“command”,“msg”:“failed to parse command”,“hostname”:“federateddatabaseinstance0-qrodv.a.query.mongodb.net”,“correlationID”:“174b2c10459967288af2d484”,“commandName”:“unknown”,“error”:“command endSessions is unsupported”,“ts”:1678483734524462721}{“level”:“debug”,“timestamp”:“2023-03-10T21:28:54.565Z”,“msg”:“dropping connection with read error”,“hostname”:“federateddatabaseinstance0-qrodv.a.query.mongodb.net”,“bytesRead”:0,“error”:“an internal error occurred”,“ts”:1678483734565696550}{“level”:“warn”,“timestamp”:“2023-03-10T21:28:54.565Z”,“msg”:“failed reading message from client”,“hostname”:“federateddatabaseinstance0-qrodv.a.query.mongodb.net”,“error”:“an internal error occurred”,“ts”:1678483734565742351}", "username": "Sam_Marrazzo" }, { "code": "", "text": "Hi @Sam_Marrazzo, My name is Alexi Antonino and I am the product manager for Atlas SQL, Tableau Connector. I can help you if you’d like. Can I have a bit more information? Did you follow these steps to connect from Tableau? https://www.mongodb.com/docs/atlas/data-federation/query/sql/tableau/connect/ And are you getting an error from within Tableau? Are you connecting with Tableau Desktop. Server, or Prep? Our connector doesn’t support Tableau Online just yet.\nIf it is easier, we can communicate via email and set up a call so that I can view first hand where you are getting blocked.Best,Alexi.antonino@mongodb.com", "username": "Alexi_Antonino" }, { "code": "", "text": "Yes we did follow the directions you posted and we tried to and it worked connecting to the AWS sample data.\nWe are getting an error within Tableau our team will send you the exact error to your email we receive from Tableau\nWe are using Tableau Desktop -\n\nScreenshot 2023-03-12 at 2.57.53 PM1032×974 65.1 KB\n", "username": "Sam_Marrazzo" }, { "code": "", "text": "Thanks @Sam_Marrazzo - I will be on the look out for that specific email with the error!", "username": "Alexi_Antonino" }, { "code": "", "text": "Tableau can connect to Atlas DB / Federated databases as long as the database supports one of Tableau’s native connectors. These connectors allow Tableau to access the database and query the data. Depending on the connector, Tableau may also be able to visualize the data directly without requiring any additional programming. For example,what-is-chatgpt understand through Tableau supports native connectors for PostgreSQL, and Amazon Athena, both of which are compatible with Atlas DB / Federated databases.", "username": "shagufta_shamid" }, { "code": "", "text": "Atlas DB is a type of federated database that allows users to access data stored in multiple databases as if it were a single database. This type of database system can be particularly useful for organizations that have data spread across multiple databases or even across different cloud providers. When using Tableau with Atlas DB, users can connect to multiple databases at once and easily access the data they need for analysis or reporting. 
By utilizing Tableau’s drag-and-drop interface and powerful visualizations to Pinterest video users can quickly gain insights into their data and make informed business decisions. With Atlas DB’s federated approach to data storage and Tableau’s intuitive interface, organizations can streamline their data analytics processes and gain a competitive edge in their respective industries to car", "username": "paint_beast" }, { "code": "", "text": "Alexi.antonino@mongodb.comAlexi we finally upgraded after lots of testing to MongoDB 5.0I received this error now… From Tableau IDE.An error occurred while communicating with the MongoDB Atlas by MongoDB data source ‘Untitled Data Source’\nBad Connection: Tableau could not connect to the data source.\nError Code: FAB9A2C5\nConnection failed.\nUnable to connect to the MongoDB Atlas by MongoDB server “”. Check that the server is running and that you have access privileges to the requested database.\nConnector Class: mongodb_jdbc, Version: 1.1.0I have privileges and the server is running.String = mongodb://appetitreporting-qrodv.a.query.mongodb.net/VirtualDatabase0?ssl=true&authSource=adminI sent you an email as well", "username": "Sam_Marrazzo" }, { "code": "", "text": "Hi Sam - I am not seeing the email, but I will keep searching for it (it isn’t in my spam either). One thing I will ask, in the connection dialog within Tableau, for Database, are you entering in “VirtualDatabase0”? or another thing to try, are you able to connect to this federated database via Compass? That will help verify connection to the Federated DB.Here is my calendly link Calendly - Alexi Antonino, feel free to put a meeting on my calendar where we can share screens and get past this error.Best,\nAlexi", "username": "Alexi_Antonino" }, { "code": "", "text": "Hi Alexi\nWe would really appreciate it and thank you if we also could get some help connecting the tableau desktop to atlas sql .\nwe are running also in an issue with the same error code.\nwe did follow up this link to connect:but still when we try to connect from the tableau desktop we get the same error :An error occurred while communicating with the MongoDB Atlas by MongoDB data source ‘Untitled Data Source’\nBad Connection: Tableau could not connect to the data source.\nError Code: FAB9A2C5\nCouldn’t connect due to a timeout. Please check your hostname and port. If necessary, set a longer connection timeout in the MongoDB URI.\nUnable to connect to the MongoDB Atlas by MongoDB server “”. Check that the server is running and that you have access privileges to the requested database.\nConnector Class: mongodb_jdbc, Version: 1.2.0thank you", "username": "Netanel_Piattelli" }, { "code": "", "text": "Hello @Netanel_Piattelli - sorry for the delay, I am just now seeing this. I am guessing this isn’t a timeout issue (yes it’s timing out, but this is just the result) and more of a connection problem. A few things would help, and you may wish to send me this via email for privacy alexi.antonino@mongodb.com:Here is my calendly link Calendly - Alexi Antonino If you’d like to schedule a quick meeting.Best,\nAlexi", "username": "Alexi_Antonino" } ]
Atlas DB / Federated databases while using Tableau –
2023-03-10T22:30:42.539Z
Atlas DB / Federated databases while using Tableau –
1,510
null
[ "java", "connecting", "serverless" ]
[ { "code": "", "text": "I have searched for the answer and I have not found a satisfied answer, is it true that for now the serverless instance with aws lambda function can only be through ip whitelist 0.0.0.0/0 (allow from anywhere), is there no safer way?\nI read the documentation that serverless instances do not support network peering, and private endpoints are still in the development stage and cannot be used yet.", "username": "Developer_JS_Backend" }, { "code": "", "text": "Hello Developer_JS_BackendThanks for the question. Serverless Instances do support private endpoints. See this link for more information.Best,\nAnurag", "username": "Anurag_Kadasne" } ]
How is the secure connection between serverless instance of mongodb atlas and aws lambda?
2023-11-06T14:00:47.598Z
How is the secure connection between serverless instance of mongodb atlas and aws lambda?
112
null
[ "aggregation" ]
[ { "code": "{\n _id: ObjectId('...'),\n name: \"CyM_PC\",\n description:\"CyM_PC (Outside, Inside)\",\n network_objects: [\n \"IN03PGSUR\",\n \"IN04PGSUR\",\n \"IN05PGSUR\"\n],\n group_objects: [\n \"Other_Users\"\n]\n}\n{\n _id: ObjectId('...'),\n name: \"IN03PGSUR\",\n description: \"Created during migration\"\n}\n{\n \"$lookup\": {\n \"from\": \"network_groups\", \n \"localField\": \"group_objects\", \n \"foreignField\": \"name\", \n \"pipeline\": [\n {\n \"$lookup\": {\n \"from\": \"network_groups\", \n \"localField\": \"group_objects\", \n \"foreignField\": \"name\", \n \"as\": \"networks_inside\"\n }\n }\n ], \n \"as\": \"networks\"\n }\n }\n", "text": "I have a collection named network_groups and another collection named network_objects.\nEach network group can contain multiple network_objects elements and group_objects( which are elements within the network_groups collection) as well.\nHere is an example of each:The thing is inside group_objects there could be other network_groups items with network_groups inside and this could go on for a few layers.\nI would like to extract all network_objects in the nested network_groups, so I have made a loopup query:But this just covers 2 layers of nesting. I would like to keep going as long as there is a network_groups item with group_objects inside.\nIs there a way to perform such an iterative query.Thanks", "username": "Pablo_Horstrand" }, { "code": "", "text": "Perhaps $graphLookup would serve your purpose since it does recursive lookup operations.", "username": "steevej" } ]
Nested lookup - how to flatten objects in an iterative manner
2023-11-06T12:57:45.350Z
Nested lookup - how to flatten objects in an iterative manner
95
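Since $graphLookup was the suggestion, here is a minimal sketch against the collections described in the question: it follows `group_objects` → `name` to any depth and then flattens the network objects of the root group plus every nested group. Collection and field names come from the post; the final $project is just one possible way to flatten the result.
```
db.network_groups.aggregate([
  { $match: { name: "CyM_PC" } },

  // Recursively collect every nested group, however many levels deep.
  { $graphLookup: {
      from: "network_groups",
      startWith: "$group_objects",
      connectFromField: "group_objects",
      connectToField: "name",
      as: "nested_groups"
  }},

  // Merge network_objects from the root group and all nested groups into one array.
  { $project: {
      name: 1,
      all_network_objects: {
        $setUnion: [
          "$network_objects",
          { $reduce: {
              input: "$nested_groups.network_objects",
              initialValue: [],
              in: { $setUnion: ["$$value", "$$this"] }
          }}
        ]
      }
  }}
]);
```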
null
[ "database-tools" ]
[ { "code": "", "text": "my csv import is working but only 548 rows are imported into atlas. I checked that my csv file is not corrupted. The csv contains 14k rows. Is there a way to increase the buffer size? thanks", "username": "Rick_Delpo" }, { "code": "", "text": "thanks anyway guys but I solved the problem", "username": "Rick_Delpo" }, { "code": "", "text": "What was your issue and solution for those that come across this post later?", "username": "chris" }, { "code": "", "text": "It was really kind of a stupid thing. My source folder had multiple versions of the csv file name so when i renamed the file to a truly unique name it imported all 14k records. My guess is that the mongoimport timed out when i confueed it.", "username": "Rick_Delpo" } ]
Mongoimport not importing entire csv
2023-11-06T15:47:08.988Z
Mongoimport not importing entire csv
90
null
[ "atlas-functions", "flutter" ]
[ { "code": "", "text": "I have a function which iterates over records, putting them into a hashmap to return to my flutter app. For the app I also add a numeric index value (which is needed later) by the app. The function returns two different types for this numeric value (it’s a position index used later) $numberLong for 0, $numberInt for 1. And this of course is a problem for the JSON parser package I’m using.I’m curious as to the reason 0 is represented in EJSON one way for 0, while non-zero is represented another?Is there a way in the function to declare this so zero and non-zero are returned with the same EJSON type?My other choice is to use pass a ones-based index down then decrement when using, which i’d rather not do if there’s a better alternative.", "username": "Josh_Whitehouse" }, { "code": "exports = async function(arg){\n // This default function will get a value and find a document in MongoDB\n // To see plenty more examples of what you can do with functions see: \n // https://www.mongodb.com/docs/atlas/app-services/functions/\n var findResult = [];\n try {\n // Get a value from the context (see \"Values\" tab)\n // Update this to reflect your value's name.\n // var valueName = \"value_name\";\n // var value = context.values.get(valueName);\n for(var i = 0; i < 6; i++) {\n findResult.push(i);\n }\n\n } catch(err) {\n console.log(\"Error occurred while executing findOne:\", err.message);\n\n return { error: err.message };\n }\n\n // To call other named functions:\n // var result = context.functions.execute(\"function_name\", arg1, arg2);\n\n return { result: findResult };\n};\nEJSON.parse('{\"result\":[{\"$numberLong\":\"0\"},{\"$numberInt\":\"1\"},{\"$numberInt\":\"2\"},{\"$numberInt\":\"3\"},{\"$numberInt\":\"4\"},{\"$numberInt\":\"5\"}]}')\n", "text": "Here is a test example function I wrote to show the output…And the results…I tried using 1 as the first number to start the counter, thinking 0 might be problem, but the first value returned is always $numberLong, the subsequent are always $numberIntBlockquote", "username": "Josh_Whitehouse" }, { "code": "NumberJSON.stringify()", "text": "Hi @Josh_Whitehouse,Unfortunately, Javascript only has ONE type, that is Number, so there’ll always be a bit of discrepancy when trying to set a representation: data that’s not coming from a collection (where a schema can be set to force a specific type) isn’t consistently represented by EJSON, can I ask what’s the actual use case?What the function is returning is an object, so the fields are in fact just numbers: if you’re transmitting that result outside (say, in an HTTP Endpoint), you can also use JSON.stringify() to set the response body, so any JSON-compliant parser would work, EJSON is a possibility, but not the only one.", "username": "Paolo_Manna" }, { "code": "", "text": "the function is called from a flutter app - which uses a JSON parsing package, which makes scanning the JSON object returned for both $numberLong and $numberInt very tricky (the parser is constructs the flutter object from JSON in a strict fashion.For now, I am going to workaround this by passing the value as a string, then converting it to a number in the flutter code once it’s been parsed.", "username": "Josh_Whitehouse" }, { "code": "JSON.stringify(result)jsonDecode", "text": "For now, I am going to workaround this by passing the value as a string, then converting it to a number in the flutter code once it’s been parsed.Yes, that’s correct: the suggested way is to use JSON.stringify(result) as a returned value, 
to ensure everything is JSON-compatible, then use jsonDecode on that string on the client side", "username": "Paolo_Manna" } ]
0 is returned as { "$numberLong": "0" } 1 is returned as { "$numberInt": "1" }
2023-11-05T16:49:14.147Z
0 is returned as { &ldquo;$numberLong&rdquo;: &ldquo;0&rdquo; } 1 is returned as { &ldquo;$numberInt&rdquo;: &ldquo;1&rdquo; }
125
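Putting the accepted workaround into the test function from the thread: returning a JSON string from the Atlas Function avoids the `$numberInt`/`$numberLong` wrappers entirely, and the Flutter side can simply jsonDecode it. A sketch based on the example function posted above:
```
// Atlas Function sketch: return plain JSON instead of an EJSON-wrapped object.
exports = async function () {
  const findResult = [];
  for (let i = 0; i < 6; i++) {
    findResult.push(i);
  }
  // JSON.stringify yields ordinary numbers: {"result":[0,1,2,3,4,5]}
  return JSON.stringify({ result: findResult });
};
```
On the client, `jsonDecode(response)` then produces plain Dart ints, with no $numberInt/$numberLong handling needed.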
null
[ "aggregation", "queries", "node-js", "crud" ]
[ { "code": "", "text": "Hi Everyone,\nI have a aggregation pipeline where the match criteria is generated dynamically by end user and the updates as well. Right now I convert the user input to a aggregation pipeline and I have some scenarios where I am using $lookup for matching parents and child. Now the thing is that collection on which I am running the aggregation is having 24m records and the aggregation is timeout due to which. And there is no scope of optimization for the pipeline because I am asked not the do too many custom changes for user input. And keep it simple.\nNow my question is. Is it a best practice to increase the aggregate pipeline timeout and batch size so that It wouldn’t timeout.\nOr should I change all of my approach to use multiple fetch and update calls.\nAnd right now I am not using mongo atlas.", "username": "yashdeep_hinge" }, { "code": "", "text": "Hey @yashdeep_hinge,Welcome to the MongoDB Community forums!I convert the user input to a aggregation pipeline and I have some scenarios where I am using $lookup for matching parents and childCould you share the aggregation pipeline you are executing dynamically and the sample dataset after omitting the sensitive information?I am running the aggregation is having 24m records and the aggregation is timeout due to which.Could you share the error log you’ve encountered, and the frequency of timeouts, and explain how you arrived at the conclusion that 24 million documents are responsible for timeouts?Is it a best practice to increase the aggregate pipeline timeout and batch size so that It wouldn’t timeout.\nOr should I change all of my approaches to use multiple fetch and update calls?In my opinion, there could be a couple of reasons, including slow hardware or poor query performance. However, to gain more understanding, please share the following additional information:Look forward to hearing from you.Regards,\nKushagra", "username": "Kushagra_Kesav" } ]
For a 24m records collection is it correct to update using aggregation with custom timeout settings or do multiple batch updates using updateMany and other methods?
2023-11-02T09:36:30.080Z
For a 24m records collection is it correct to update using aggregation with custom timeout settings or do multiple batch updates using updateMany and other methods?
156
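Raising the timeout mostly hides the cost, so if the dynamically built pipeline cannot be simplified, one common alternative (not something prescribed in the thread) is to stream the matching `_id`s with a cursor and apply the update in bounded bulkWrite batches. A hedged Node.js sketch with placeholder names; `update` is assumed to be an update document such as `{ $set: { ... } }`:
```
// Node.js sketch: apply a dynamic update across a large match set in batches.
async function updateInBatches(coll, filter, update, batchSize = 1000) {
  const cursor = coll.find(filter, { projection: { _id: 1 } });
  let batch = [];
  let modified = 0;

  for await (const { _id } of cursor) {
    batch.push({ updateOne: { filter: { _id }, update } });
    if (batch.length === batchSize) {
      const res = await coll.bulkWrite(batch, { ordered: false });
      modified += res.modifiedCount;
      batch = [];
    }
  }
  if (batch.length > 0) {
    const res = await coll.bulkWrite(batch, { ordered: false });
    modified += res.modifiedCount;
  }
  return modified;
}
```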
null
[ "dot-net", "connecting", "atlas-cluster", "android", "kotlin" ]
[ { "code": "implementation 'org.mongodb:mongodb-driver-sync:4.7.2'+srvIllegalArgumentException+srv+srvclass Database(client: MongoClient){\n\n companion object {\n\n fun connect(username : String?, password: String?) : Database?\n {\n val databaseObject: Database\n lateinit var mongoClient: MongoClient\n\n if (username == null || password == null)\n return null;\n\n try {\n val connString = ConnectionString(\"mongodb://$username:$password@gmmdb.gioj2hh.mongodb.net/?retryWrites=true&w=majority\")\n val settings = MongoClientSettings.builder()\n .applyConnectionString(connString)\n .serverApi(\n ServerApi.builder().version(ServerApiVersion.V1).build()\n )\n .build()\n\n mongoClient = MongoClients.create(settings)\n\n val database = mongoClient.getDatabase(\"GMMDB\")\n val collection = database.getCollection(\"gmm_estoque\")\n \n // after around 30 seconds this function is called it throws me a TimeoutException\n // probably because I'm not really connected to the cluster app\n collection.insertOne(Document()\n .append(\"_id\", ObjectId())\n .append(\"Date\", Date.from(Instant.now()))\n .append(\"Code\", 192)\n .append(\"Name\", \"Asa de galinha\")\n .append(\"Marca\", \"Sadia\")\n .append(\"Disponibilidade\", 1)\n )\n }\n catch (e: Exception) {\n Log.d(\"AppDebug\", e.toString() + \" - \\n\\n Message: \" + e.message + \" -\\n\\n StackTrace: \" + e.stackTrace)\n }\n\n return Database(mongoClient)\n }\n }\n}\nplugins {\n id 'com.android.application'\n id 'org.jetbrains.kotlin.android'\n}\n\nandroid {\n namespace 'com.freto.barcodereader'\n compileSdk 33\n\n defaultConfig {\n applicationId \"com.freto.barcodereader\"\n minSdk 21\n targetSdk 33\n versionCode 1\n versionName \"1.0\"\n\n testInstrumentationRunner \"androidx.test.runner.AndroidJUnitRunner\"\n }\n\n viewBinding {\n enabled = true\n }\n\n buildTypes {\n release {\n minifyEnabled false\n proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'\n }\n }\n compileOptions {\n sourceCompatibility JavaVersion.VERSION_1_8\n targetCompatibility JavaVersion.VERSION_1_8\n }\n kotlinOptions {\n jvmTarget = '1.8'\n }\n}\n\ndependencies {\n implementation 'org.mongodb:mongodb-driver-sync:4.5.1'\n implementation 'androidx.core:core-ktx:1.9.0'\n implementation 'androidx.appcompat:appcompat:1.5.1'\n implementation 'com.google.android.material:material:1.7.0'\n implementation 'androidx.constraintlayout:constraintlayout:2.1.4'\n testImplementation 'junit:junit:4.13.2'\n androidTestImplementation 'androidx.test.ext:junit:1.1.3'\n androidTestImplementation 'androidx.test.espresso:espresso-core:3.4.0'\n}\n", "text": "Specs:\nAndroid Studio (Native)\nKotlin\nJava Driver 4.5.1I tried to connect to my MongoDB Atlas Cluster from a native Kotlin Android App and none of the ways I tried worked. It works just fine in my desktop app in C# 6.0. Here is what I did:1- I’m using java driver 4.5.1, because I cant build the app with mongodb driver versions higher than 4.5.1, it doesn’t work for me. If I try to implement it in build.gradle, for example: implementation 'org.mongodb:mongodb-driver-sync:4.7.2' when I try to build the app I receive this:Invalid build configuration. 
Attempt to create a global synthetic for ‘Record desugaring’ without a global-synthetics consumer.Stacktrace:\nhttps://pastebin.com/Wnszgb3Gbut versions less or equal to 4.5.1 builds fine.2- After rolling back to version 4.5.1 a new problem occurs, if my connection string contains +srv in it, it throws me an IllegalArgumentException when trying to connect from the connection string, so I have to remove the +srv to “connect” to my Atlas Cluster.3- If I remove the +srv it goes fine through all connection functions, but it doesn’t connect to anything. If I try to run any database CRUD operation, I’ll receive a TimeoutException after around 30 seconds because I’m actually not logged in to the MongoDB Atlas Cluster.Here is my connection code:gradle:I’ve tried many things, but nothing seems to work, so I need help getting it to work, thank you very much, I enjoy MongoDB very much!", "username": "Raique_Familia" }, { "code": "", "text": "Hello @Raique_Familia ,Welcome to MongoDB Community.Thank you for your question. Could you give a brief on your use case on what are you trying to build? Is there a reason you are using Java Driver instead of Realm Database?You can connect Android App to Cluster via Realm App Id. Please refer to Quick Start section of the documentation.Another way is to connect the app remotely to Atlas Cluster, as explained in Realm Bytes, but it comes with limitations.I hope the provided information is helpful. Please dont hesitate to ask if you need more assistance.Cheers, \nHenna", "username": "henna.s" }, { "code": "", "text": "No, Henna it’s not been helpful. Why would you try to introduce Realm instead of telling us why it gives us errors like that? and no Henna there’s absolutely no reason to try to use Drivers cause we’re totally out of our minds and definitely needed somebody to question that part of our decision making process.\nUsing Kotlin Driver above 4.5.1 throws desugaring exception and even using 4.5.1 in context of the android project it throws no such method exception. When you try to debug the app you can clearly see that ConnectionString()'s giving us that kind of trouble and i don’t know why. But if you’re doing it somewhere like in main() function of kotlin language it works like a charm.\nso what is it? is it some configuration that’s to be done in android side or is it something that Kotlin Driver needs to go farther to be able to support? cause both errors are the same on KMongo, too.", "username": "MyName_MyLastname" }, { "code": "", "text": "Hi @henna.s, thank you for your answer. I am having the same problem as described by the OP.My usecase is that I want to push data from my Android app to a MongoDB instance (not Atlas). In the end it is some kind of logging. Realm seams not fitting since it seems targeted towards Atlas and also I do not need local storage, but just wan to create new records in my database. How can I solve the problem?mongodb - Kotlin error: \"Invalid build configuration. Attempt to create a global synthetic for 'Record desugaring' without a global-synthetics consumer.\" - Stack Overflow suggests that the SDK is outdated. Do we need to wait for a new release of the SDK?", "username": "Johann_Hemmann" } ]
How to properly connect my Kotlin android App with Mongodb Atlas Cluster?
2022-10-28T02:49:31.245Z
How to properly connect my Kotlin android App with Mongodb Atlas Cluster?
4,437
null
[ "crud", "mongodb-shell" ]
[ { "code": "db.types.insertOne(\n {\n \"_id\": 1,\n \"value\": Int32(\"1\"),\n \"expectedType\": \"Int32\"\n }\n)\nexpectedTypeinsertOne(...)expectedTypeupdateOne(...)", "text": "I’ve seen in the mongosh documentation that you can provide a type hint when inserting data into a collection:Unfortunately, that expectedType parameter doesn’t seem to be documented anywhere, in particular, it’s not on the reference manual page for insertOne(...).Is the expectedType parameter still supported? And if so, can it also be used for updateOne(...)? (How?)Last but not least, can someone please let me know how to find documentation on it? ", "username": "_Christian" }, { "code": "expectedType", "text": "Unfortunately, that expectedType parameter doesn’t seem to be documented anywhereIt is not documented because it is not a parameter. It is there as an example so that you can verify in Compass or mongosh (with $type) that the store value is really the expectedType. The hint mentioned in your first link is the constructor Int32() as indicated by the sentence:The Int32() constructor can be used to explicitly specify 32-bit integers.", "username": "steevej" }, { "code": "", "text": "Ouch. Thanks. Somehow, I read that as being a param, when in fact, it was part of the first param’s JSON block… ", "username": "_Christian" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Is the expectedType parameter available on updateOne(...)?
2023-11-06T11:10:23.548Z
Is the expectedType parameter available on updateOne(&hellip;)?
88
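To answer the update half of the question concretely: the same constructor hint works wherever a value is written, including updateOne. A small mongosh sketch continuing the document from the example:
```
// mongosh: the Int32() hint applies to updates just like inserts.
db.types.updateOne(
  { _id: 1 },
  { $set: { value: Int32("2"), expectedType: "Int32" } }
);

// Verify the stored BSON type really is a 32-bit int.
db.types.find({ _id: 1, value: { $type: "int" } });
```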
null
[ "python" ]
[ { "code": "", "text": "Created a cluster with 3 nodes (A - Primary, B, C). Everything worked fine. Now,How can we recover the cluster and make node C functional?", "username": "Ronak_Shah" }, { "code": "import random\nfrom pymongo import MongoClient\nreplica_set_name = \"rs02\"\nhost1 = 'localhost:6000'\nhost2 = 'localhost:6001'\nhost3 = 'localhost:6002'\nconnection_string = f\"mongodb://{host1},{host2},{host3}/?replicaSet={replica_set_name}\"\n\ntry:\n client = MongoClient(connection_string)\n db = client['test']\n collection = db['sample']\n\n while True:\n random_number = random.randint(1, 100)\n collection.insert_one({'random_number': random_number})\n print(f\"Inserted: {random_number}\")\n\nexcept Exception as e:\n print(f\"An error occurred: {e}\")\n", "text": "Hi @Ronak_Shah and welcome to MongoDB community forums!!Based on the above deployment configuration, I tried to insert sample data infinitely using the code below while I tried to perform the shutdown from the secondary and then primary as mentioned.The sample code:While the insertion was being performed, I tried the following steps:Please note that the test has been conducted on MongoDB version 6.0.5 and the shutdown performed have been graceful shutdowns.In order to triage your issue further could you help me with some information as:Regards\nAasawari", "username": "Aasawari" } ]
Mongo Cluster: Node unable to join the cluster after failure
2023-10-25T07:36:31.635Z
Mongo Cluster: Node unable to join the cluster after failure
208
null
[ "serverless" ]
[ { "code": "", "text": "Hi Folks,Just a quick word of warning… if you’ve got a Mongo DB serverless database running you better keep an eye on the bill. I’ve just been stung for $155 for 2 1/2 days. To say I’m shocked is an understatement .The pricing structure is really not user friendly and TBH I feel a bit scammed. Mongo Team - I feel like there should be some kind of warning during the setup about the possibility of this, or maybe force the user to set a price cap per month. Perhaps example pricing would also help…FYI - I only uploaded about 4.5 million docs (1.5 gb) and did some ‘manual testing’ which involved searching for records and displaying the data on a webpage. I also had a server add new docs every 15 minutes (about 20-100 docs)… not exactly enterprise scale stuff.Hopefully nobody else get caught out like me Take it easy,Nathan.", "username": "Nathan_Shields" }, { "code": "", "text": "Hi Nathan,\nI work in the serverless PM team and I am sorry your experience has been less than perfect. As serverless is one of the latest offering from MongDB, we are always looking to improve the product. I will be reaching out to you to discuss your issues in more details.Vishal", "username": "Vishal_Dhiman" }, { "code": "", "text": "Hello Folks,So I setup a serverless Mongo DB database to test a small project I’m working on… it’s ended up costing me $190 for 3 days. Luckily I spotted the bill - God only knows what it could have been by the end of the month!So what did I do wrong?\nSimple - I chose ‘serverless instance’ instead of a fixed monthly contract. The serverless sounds cheap - just pay for what you use… however the pricing structure isn’t very user friendly and you can be caught out BIG TIME! How big? Well my database of 4.5million simple documents, with me as the only user, cost me ~$190 for 3 days!Please don’t be like me.Mongo team, you really need to work on the docs for that product. Give some examples of how the pricing works in ‘real life scenarios’. Searching online shows I’m not the first to be hit by this… I hope I’m the last!On the plus side the M5 server I’ve now got is very good… and not any slower! It will also take at least 6 months to run up the same bill… not 3 days!!Take care folks,Nathan.", "username": "Nathan_Shields" }, { "code": "", "text": "so, did you fix this? i’m about to use this", "username": "AL_N_A" }, { "code": "", "text": "Hi friends!\nBig warning! Mongodb serverless is a real scam. I was charged $312 for a 7 days for nothing. More than $40 per day!! We just uploaded a database of small online community application. 1.5 gbyte. Less than 200 active users a day. Their pricing claims that cost of read is $0.1 per million reads. This means my 200 users made 400million reads a day!!\nI tried to reach them through a support chat but every response from them takes hours. I requested some details. NO ANSWER.\nBottom line I was a big fun of mongodb and deployed a dozen of projects. I recommend my customers mongodb whenever it may fit requirements. It is very convenient database especially for startups,but they caught us this time \nThey always were more expensive then others, but this time they bet my imagination. I’m really disappointed and angry. I hope my lesson will be helpful for others. So I’m still waiting for a details. Will share with you in my blog.\nCheers!", "username": "Pavel_Smirnov" }, { "code": "", "text": "mongodb and deployed a dozen of projects. I recommend my customers mongodb whenever it may fit requirements. 
It is very convenient database especially for startups,but they caught us this timeWe have a very simlar problem. For a day when nobody used our platform, mongo says they received 0.1M read, which is impossible. With a database of 95MB and 200 users using our platfor for 2 hours, they say we did more than 200M read. I think they are wrong computing the reads (or this is a scam)", "username": "Fabrizio_Ruggeri" }, { "code": "", "text": "Thank you all for reporting this. I was just about to subscribe to serverless and feared that I was unable to calculate the cost for our application. A friend of mine warned me about their pricing and surprises that may come up. You confirmed this. I’ll stay miles away from Serverless for the moment.", "username": "Louis" }, { "code": "", "text": "My bill went from $20/month to over $1000 . I looked at the billing usage and it started to spike on the 04/08/23 to 500 million RPU (weird its a round number). keep in mind there were no development changes and no change in traffic. Support said they will get back to me. Had to shut down the project in the mean time until this gets figured out. Hopefully support will get back to me soon.", "username": "Zeke_Schriefer" }, { "code": "", "text": "We are currently giving it a try and i think it really is VERY expensive, definitely discussing migrating back in the next weeks.", "username": "Milo_Tischler" }, { "code": "", "text": "Hi,I am from the Atlas Serverless team and am very sorry for the experience you’ve had. Please see the “Serverless Pricing” section of this post for more information on how the bill is calculated along with this article on helpful tips to optimize workloads on your Serverless instance.We apologize for the experience you have had. Please let us know if you have any additional questions by Direct Messaging me or by contacting support by clicking on the icon on the bottom right of this page.Regards,\nAnurag", "username": "Anurag_Kadasne" }, { "code": "", "text": "I am seeing the same issue here, it’s been running for 5 days now and already $20? How do I see the reads? I am sure I barely have any reads, I use the DB once a day, around 7k records.", "username": "Ed_Durguti" }, { "code": "", "text": "If you go to the “View Monitoring” tab, you should be able to see a chart for “Read/Write Units”. I would also recommend you take a look at the article and post linked in my previous post to get a better understanding of how pricing for serverless works. Please feel free to direct message me if you have other questions.Regards,\nAnurag", "username": "Anurag_Kadasne" }, { "code": "", "text": "I created a serverless instance to test for my app! I was in the free tier and moved to serverless assuming my costs would be better matched for that case as I have spiking load. To my shock, I am seeing a 20 dollar daily bill, with RPUs spiking into millions for no obvious reasons. There seems to be some issues with the pricing here. 
This basically makes serverless not cost effective at all and should I just go to dedicated instance?", "username": "Parikh_Jain" }, { "code": "", "text": "Thanks, I read the doc and then adjusted accordingly, indexed fields has resulted in less RPUs", "username": "Ed_Durguti" }, { "code": "", "text": "Hi Parikh_JainPlease see the “Serverless Pricing” section of this post for more information on how the bill is calculated along with this article on helpful tips to optimize workloads on your Serverless instance.", "username": "Anurag_Kadasne" }, { "code": "", "text": "I want to point out that there is a bug if you are using MongoDB Compass that can lead to indexes not being used.If you create an index in MongoDB Compass, it won’t be used by the Node.js driver (and possibly others, though I can’t speak to that personally).MongoDB Compass will be able to use the index just fine, making you think it’s working, when it’s not. People should be aware this is a possible cause of their bill being high.I explain more in the post below:", "username": "Justin_Jaeger" }, { "code": "", "text": "I had the same issue, $122 in 2 days. I had a few megabytes of data on my instance, I won’t ever use this product again. The app was not live, it had 0 users. Just me playing around with my API\nScreenshot 2023-09-06 at 5.23.51 PM2282×904 266 KB\n", "username": "Yusuf_Bagha" }, { "code": "", "text": "Hi YusufI am from the Serverless team and am terribly sorry about the experience you’ve had. Based on your screenshot, it seems like there were a lot of unindexed queries being run. I have sent you a direct message to better understand your use case. I would also suggest checking out the links posted in my responses above. Looking forward to corresponding over direct message.", "username": "Anurag_Kadasne" }, { "code": "", "text": "It’s worth mentioning again, after reading the docs my bill is significantly lower, although this is just a learning app for me, so no real customers/data.", "username": "Ed_Durguti" }, { "code": "", "text": "Also got hit for $180 in 7 days. We have 0 users besides 2 developers testing our website. We do have a socket API writing to database constantly but the bandwidth for that is minimal. Pretty ridiculous. Definitely feels like a scam.", "username": "Sesan_Chang" } ]
WARNING! -- Mongo DB Serverless Pricing
2022-09-16T07:09:08.408Z
WARNING! – Mongo DB Serverless Pricing
6639
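Several replies in this thread point at the same remedy: the serverless read units that drive the bill track how many documents the server has to examine, so queries that hit an index cost far less than collection scans ("indexing fields has resulted in fewer RPUs"), and one reply reports an index created only through Compass not behaving as expected from the application. The sketch below is not from the thread; it is a minimal illustration, assuming the official MongoDB Node.js driver, of creating the index from application code and checking the query plan from the same driver the app uses. The connection string, database, collection, and field names (shop, orders, customerId, createdAt) are placeholders, not anything the posters mentioned.

// Minimal sketch: create an index with the Node.js driver and verify the
// query plan uses it. All names and the URI below are placeholders.
const { MongoClient } = require("mongodb");

async function main() {
  // Placeholder URI -- substitute your own Atlas connection string.
  const client = new MongoClient("mongodb+srv://user:pass@cluster.example.mongodb.net/");
  await client.connect();

  const orders = client.db("shop").collection("orders");

  // Create the index from the driver itself, so it is defined alongside the
  // queries that depend on it.
  await orders.createIndex({ customerId: 1, createdAt: -1 });

  // A query shaped to match the index: equality on customerId, sort on createdAt.
  const query = { customerId: "abc123" };

  // explain("queryPlanner") reports the winning plan; an IXSCAN stage (rather
  // than COLLSCAN) means documents are located via the index instead of a full
  // collection scan, which is what keeps read units down.
  const plan = await orders.find(query).sort({ createdAt: -1 }).explain("queryPlanner");
  console.log(JSON.stringify(plan.queryPlanner.winningPlan, null, 2));

  const docs = await orders.find(query).sort({ createdAt: -1 }).limit(20).toArray();
  console.log(docs.length, "documents returned");

  await client.close();
}

main().catch(console.error);

Whatever the root cause of the Compass issue reported above, checking the winning plan through the driver the application actually uses is the most direct way to confirm the index is being hit before the read units (and the bill) start climbing.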